cwe_id (string, 8 classes) | func (string, 40 to 61.2k chars) | label (int64, 0 to 1) | cve_id (string, 13 to 16 chars) | id (int64, 0 to 3.29k) | text_label (string, 2 classes) |
---|---|---|---|---|---|
CWE-125 | pim_print(netdissect_options *ndo,
register const u_char *bp, register u_int len, const u_char *bp2)
{
register const u_char *ep;
register const struct pim *pim = (const struct pim *)bp;
ep = (const u_char *)ndo->ndo_snapend;
if (bp >= ep)
return;
#ifdef notyet /* currently we see only version and type */
ND_TCHECK(pim->pim_rsv);
#endif
switch (PIM_VER(pim->pim_typever)) {
case 2:
if (!ndo->ndo_vflag) {
ND_PRINT((ndo, "PIMv%u, %s, length %u",
PIM_VER(pim->pim_typever),
tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)),
len));
return;
} else {
ND_PRINT((ndo, "PIMv%u, length %u\n\t%s",
PIM_VER(pim->pim_typever),
len,
tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever))));
pimv2_print(ndo, bp, len, bp2);
}
break;
default:
ND_PRINT((ndo, "PIMv%u, length %u",
PIM_VER(pim->pim_typever),
len));
break;
}
return;
} | 0 | CVE-2017-13030 | 2,354 | benign |
CWE-125 | pim_print(netdissect_options *ndo,
register const u_char *bp, register u_int len, const u_char *bp2)
{
register const struct pim *pim = (const struct pim *)bp;
#ifdef notyet /* currently we see only version and type */
ND_TCHECK(pim->pim_rsv);
#endif
ND_TCHECK(pim->pim_typever);
switch (PIM_VER(pim->pim_typever)) {
case 2:
if (!ndo->ndo_vflag) {
ND_PRINT((ndo, "PIMv%u, %s, length %u",
PIM_VER(pim->pim_typever),
tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)),
len));
return;
} else {
ND_PRINT((ndo, "PIMv%u, length %u\n\t%s",
PIM_VER(pim->pim_typever),
len,
tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever))));
pimv2_print(ndo, bp, len, bp2);
}
break;
default:
ND_PRINT((ndo, "PIMv%u, length %u",
PIM_VER(pim->pim_typever),
len));
break;
}
return;
trunc:
ND_PRINT((ndo, "[|pim]"));
return;
} | 1 | CVE-2017-13030 | 2,354 | vulnerable |
CWE-119 | R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
if (!sz) {
return NULL;
}
ut64 offset = 0;
RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
offset += 6;
if (!attr) {
return NULL;
}
attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR;
// if (buffer + offset > buffer + sz) return NULL;
attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
attr->size = offset;
// IFDBG r_bin_java_print_source_code_file_attr_summary(attr);
return attr;
} | 0 | CVE-2022-0519 | 3,280 | benign |
CWE-119 | R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
if (!sz || sz == UT64_MAX) {
return NULL;
}
#if 0
/// XXX this breaks tests
if (sz < 8) {
return NULL;
}
#endif
ut64 offset = 0;
RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
offset += 6;
if (attr) {
attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR;
attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
attr->size = offset;
// IFDBG r_bin_java_print_source_code_file_attr_summary(attr);
}
return attr;
} | 1 | CVE-2022-0519 | 3,280 | vulnerable |
CWE-476 | static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
{
HEVCLocalContext *lc = s->HEVClc;
GetBitContext *gb = &lc->gb;
int ctb_addr_ts, ret;
*gb = nal->gb;
s->nal_unit_type = nal->type;
s->temporal_id = nal->temporal_id;
switch (s->nal_unit_type) {
case HEVC_NAL_VPS:
if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
ret = s->avctx->hwaccel->decode_params(s->avctx,
nal->type,
nal->raw_data,
nal->raw_size);
if (ret < 0)
goto fail;
}
ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_SPS:
if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
ret = s->avctx->hwaccel->decode_params(s->avctx,
nal->type,
nal->raw_data,
nal->raw_size);
if (ret < 0)
goto fail;
}
ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
s->apply_defdispwin);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_PPS:
if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
ret = s->avctx->hwaccel->decode_params(s->avctx,
nal->type,
nal->raw_data,
nal->raw_size);
if (ret < 0)
goto fail;
}
ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_SEI_PREFIX:
case HEVC_NAL_SEI_SUFFIX:
if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
ret = s->avctx->hwaccel->decode_params(s->avctx,
nal->type,
nal->raw_data,
nal->raw_size);
if (ret < 0)
goto fail;
}
ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_TRAIL_R:
case HEVC_NAL_TRAIL_N:
case HEVC_NAL_TSA_N:
case HEVC_NAL_TSA_R:
case HEVC_NAL_STSA_N:
case HEVC_NAL_STSA_R:
case HEVC_NAL_BLA_W_LP:
case HEVC_NAL_BLA_W_RADL:
case HEVC_NAL_BLA_N_LP:
case HEVC_NAL_IDR_W_RADL:
case HEVC_NAL_IDR_N_LP:
case HEVC_NAL_CRA_NUT:
case HEVC_NAL_RADL_N:
case HEVC_NAL_RADL_R:
case HEVC_NAL_RASL_N:
case HEVC_NAL_RASL_R:
ret = hls_slice_header(s);
if (ret < 0)
return ret;
if (
(s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
(s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
(s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
break;
}
if (s->sh.first_slice_in_pic_flag) {
if (s->ref) {
av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
goto fail;
}
if (s->max_ra == INT_MAX) {
if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
s->max_ra = s->poc;
} else {
if (IS_IDR(s))
s->max_ra = INT_MIN;
}
}
if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
s->poc <= s->max_ra) {
s->is_decoded = 0;
break;
} else {
if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
s->max_ra = INT_MIN;
}
s->overlap ++;
ret = hevc_frame_start(s);
if (ret < 0)
return ret;
} else if (!s->ref) {
av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
goto fail;
}
if (s->nal_unit_type != s->first_nal_type) {
av_log(s->avctx, AV_LOG_ERROR,
"Non-matching NAL types of the VCL NALUs: %d %d\n",
s->first_nal_type, s->nal_unit_type);
return AVERROR_INVALIDDATA;
}
if (!s->sh.dependent_slice_segment_flag &&
s->sh.slice_type != HEVC_SLICE_I) {
ret = ff_hevc_slice_rpl(s);
if (ret < 0) {
av_log(s->avctx, AV_LOG_WARNING,
"Error constructing the reference lists for the current slice.\n");
goto fail;
}
}
if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
if (ret < 0)
goto fail;
}
if (s->avctx->hwaccel) {
ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
if (ret < 0)
goto fail;
} else {
if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
ctb_addr_ts = hls_slice_data_wpp(s, nal);
else
ctb_addr_ts = hls_slice_data(s);
if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
s->is_decoded = 1;
}
if (ctb_addr_ts < 0) {
ret = ctb_addr_ts;
goto fail;
}
}
break;
case HEVC_NAL_EOS_NUT:
case HEVC_NAL_EOB_NUT:
s->seq_decode = (s->seq_decode + 1) & 0xff;
s->max_ra = INT_MAX;
break;
case HEVC_NAL_AUD:
case HEVC_NAL_FD_NUT:
break;
default:
av_log(s->avctx, AV_LOG_INFO,
"Skipping NAL unit %d\n", s->nal_unit_type);
}
return 0;
fail:
if (s->avctx->err_recognition & AV_EF_EXPLODE)
return ret;
return 0;
} | 0 | CVE-2019-11338 | 1,114 | benign |
CWE-476 | static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
{
HEVCLocalContext *lc = s->HEVClc;
GetBitContext *gb = &lc->gb;
int ctb_addr_ts, ret;
*gb = nal->gb;
s->nal_unit_type = nal->type;
s->temporal_id = nal->temporal_id;
switch (s->nal_unit_type) {
case HEVC_NAL_VPS:
if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
ret = s->avctx->hwaccel->decode_params(s->avctx,
nal->type,
nal->raw_data,
nal->raw_size);
if (ret < 0)
goto fail;
}
ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_SPS:
if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
ret = s->avctx->hwaccel->decode_params(s->avctx,
nal->type,
nal->raw_data,
nal->raw_size);
if (ret < 0)
goto fail;
}
ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
s->apply_defdispwin);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_PPS:
if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
ret = s->avctx->hwaccel->decode_params(s->avctx,
nal->type,
nal->raw_data,
nal->raw_size);
if (ret < 0)
goto fail;
}
ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_SEI_PREFIX:
case HEVC_NAL_SEI_SUFFIX:
if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
ret = s->avctx->hwaccel->decode_params(s->avctx,
nal->type,
nal->raw_data,
nal->raw_size);
if (ret < 0)
goto fail;
}
ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_TRAIL_R:
case HEVC_NAL_TRAIL_N:
case HEVC_NAL_TSA_N:
case HEVC_NAL_TSA_R:
case HEVC_NAL_STSA_N:
case HEVC_NAL_STSA_R:
case HEVC_NAL_BLA_W_LP:
case HEVC_NAL_BLA_W_RADL:
case HEVC_NAL_BLA_N_LP:
case HEVC_NAL_IDR_W_RADL:
case HEVC_NAL_IDR_N_LP:
case HEVC_NAL_CRA_NUT:
case HEVC_NAL_RADL_N:
case HEVC_NAL_RADL_R:
case HEVC_NAL_RASL_N:
case HEVC_NAL_RASL_R:
ret = hls_slice_header(s);
if (ret < 0)
return ret;
if (ret == 1) {
ret = AVERROR_INVALIDDATA;
goto fail;
}
if (
(s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
(s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
(s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
break;
}
if (s->sh.first_slice_in_pic_flag) {
if (s->max_ra == INT_MAX) {
if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
s->max_ra = s->poc;
} else {
if (IS_IDR(s))
s->max_ra = INT_MIN;
}
}
if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
s->poc <= s->max_ra) {
s->is_decoded = 0;
break;
} else {
if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
s->max_ra = INT_MIN;
}
s->overlap ++;
ret = hevc_frame_start(s);
if (ret < 0)
return ret;
} else if (!s->ref) {
av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
goto fail;
}
if (s->nal_unit_type != s->first_nal_type) {
av_log(s->avctx, AV_LOG_ERROR,
"Non-matching NAL types of the VCL NALUs: %d %d\n",
s->first_nal_type, s->nal_unit_type);
return AVERROR_INVALIDDATA;
}
if (!s->sh.dependent_slice_segment_flag &&
s->sh.slice_type != HEVC_SLICE_I) {
ret = ff_hevc_slice_rpl(s);
if (ret < 0) {
av_log(s->avctx, AV_LOG_WARNING,
"Error constructing the reference lists for the current slice.\n");
goto fail;
}
}
if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
if (ret < 0)
goto fail;
}
if (s->avctx->hwaccel) {
ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
if (ret < 0)
goto fail;
} else {
if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
ctb_addr_ts = hls_slice_data_wpp(s, nal);
else
ctb_addr_ts = hls_slice_data(s);
if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
s->is_decoded = 1;
}
if (ctb_addr_ts < 0) {
ret = ctb_addr_ts;
goto fail;
}
}
break;
case HEVC_NAL_EOS_NUT:
case HEVC_NAL_EOB_NUT:
s->seq_decode = (s->seq_decode + 1) & 0xff;
s->max_ra = INT_MAX;
break;
case HEVC_NAL_AUD:
case HEVC_NAL_FD_NUT:
break;
default:
av_log(s->avctx, AV_LOG_INFO,
"Skipping NAL unit %d\n", s->nal_unit_type);
}
return 0;
fail:
if (s->avctx->err_recognition & AV_EF_EXPLODE)
return ret;
return 0;
} | 1 | CVE-2019-11338 | 1,114 | vulnerable |
CWE-119 | static int do_cmd (xd3_stream *stream, const char *buf)
{
int ret;
if ((ret = system (buf)) != 0)
{
if (WIFEXITED (ret))
{
stream->msg = "command exited non-zero";
IF_DEBUG1 (XPR(NT "command was: %s\n", buf));
}
else
{
stream->msg = "abnormal command termination";
}
return XD3_INTERNAL;
}
return 0;
} | 0 | CVE-2014-9765 | 1,738 | benign |
CWE-119 | static int do_cmd (xd3_stream *stream, const char *buf)
{
int ret;
if ((ret = system (buf)) != 0)
{
if (WIFEXITED (ret))
{
stream->msg = "command exited non-zero";
IF_DEBUG1 (XPR(NT "command was: %s\n", buf));
}
else
{
stream->msg = "abnormal command termination";
}
return ret;
}
return 0;
} | 1 | CVE-2014-9765 | 1,738 | vulnerable |
CWE-119 | cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
assert((size_t)CDF_SHORT_SEC_SIZE(h) == len);
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len);
return len;
} | 0 | CVE-2012-1571 | 3,066 | benign |
CWE-119 | cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (sst->sst_len < (size_t)id) {
DPRINTF(("bad sector id %d > %d\n", id, sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
} | 1 | CVE-2012-1571 | 3,066 | vulnerable |
CWE-119 | static int mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
/* No recursion; use packet counter to save back ptrs (reset
* to 0 as we leave), and comefrom to save source hook bitmask.
*/
for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
unsigned int pos = newinfo->hook_entry[hook];
struct arpt_entry *e
= (struct arpt_entry *)(entry0 + pos);
if (!(valid_hooks & (1 << hook)))
continue;
/* Set initial back pointer. */
e->counters.pcnt = pos;
for (;;) {
const struct xt_standard_target *t
= (void *)arpt_get_target_c(e);
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
pr_notice("arptables: loop hook %u pos %u %08X.\n",
hook, pos, e->comefrom);
return 0;
}
e->comefrom
|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
/* Unconditional return/END. */
if ((e->target_offset == sizeof(struct arpt_entry) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < 0 && unconditional(&e->arp)) ||
visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < -NF_MAX_VERDICT - 1) {
duprintf("mark_source_chains: bad "
"negative verdict (%i)\n",
t->verdict);
return 0;
}
/* Return: backtrack through the last
* big jump.
*/
do {
e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
oldpos = pos;
pos = e->counters.pcnt;
e->counters.pcnt = 0;
/* We're at the start. */
if (pos == oldpos)
goto next;
e = (struct arpt_entry *)
(entry0 + pos);
} while (oldpos == pos + e->next_offset);
/* Move along one */
size = e->next_offset;
e = (struct arpt_entry *)
(entry0 + pos + size);
e->counters.pcnt = pos;
pos += size;
} else {
int newpos = t->verdict;
if (strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0 &&
newpos >= 0) {
if (newpos > newinfo->size -
sizeof(struct arpt_entry)) {
duprintf("mark_source_chains: "
"bad verdict (%i)\n",
newpos);
return 0;
}
/* This a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
}
e = (struct arpt_entry *)
(entry0 + newpos);
e->counters.pcnt = pos;
pos = newpos;
}
}
next:
duprintf("Finished chain %u\n", hook);
}
return 1;
} | 0 | CVE-2016-3134 | 2,691 | benign |
CWE-119 | static int mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
/* No recursion; use packet counter to save back ptrs (reset
* to 0 as we leave), and comefrom to save source hook bitmask.
*/
for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
unsigned int pos = newinfo->hook_entry[hook];
struct arpt_entry *e
= (struct arpt_entry *)(entry0 + pos);
if (!(valid_hooks & (1 << hook)))
continue;
/* Set initial back pointer. */
e->counters.pcnt = pos;
for (;;) {
const struct xt_standard_target *t
= (void *)arpt_get_target_c(e);
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
pr_notice("arptables: loop hook %u pos %u %08X.\n",
hook, pos, e->comefrom);
return 0;
}
e->comefrom
|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
/* Unconditional return/END. */
if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < -NF_MAX_VERDICT - 1) {
duprintf("mark_source_chains: bad "
"negative verdict (%i)\n",
t->verdict);
return 0;
}
/* Return: backtrack through the last
* big jump.
*/
do {
e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
oldpos = pos;
pos = e->counters.pcnt;
e->counters.pcnt = 0;
/* We're at the start. */
if (pos == oldpos)
goto next;
e = (struct arpt_entry *)
(entry0 + pos);
} while (oldpos == pos + e->next_offset);
/* Move along one */
size = e->next_offset;
e = (struct arpt_entry *)
(entry0 + pos + size);
e->counters.pcnt = pos;
pos += size;
} else {
int newpos = t->verdict;
if (strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0 &&
newpos >= 0) {
if (newpos > newinfo->size -
sizeof(struct arpt_entry)) {
duprintf("mark_source_chains: "
"bad verdict (%i)\n",
newpos);
return 0;
}
/* This a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
}
e = (struct arpt_entry *)
(entry0 + newpos);
e->counters.pcnt = pos;
pos = newpos;
}
}
next:
duprintf("Finished chain %u\n", hook);
}
return 1;
} | 1 | CVE-2016-3134 | 2,691 | vulnerable |
CWE-119 | int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
struct hexline *hx;
u8 reset;
int ret,pos=0;
hx = kmalloc(sizeof(*hx), GFP_KERNEL);
if (!hx)
return -ENOMEM;
/* stop the CPU */
reset = 1;
if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
err("could not stop the USB controller CPU.");
while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
if (ret != hx->len) {
err("error while transferring firmware (transferred size: %d, block size: %d)",
ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
kfree(hx);
return ret;
}
if (ret == 0) {
/* restart the CPU */
reset = 0;
if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
kfree(hx);
return ret;
} | 0 | CVE-2017-8061 | 550 | benign |
CWE-119 | int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
struct hexline *hx;
u8 *buf;
int ret, pos = 0;
u16 cpu_cs_register = cypress[type].cpu_cs_register;
buf = kmalloc(sizeof(*hx), GFP_KERNEL);
if (!buf)
return -ENOMEM;
hx = (struct hexline *)buf;
/* stop the CPU */
buf[0] = 1;
if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
err("could not stop the USB controller CPU.");
while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
if (ret != hx->len) {
err("error while transferring firmware (transferred size: %d, block size: %d)",
ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
kfree(buf);
return ret;
}
if (ret == 0) {
/* restart the CPU */
buf[0] = 0;
if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
kfree(buf);
return ret;
} | 1 | CVE-2017-8061 | 550 | vulnerable |
CWE-125 | static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
} | 0 | CVE-2016-9777 | 2,296 | benign |
CWE-125 | static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
} | 1 | CVE-2016-9777 | 2,296 | vulnerable |
CWE-20 | static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
zval **retval;
char *key;
uint len;
long index;
HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (!offset) {
return &EG(uninitialized_zval_ptr);
}
if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return &EG(error_zval_ptr);;
}
switch (Z_TYPE_P(offset)) {
case IS_STRING:
key = Z_STRVAL_P(offset);
len = Z_STRLEN_P(offset) + 1;
string_offest:
if (zend_symtable_find(ht, key, len, (void **) &retval) == FAILURE) {
switch (type) {
case BP_VAR_R:
zend_error(E_NOTICE, "Undefined index: %s", key);
case BP_VAR_UNSET:
case BP_VAR_IS:
retval = &EG(uninitialized_zval_ptr);
break;
case BP_VAR_RW:
zend_error(E_NOTICE,"Undefined index: %s", key);
case BP_VAR_W: {
zval *value;
ALLOC_INIT_ZVAL(value);
zend_symtable_update(ht, key, len, (void**)&value, sizeof(void*), (void **)&retval);
}
}
}
return retval;
case IS_NULL:
key = "";
len = 1;
goto string_offest;
case IS_RESOURCE:
zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset));
case IS_DOUBLE:
case IS_BOOL:
case IS_LONG:
if (offset->type == IS_DOUBLE) {
index = (long)Z_DVAL_P(offset);
} else {
index = Z_LVAL_P(offset);
}
if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) {
switch (type) {
case BP_VAR_R:
zend_error(E_NOTICE, "Undefined offset: %ld", index);
case BP_VAR_UNSET:
case BP_VAR_IS:
retval = &EG(uninitialized_zval_ptr);
break;
case BP_VAR_RW:
zend_error(E_NOTICE, "Undefined offset: %ld", index);
case BP_VAR_W: {
zval *value;
ALLOC_INIT_ZVAL(value);
zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval);
}
}
}
return retval;
default:
zend_error(E_WARNING, "Illegal offset type");
return (type == BP_VAR_W || type == BP_VAR_RW) ?
&EG(error_zval_ptr) : &EG(uninitialized_zval_ptr);
}
} /* }}} */ | 0 | CVE-2016-7417 | 1,297 | benign |
CWE-20 | static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
zval **retval;
char *key;
uint len;
long index;
HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
if (!offset || !ht) {
return &EG(uninitialized_zval_ptr);
}
if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) {
zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
return &EG(error_zval_ptr);;
}
switch (Z_TYPE_P(offset)) {
case IS_STRING:
key = Z_STRVAL_P(offset);
len = Z_STRLEN_P(offset) + 1;
string_offest:
if (zend_symtable_find(ht, key, len, (void **) &retval) == FAILURE) {
switch (type) {
case BP_VAR_R:
zend_error(E_NOTICE, "Undefined index: %s", key);
case BP_VAR_UNSET:
case BP_VAR_IS:
retval = &EG(uninitialized_zval_ptr);
break;
case BP_VAR_RW:
zend_error(E_NOTICE,"Undefined index: %s", key);
case BP_VAR_W: {
zval *value;
ALLOC_INIT_ZVAL(value);
zend_symtable_update(ht, key, len, (void**)&value, sizeof(void*), (void **)&retval);
}
}
}
return retval;
case IS_NULL:
key = "";
len = 1;
goto string_offest;
case IS_RESOURCE:
zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset));
case IS_DOUBLE:
case IS_BOOL:
case IS_LONG:
if (offset->type == IS_DOUBLE) {
index = (long)Z_DVAL_P(offset);
} else {
index = Z_LVAL_P(offset);
}
if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) {
switch (type) {
case BP_VAR_R:
zend_error(E_NOTICE, "Undefined offset: %ld", index);
case BP_VAR_UNSET:
case BP_VAR_IS:
retval = &EG(uninitialized_zval_ptr);
break;
case BP_VAR_RW:
zend_error(E_NOTICE, "Undefined offset: %ld", index);
case BP_VAR_W: {
zval *value;
ALLOC_INIT_ZVAL(value);
zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval);
}
}
}
return retval;
default:
zend_error(E_WARNING, "Illegal offset type");
return (type == BP_VAR_W || type == BP_VAR_RW) ?
&EG(error_zval_ptr) : &EG(uninitialized_zval_ptr);
}
} /* }}} */ | 1 | CVE-2016-7417 | 1,297 | vulnerable |
CWE-416 | regtilde(char_u *source, int magic)
{
char_u *newsub = source;
char_u *tmpsub;
char_u *p;
int len;
int prevlen;
for (p = newsub; *p; ++p)
{
if ((*p == '~' && magic) || (*p == '\\' && *(p + 1) == '~' && !magic))
{
if (reg_prev_sub != NULL)
{
// length = len(newsub) - 1 + len(prev_sub) + 1
prevlen = (int)STRLEN(reg_prev_sub);
tmpsub = alloc(STRLEN(newsub) + prevlen);
if (tmpsub != NULL)
{
// copy prefix
len = (int)(p - newsub); // not including ~
mch_memmove(tmpsub, newsub, (size_t)len);
// interpret tilde
mch_memmove(tmpsub + len, reg_prev_sub, (size_t)prevlen);
// copy postfix
if (!magic)
++p; // back off backslash
STRCPY(tmpsub + len + prevlen, p + 1);
if (newsub != source) // already allocated newsub
vim_free(newsub);
newsub = tmpsub;
p = newsub + len + prevlen;
}
}
else if (magic)
STRMOVE(p, p + 1); // remove '~'
else
STRMOVE(p, p + 2); // remove '\~'
--p;
}
else
{
if (*p == '\\' && p[1]) // skip escaped characters
++p;
if (has_mbyte)
p += (*mb_ptr2len)(p) - 1;
}
}
vim_free(reg_prev_sub);
if (newsub != source) // newsub was allocated, just keep it
reg_prev_sub = newsub;
else // no ~ found, need to save newsub
reg_prev_sub = vim_strsave(newsub);
return newsub;
} | 0 | CVE-2022-2345 | 1,429 | benign |
CWE-416 | regtilde(char_u *source, int magic)
{
char_u *newsub = source;
char_u *tmpsub;
char_u *p;
int len;
int prevlen;
for (p = newsub; *p; ++p)
{
if ((*p == '~' && magic) || (*p == '\\' && *(p + 1) == '~' && !magic))
{
if (reg_prev_sub != NULL)
{
// length = len(newsub) - 1 + len(prev_sub) + 1
prevlen = (int)STRLEN(reg_prev_sub);
tmpsub = alloc(STRLEN(newsub) + prevlen);
if (tmpsub != NULL)
{
// copy prefix
len = (int)(p - newsub); // not including ~
mch_memmove(tmpsub, newsub, (size_t)len);
// interpret tilde
mch_memmove(tmpsub + len, reg_prev_sub, (size_t)prevlen);
// copy postfix
if (!magic)
++p; // back off backslash
STRCPY(tmpsub + len + prevlen, p + 1);
if (newsub != source) // already allocated newsub
vim_free(newsub);
newsub = tmpsub;
p = newsub + len + prevlen;
}
}
else if (magic)
STRMOVE(p, p + 1); // remove '~'
else
STRMOVE(p, p + 2); // remove '\~'
--p;
}
else
{
if (*p == '\\' && p[1]) // skip escaped characters
++p;
if (has_mbyte)
p += (*mb_ptr2len)(p) - 1;
}
}
// Store a copy of newsub in reg_prev_sub. It is always allocated,
// because recursive calls may make the returned string invalid.
vim_free(reg_prev_sub);
reg_prev_sub = vim_strsave(newsub);
return newsub;
} | 1 | CVE-2022-2345 | 1,429 | vulnerable |
CWE-476 | static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
return crypto_init_skcipher_ops_blkcipher(tfm);
if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
tfm->__crt_alg->cra_type == &crypto_givcipher_type)
return crypto_init_skcipher_ops_ablkcipher(tfm);
skcipher->setkey = alg->setkey;
skcipher->encrypt = alg->encrypt;
skcipher->decrypt = alg->decrypt;
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
if (alg->init)
return alg->init(skcipher);
return 0;
} | 0 | CVE-2017-9211 | 2,760 | benign |
CWE-476 | static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
return crypto_init_skcipher_ops_blkcipher(tfm);
if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
tfm->__crt_alg->cra_type == &crypto_givcipher_type)
return crypto_init_skcipher_ops_ablkcipher(tfm);
skcipher->setkey = skcipher_setkey;
skcipher->encrypt = alg->encrypt;
skcipher->decrypt = alg->decrypt;
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
if (alg->init)
return alg->init(skcipher);
return 0;
} | 1 | CVE-2017-9211 | 2,760 | vulnerable |
CWE-416 | static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
if (!pud_access_permitted(orig, write))
return 0;
if (pud_devmap(orig))
return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
refs = 0;
page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
head = compound_head(pud_page(orig));
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
}
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
SetPageReferenced(head);
return 1;
} | 0 | CVE-2019-11487 | 985 | benign |
CWE-416 | static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
if (!pud_access_permitted(orig, write))
return 0;
if (pud_devmap(orig))
return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
refs = 0;
page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
head = try_get_compound_head(pud_page(orig), refs);
if (!head) {
*nr -= refs;
return 0;
}
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
SetPageReferenced(head);
return 1;
} | 1 | CVE-2019-11487 | 985 | vulnerable |
CWE-119 | horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc)
{
tmsize_t stride = PredictorState(tif)->stride;
uint32* wp = (uint32*) cp0;
tmsize_t wc = cc / 4;
assert((cc%(4*stride))==0);
if (wc > stride) {
wc -= stride;
do {
REPEAT4(stride, wp[stride] += wp[0]; wp++)
wc -= stride;
} while (wc > 0);
}
} | 0 | CVE-2016-9535 | 954 | benign |
CWE-119 | horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc)
{
tmsize_t stride = PredictorState(tif)->stride;
uint32* wp = (uint32*) cp0;
tmsize_t wc = cc / 4;
if((cc%(4*stride))!=0)
{
TIFFErrorExt(tif->tif_clientdata, "horAcc32",
"%s", "cc%(4*stride))!=0");
return 0;
}
if (wc > stride) {
wc -= stride;
do {
REPEAT4(stride, wp[stride] += wp[0]; wp++)
wc -= stride;
} while (wc > 0);
}
return 1;
} | 1 | CVE-2016-9535 | 954 | vulnerable |
CWE-20 | static inline long decode_twos_comp(ulong c, int prec)
{
long result;
assert(prec >= 2);
jas_eprintf("warning: support for signed data is untested\n");
// NOTE: Is this correct?
result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1)));
return result;
} | 0 | CVE-2016-9395 | 2,976 | benign |
CWE-20 | static inline long decode_twos_comp(jas_ulong c, int prec)
{
long result;
assert(prec >= 2);
jas_eprintf("warning: support for signed data is untested\n");
// NOTE: Is this correct?
result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1)));
return result;
} | 1 | CVE-2016-9395 | 2,976 | vulnerable |
CWE-125 | snmp_ber_encode_length(unsigned char *out, uint32_t *out_len, uint8_t length)
{
*out-- = length;
(*out_len)++;
return out;
} | 0 | CVE-2020-12141 | 394 | benign |
CWE-125 | snmp_ber_encode_length(snmp_packet_t *snmp_packet, uint16_t length)
{
if(length > 0xFF) {
if(snmp_packet->used == snmp_packet->max) {
return 0;
}
*snmp_packet->out-- = (uint8_t)length & 0xFF;
snmp_packet->used++;
if(snmp_packet->used == snmp_packet->max) {
return 0;
}
*snmp_packet->out-- = (uint8_t)(length >> 8) & 0xFF;
snmp_packet->used++;
if(snmp_packet->used == snmp_packet->max) {
return 0;
}
*snmp_packet->out-- = 0x82;
snmp_packet->used++;
} else if(length > 0x7F) {
if(snmp_packet->used == snmp_packet->max) {
return 0;
}
*snmp_packet->out-- = (uint8_t)length & 0xFF;
snmp_packet->used++;
if(snmp_packet->used == snmp_packet->max) {
return 0;
}
*snmp_packet->out-- = 0x81;
snmp_packet->used++;
} else {
if(snmp_packet->used == snmp_packet->max) {
return 0;
}
*snmp_packet->out-- = (uint8_t)length & 0x7F;
snmp_packet->used++;
}
return 1;
} | 1 | CVE-2020-12141 | 394 | vulnerable |
CWE-787 | int TLSInStream::readTLS(U8* buf, int len, bool wait)
{
int n;
n = in->check(1, 1, wait);
if (n == 0)
return 0;
n = gnutls_record_recv(session, (void *) buf, len);
if (n == GNUTLS_E_INTERRUPTED || n == GNUTLS_E_AGAIN)
return 0;
if (n < 0) throw TLSException("readTLS", n);
return n;
} | 0 | CVE-2019-15694 | 1,270 | benign |
CWE-787 | size_t TLSInStream::readTLS(U8* buf, size_t len, bool wait)
{
int n;
n = in->check(1, 1, wait);
if (n == 0)
return 0;
n = gnutls_record_recv(session, (void *) buf, len);
if (n == GNUTLS_E_INTERRUPTED || n == GNUTLS_E_AGAIN)
return 0;
if (n < 0) throw TLSException("readTLS", n);
return n;
} | 1 | CVE-2019-15694 | 1,270 | vulnerable |
CWE-119 | decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len)
{
size_t cipher_len;
size_t i;
unsigned char iv[16] = { 0 };
unsigned char plaintext[4096] = { 0 };
epass2003_exdata *exdata = NULL;
if (!card->drv_data)
return SC_ERROR_INVALID_ARGUMENTS;
exdata = (epass2003_exdata *)card->drv_data;
/* no cipher */
if (in[0] == 0x99)
return 0;
/* parse cipher length */
if (0x01 == in[2] && 0x82 != in[1]) {
cipher_len = in[1];
i = 3;
}
else if (0x01 == in[3] && 0x81 == in[1]) {
cipher_len = in[2];
i = 4;
}
else if (0x01 == in[4] && 0x82 == in[1]) {
cipher_len = in[2] * 0x100;
cipher_len += in[3];
i = 5;
}
else {
return -1;
}
if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext)
return -1;
/* decrypt */
if (KEY_TYPE_AES == exdata->smtype)
aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
else
des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
/* unpadding */
while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0))
cipher_len--;
if (2 == cipher_len)
return -1;
memcpy(out, plaintext, cipher_len - 2);
*out_len = cipher_len - 2;
return 0;
} | 0 | CVE-2018-16391 | 1,656 | benign |
CWE-119 | decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len)
{
size_t cipher_len;
size_t i;
unsigned char iv[16] = { 0 };
unsigned char plaintext[4096] = { 0 };
epass2003_exdata *exdata = NULL;
if (!card->drv_data)
return SC_ERROR_INVALID_ARGUMENTS;
exdata = (epass2003_exdata *)card->drv_data;
/* no cipher */
if (in[0] == 0x99)
return 0;
/* parse cipher length */
if (0x01 == in[2] && 0x82 != in[1]) {
cipher_len = in[1];
i = 3;
}
else if (0x01 == in[3] && 0x81 == in[1]) {
cipher_len = in[2];
i = 4;
}
else if (0x01 == in[4] && 0x82 == in[1]) {
cipher_len = in[2] * 0x100;
cipher_len += in[3];
i = 5;
}
else {
return -1;
}
if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext)
return -1;
/* decrypt */
if (KEY_TYPE_AES == exdata->smtype)
aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
else
des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
/* unpadding */
while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0))
cipher_len--;
if (2 == cipher_len || *out_len < cipher_len - 2)
return -1;
memcpy(out, plaintext, cipher_len - 2);
*out_len = cipher_len - 2;
return 0;
} | 1 | CVE-2018-16391 | 1,656 | vulnerable |
CWE-476 | template <class T> void testFeatTable(const T & table, const char * testName)
{
FeatureMap testFeatureMap;
dummyFace.replace_table(TtfUtil::Tag::Feat, &table, sizeof(T));
gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, gr_face_dumbRendering);
if (!face) throw std::runtime_error("failed to load font");
bool readStatus = testFeatureMap.readFeats(*face);
testAssert("readFeats", readStatus);
fprintf(stderr, testName, NULL);
testAssertEqual("test num features %hu,%hu\n", testFeatureMap.numFeats(), table.m_header.m_numFeat);
for (size_t i = 0; i < sizeof(table.m_defs) / sizeof(FeatDefn); i++)
{
const FeatureRef * ref = testFeatureMap.findFeatureRef(table.m_defs[i].m_featId);
testAssert("test feat\n", ref);
testAssertEqual("test feat settings %hu %hu\n", ref->getNumSettings(), table.m_defs[i].m_numFeatSettings);
testAssertEqual("test feat label %hu %hu\n", ref->getNameId(), table.m_defs[i].m_label);
size_t settingsIndex = (table.m_defs[i].m_settingsOffset - sizeof(FeatHeader)
- (sizeof(FeatDefn) * table.m_header.m_numFeat)) / sizeof(FeatSetting);
for (size_t j = 0; j < table.m_defs[i].m_numFeatSettings; j++)
{
testAssertEqual("setting label %hu %hu\n", ref->getSettingName(j),
table.m_settings[settingsIndex+j].m_label);
}
}
gr_face_destroy(face);
} | 0 | CVE-2018-7999 | 718 | benign |
CWE-476 | template <class T> void testFeatTable(const T & table, const char * testName)
{
FeatureMap testFeatureMap;
dummyFace.replace_table(TtfUtil::Tag::Feat, &table, sizeof(T));
gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, 0);
if (!face) throw std::runtime_error("failed to load font");
bool readStatus = testFeatureMap.readFeats(*face);
testAssert("readFeats", readStatus);
fprintf(stderr, testName, NULL);
testAssertEqual("test num features %hu,%hu\n", testFeatureMap.numFeats(), table.m_header.m_numFeat);
for (size_t i = 0; i < sizeof(table.m_defs) / sizeof(FeatDefn); i++)
{
const FeatureRef * ref = testFeatureMap.findFeatureRef(table.m_defs[i].m_featId);
testAssert("test feat\n", ref);
testAssertEqual("test feat settings %hu %hu\n", ref->getNumSettings(), table.m_defs[i].m_numFeatSettings);
testAssertEqual("test feat label %hu %hu\n", ref->getNameId(), table.m_defs[i].m_label);
size_t settingsIndex = (table.m_defs[i].m_settingsOffset - sizeof(FeatHeader)
- (sizeof(FeatDefn) * table.m_header.m_numFeat)) / sizeof(FeatSetting);
for (size_t j = 0; j < table.m_defs[i].m_numFeatSettings; j++)
{
testAssertEqual("setting label %hu %hu\n", ref->getSettingName(j),
table.m_settings[settingsIndex+j].m_label);
}
}
gr_face_destroy(face);
} | 1 | CVE-2018-7999 | 718 | vulnerable |
CWE-119 | check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
int ret, off, h;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
if (e->next_offset < sizeof(struct compat_ip6t_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
if (ret != 0)
goto release_matches;
++j;
}
t = compat_ip6t_get_target(e);
target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
if (ret)
goto out;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
module_put(ematch->u.kernel.match->me);
}
return ret;
} | 0 | CVE-2016-4998 | 3,082 | benign |
CWE-119 | check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
int ret, off, h;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
if (e->next_offset < sizeof(struct compat_ip6t_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
if (ret != 0)
goto release_matches;
++j;
}
t = compat_ip6t_get_target(e);
target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
if (ret)
goto out;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
module_put(ematch->u.kernel.match->me);
}
return ret;
} | 1 | CVE-2016-4998 | 3,082 | vulnerable |
CWE-190 | static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i)
{
char * const s = b->ptr;
const int blen = (int)buffer_string_length(b);
const int used = qs < 0 ? blen : qs;
int j = i;
for (; i < used; ++i, ++j) {
s[j] = s[i];
if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') {
s[j] = '/';
i+=2;
}
}
if (qs >= 0) {
memmove(s+j, s+qs, blen - qs);
j += blen - qs;
}
buffer_string_set_length(b, j);
return qs;
} | 0 | CVE-2019-11072 | 536 | benign |
CWE-190 | static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i)
{
char * const s = b->ptr;
const int blen = (int)buffer_string_length(b);
const int used = qs < 0 ? blen : qs;
int j = i;
for (; i < used; ++i, ++j) {
s[j] = s[i];
if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') {
s[j] = '/';
i+=2;
}
}
if (qs >= 0) {
const int qslen = blen - qs;
memmove(s+j, s+qs, (size_t)qslen);
qs = j;
j += qslen;
}
buffer_string_set_length(b, j);
return qs;
} | 1 | CVE-2019-11072 | 536 | vulnerable |
CWE-125 | int jpc_dec_decodepkts(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in)
{
jpc_dec_tile_t *tile;
jpc_pi_t *pi;
int ret;
tile = dec->curtile;
pi = tile->pi;
for (;;) {
if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) {
switch (jpc_dec_lookahead(in)) {
case JPC_MS_EOC:
case JPC_MS_SOT:
return 0;
break;
case JPC_MS_SOP:
case JPC_MS_EPH:
case 0:
break;
default:
return -1;
break;
}
}
if ((ret = jpc_pi_next(pi))) {
return ret;
}
if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) {
jas_eprintf("warning: stopping decode prematurely as requested\n");
return 0;
}
if (jas_getdbglevel() >= 1) {
jas_eprintf("packet offset=%08ld prg=%d cmptno=%02d "
"rlvlno=%02d prcno=%03d lyrno=%02d\n", (long)
jas_stream_getrwcount(in), jpc_pi_prg(pi), jpc_pi_cmptno(pi),
jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi));
}
if (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi),
jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) {
return -1;
}
++dec->numpkts;
}
return 0;
} | 0 | CVE-2016-9583 | 504 | benign |
CWE-125 | int jpc_dec_decodepkts(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in)
{
jpc_dec_tile_t *tile;
jpc_pi_t *pi;
int ret;
tile = dec->curtile;
pi = tile->pi;
for (;;) {
if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) {
switch (jpc_dec_lookahead(in)) {
case JPC_MS_EOC:
case JPC_MS_SOT:
return 0;
break;
case JPC_MS_SOP:
case JPC_MS_EPH:
case 0:
break;
default:
return -1;
break;
}
}
if ((ret = jpc_pi_next(pi))) {
return ret;
}
if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) {
jas_eprintf("warning: stopping decode prematurely as requested\n");
return 0;
}
if (jas_getdbglevel() >= 1) {
jas_eprintf("packet offset=%08ld prg=%d cmptno=%02d "
"rlvlno=%02d prcno=%03d lyrno=%02d\n", (long)
jas_stream_getrwcount(in), jpc_pi_prg(pi), jpc_pi_cmptno(pi),
jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi));
}
if (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi),
jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) {
return -1;
}
++dec->numpkts;
}
return 0;
} | 1 | CVE-2016-9583 | 504 | vulnerable |
CWE-787 | void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) {
size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
DCHECK_LT(newCapacity, UINT32_MAX);
// reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
// reallocate and free the original buffer. It should only ever be called if
// we are the only user of the buffer.
DCHECK(!isSharedOne());
// We'll need to reallocate the buffer.
// There are a few options.
// - If we have enough total room, move the data around in the buffer
// and adjust the data_ pointer.
// - If we're using an internal buffer, we'll switch to an external
// buffer with enough headroom and tailroom.
// - If we have enough headroom (headroom() >= minHeadroom) but not too much
// (so we don't waste memory), we can try one of two things, depending on
// whether we use jemalloc or not:
// - If using jemalloc, we can try to expand in place, avoiding a memcpy()
// - If not using jemalloc and we don't have too much to copy,
// we'll use realloc() (note that realloc might have to copy
// headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h)
// - Otherwise, bite the bullet and reallocate.
if (headroom() + tailroom() >= minHeadroom + minTailroom) {
uint8_t* newData = writableBuffer() + minHeadroom;
memmove(newData, data_, length_);
data_ = newData;
return;
}
size_t newAllocatedCapacity = 0;
uint8_t* newBuffer = nullptr;
std::size_t newHeadroom = 0;
std::size_t oldHeadroom = headroom();
// If we have a buffer allocated with malloc and we just need more tailroom,
// try to use realloc()/xallocx() to grow the buffer in place.
SharedInfo* info = sharedInfo();
bool useHeapFullStorage = info && info->useHeapFullStorage;
if (info && (info->freeFn == nullptr) && length_ != 0 &&
oldHeadroom >= minHeadroom) {
size_t headSlack = oldHeadroom - minHeadroom;
newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
if (usingJEMalloc()) {
// We assume that tailroom is more useful and more important than
// headroom (not least because realloc / xallocx allow us to grow the
// buffer at the tail, but not at the head) So, if we have more headroom
// than we need, we consider that "wasted". We arbitrarily define "too
// much" headroom to be 25% of the capacity.
if (headSlack * 4 <= newCapacity) {
size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
void* p = buf_;
if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
if (io_buf_free_cb) {
io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData));
}
newBuffer = static_cast<uint8_t*>(p);
newHeadroom = oldHeadroom;
// update the userData
info->userData = reinterpret_cast<void*>(newAllocatedCapacity);
if (io_buf_alloc_cb) {
io_buf_alloc_cb(newBuffer, newAllocatedCapacity);
}
}
// if xallocx failed, do nothing, fall back to malloc/memcpy/free
}
}
} else { // Not using jemalloc
size_t copySlack = capacity() - length_;
if (copySlack * 2 <= length_) {
void* p = realloc(buf_, newAllocatedCapacity);
if (UNLIKELY(p == nullptr)) {
throw_exception<std::bad_alloc>();
}
newBuffer = static_cast<uint8_t*>(p);
newHeadroom = oldHeadroom;
}
}
}
// None of the previous reallocation strategies worked (or we're using
// an internal buffer). malloc/copy/free.
if (newBuffer == nullptr) {
newAllocatedCapacity = goodExtBufferSize(newCapacity);
newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity));
if (length_ > 0) {
assert(data_ != nullptr);
memcpy(newBuffer + minHeadroom, data_, length_);
}
if (sharedInfo()) {
freeExtBuffer();
}
newHeadroom = minHeadroom;
}
std::size_t cap;
initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);
if (flags() & kFlagFreeSharedInfo) {
delete sharedInfo();
} else {
if (useHeapFullStorage) {
SharedInfo::releaseStorage(sharedInfo());
}
}
setFlagsAndSharedInfo(0, info);
capacity_ = cap;
buf_ = newBuffer;
data_ = newBuffer + newHeadroom;
// length_ is unchanged
} | 0 | CVE-2021-24036 | 1,042 | benign |
CWE-787 | void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) {
size_t newCapacity = length_;
if (!checked_add(&newCapacity, newCapacity, minHeadroom) ||
!checked_add(&newCapacity, newCapacity, minTailroom) ||
newCapacity > kMaxIOBufSize) {
// overflow
throw_exception<std::bad_alloc>();
}
// reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
// reallocate and free the original buffer. It should only ever be called if
// we are the only user of the buffer.
DCHECK(!isSharedOne());
// We'll need to reallocate the buffer.
// There are a few options.
// - If we have enough total room, move the data around in the buffer
// and adjust the data_ pointer.
// - If we're using an internal buffer, we'll switch to an external
// buffer with enough headroom and tailroom.
// - If we have enough headroom (headroom() >= minHeadroom) but not too much
// (so we don't waste memory), we can try one of two things, depending on
// whether we use jemalloc or not:
// - If using jemalloc, we can try to expand in place, avoiding a memcpy()
// - If not using jemalloc and we don't have too much to copy,
// we'll use realloc() (note that realloc might have to copy
// headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h)
// - Otherwise, bite the bullet and reallocate.
if (headroom() + tailroom() >= minHeadroom + minTailroom) {
uint8_t* newData = writableBuffer() + minHeadroom;
memmove(newData, data_, length_);
data_ = newData;
return;
}
size_t newAllocatedCapacity = 0;
uint8_t* newBuffer = nullptr;
std::size_t newHeadroom = 0;
std::size_t oldHeadroom = headroom();
// If we have a buffer allocated with malloc and we just need more tailroom,
// try to use realloc()/xallocx() to grow the buffer in place.
SharedInfo* info = sharedInfo();
bool useHeapFullStorage = info && info->useHeapFullStorage;
if (info && (info->freeFn == nullptr) && length_ != 0 &&
oldHeadroom >= minHeadroom) {
size_t headSlack = oldHeadroom - minHeadroom;
newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
if (usingJEMalloc()) {
// We assume that tailroom is more useful and more important than
// headroom (not least because realloc / xallocx allow us to grow the
// buffer at the tail, but not at the head) So, if we have more headroom
// than we need, we consider that "wasted". We arbitrarily define "too
// much" headroom to be 25% of the capacity.
if (headSlack * 4 <= newCapacity) {
size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
void* p = buf_;
if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
if (io_buf_free_cb) {
io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData));
}
newBuffer = static_cast<uint8_t*>(p);
newHeadroom = oldHeadroom;
// update the userData
info->userData = reinterpret_cast<void*>(newAllocatedCapacity);
if (io_buf_alloc_cb) {
io_buf_alloc_cb(newBuffer, newAllocatedCapacity);
}
}
// if xallocx failed, do nothing, fall back to malloc/memcpy/free
}
}
} else { // Not using jemalloc
size_t copySlack = capacity() - length_;
if (copySlack * 2 <= length_) {
void* p = realloc(buf_, newAllocatedCapacity);
if (UNLIKELY(p == nullptr)) {
throw_exception<std::bad_alloc>();
}
newBuffer = static_cast<uint8_t*>(p);
newHeadroom = oldHeadroom;
}
}
}
// None of the previous reallocation strategies worked (or we're using
// an internal buffer). malloc/copy/free.
if (newBuffer == nullptr) {
newAllocatedCapacity = goodExtBufferSize(newCapacity);
newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity));
if (length_ > 0) {
assert(data_ != nullptr);
memcpy(newBuffer + minHeadroom, data_, length_);
}
if (sharedInfo()) {
freeExtBuffer();
}
newHeadroom = minHeadroom;
}
std::size_t cap;
initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);
if (flags() & kFlagFreeSharedInfo) {
delete sharedInfo();
} else {
if (useHeapFullStorage) {
SharedInfo::releaseStorage(sharedInfo());
}
}
setFlagsAndSharedInfo(0, info);
capacity_ = cap;
buf_ = newBuffer;
data_ = newBuffer + newHeadroom;
// length_ is unchanged
} | 1 | CVE-2021-24036 | 1,042 | vulnerable |
CWE-190 | MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf ) {
mongo_cursor *chunks;
bson chunk;
int first_chunk;
int last_chunk;
int total_chunks;
gridfs_offset chunksize;
gridfs_offset contentlength;
gridfs_offset bytes_left;
int i;
bson_iterator it;
gridfs_offset chunk_len;
const char *chunk_data;
contentlength = gridfile_get_contentlength( gfile );
chunksize = gridfile_get_chunksize( gfile );
size = ( contentlength - gfile->pos < size )
? contentlength - gfile->pos
: size;
bytes_left = size;
first_chunk = ( gfile->pos )/chunksize;
last_chunk = ( gfile->pos+size-1 )/chunksize;
total_chunks = last_chunk - first_chunk + 1;
chunks = gridfile_get_chunks( gfile, first_chunk, total_chunks );
for ( i = 0; i < total_chunks; i++ ) {
mongo_cursor_next( chunks );
chunk = chunks->current;
bson_find( &it, &chunk, "data" );
chunk_len = bson_iterator_bin_len( &it );
chunk_data = bson_iterator_bin_data( &it );
if ( i == 0 ) {
chunk_data += ( gfile->pos )%chunksize;
chunk_len -= ( gfile->pos )%chunksize;
}
if ( bytes_left > chunk_len ) {
memcpy( buf, chunk_data, chunk_len );
bytes_left -= chunk_len;
buf += chunk_len;
}
else {
memcpy( buf, chunk_data, bytes_left );
}
}
mongo_cursor_destroy( chunks );
gfile->pos = gfile->pos + size;
return size;
} | 0 | CVE-2020-12135 | 2,285 | benign |
CWE-190 | MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf ) {
mongo_cursor *chunks;
bson chunk;
size_t first_chunk;
size_t last_chunk;
size_t total_chunks;
gridfs_offset chunksize;
gridfs_offset contentlength;
gridfs_offset bytes_left;
int i;
bson_iterator it;
gridfs_offset chunk_len;
const char *chunk_data;
contentlength = gridfile_get_contentlength( gfile );
chunksize = gridfile_get_chunksize( gfile );
size = ( contentlength - gfile->pos < size )
? contentlength - gfile->pos
: size;
bytes_left = size;
first_chunk = ( gfile->pos )/chunksize;
last_chunk = ( gfile->pos+size-1 )/chunksize;
total_chunks = last_chunk - first_chunk + 1;
chunks = gridfile_get_chunks( gfile, first_chunk, total_chunks );
for ( i = 0; i < total_chunks; i++ ) {
mongo_cursor_next( chunks );
chunk = chunks->current;
bson_find( &it, &chunk, "data" );
chunk_len = bson_iterator_bin_len( &it );
chunk_data = bson_iterator_bin_data( &it );
if ( i == 0 ) {
chunk_data += ( gfile->pos )%chunksize;
chunk_len -= ( gfile->pos )%chunksize;
}
if ( bytes_left > chunk_len ) {
memcpy( buf, chunk_data, chunk_len );
bytes_left -= chunk_len;
buf += chunk_len;
}
else {
memcpy( buf, chunk_data, bytes_left );
}
}
mongo_cursor_destroy( chunks );
gfile->pos = gfile->pos + size;
return size;
} | 1 | CVE-2020-12135 | 2,285 | vulnerable |
CWE-119 | static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
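/* Note: rdesc[30] is dereferenced below, so the guard must require
*rsize >= 31; with *rsize == 30 the rdesc[30] read lands one byte past
the descriptor buffer. */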
if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
rdesc[30] = 0x0c;
}
return rdesc;
} | 0 | CVE-2014-3184 | 237 | benign |
CWE-119 | static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
rdesc[30] = 0x0c;
}
return rdesc;
} | 1 | CVE-2014-3184 | 237 | vulnerable |
CWE-20 | static void _out_result(conn_t out, nad_t nad) {
int attr;
jid_t from, to;
char *rkey;
int rkeylen;
attr = nad_find_attr(nad, 0, -1, "from", NULL);
if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) {
log_debug(ZONE, "missing or invalid from on db result packet");
nad_free(nad);
return;
}
attr = nad_find_attr(nad, 0, -1, "to", NULL);
if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) {
log_debug(ZONE, "missing or invalid to on db result packet");
jid_free(from);
nad_free(nad);
return;
}
rkey = s2s_route_key(NULL, to->domain, from->domain);
rkeylen = strlen(rkey);
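/* Note: a "valid" dialback result is accepted below without checking that
this route is actually in conn_INPROGRESS, i.e. that we ever asked the
peer to verify it; the companion variant adds that state check. */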
/* key is valid */
if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0) {
log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : "");
xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID); /* !!! small leak here */
log_debug(ZONE, "%s valid, flushing queue", rkey);
/* flush the queue */
out_flush_route_queue(out->s2s, rkey, rkeylen);
free(rkey);
jid_free(from);
jid_free(to);
nad_free(nad);
return;
}
/* invalid */
log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey);
/* close connection */
log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port);
/* report stream error */
sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed");
/* close the stream */
sx_close(out->s);
/* bounce queue */
out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE);
free(rkey);
jid_free(from);
jid_free(to);
nad_free(nad);
} | 0 | CVE-2012-3525 | 1,289 | benign |
CWE-20 | static void _out_result(conn_t out, nad_t nad) {
int attr;
jid_t from, to;
char *rkey;
int rkeylen;
attr = nad_find_attr(nad, 0, -1, "from", NULL);
if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) {
log_debug(ZONE, "missing or invalid from on db result packet");
nad_free(nad);
return;
}
attr = nad_find_attr(nad, 0, -1, "to", NULL);
if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) {
log_debug(ZONE, "missing or invalid to on db result packet");
jid_free(from);
nad_free(nad);
return;
}
rkey = s2s_route_key(NULL, to->domain, from->domain);
rkeylen = strlen(rkey);
/* key is valid */
if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0 && xhash_get(out->states, rkey) == (void*) conn_INPROGRESS) {
log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : "");
xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID); /* !!! small leak here */
log_debug(ZONE, "%s valid, flushing queue", rkey);
/* flush the queue */
out_flush_route_queue(out->s2s, rkey, rkeylen);
free(rkey);
jid_free(from);
jid_free(to);
nad_free(nad);
return;
}
/* invalid */
log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey);
/* close connection */
log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port);
/* report stream error */
sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed");
/* close the stream */
sx_close(out->s);
/* bounce queue */
out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE);
free(rkey);
jid_free(from);
jid_free(to);
nad_free(nad);
} | 1 | CVE-2012-3525 | 1,289 | vulnerable |
CWE-20 | static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
struct sk_buff *skb;
int ret;
while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list)) {
struct xenvif *vif;
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
RING_IDX idx;
int work_to_do;
unsigned int data_len;
pending_ring_idx_t index;
/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
if (!vif)
continue;
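/* Note: the frontend-supplied ring producer index is trusted from here on;
the companion variant rejects req_prod - req_cons larger than
XEN_NETIF_TX_RING_SIZE as a fatal error before going any further. */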
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
continue;
}
idx = vif->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
/* Credit-based scheduling. */
if (txreq.size > vif->remaining_credit &&
tx_credit_exceeded(vif, txreq.size)) {
xenvif_put(vif);
continue;
}
vif->remaining_credit -= txreq.size;
work_to_do--;
vif->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0)) {
netbk_tx_err(vif, &txreq, idx);
continue;
}
}
ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0)) {
netbk_tx_err(vif, &txreq, idx - ret);
continue;
}
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(vif->dev,
"Bad packet size: %d\n", txreq.size);
netbk_tx_err(vif, &txreq, idx);
continue;
}
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
netdev_dbg(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
netbk_tx_err(vif, &txreq, idx);
continue;
}
index = pending_index(netbk->pending_cons);
pending_idx = netbk->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < MAX_SKB_FRAGS) ?
PKT_PROT_LEN : txreq.size;
skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
netbk_tx_err(vif, &txreq, idx);
break;
}
/* Packets passed to netif_rx() must have some headroom. */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
if (netbk_set_skb_gso(vif, skb, gso)) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
}
/* XXX could copy straight to head */
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
gop->source.u.ref = txreq.gref;
gop->source.domid = vif->domid;
gop->source.offset = txreq.offset;
gop->dest.u.gmfn = virt_to_mfn(page_address(page));
gop->dest.domid = DOMID_SELF;
gop->dest.offset = txreq.offset;
gop->len = txreq.size;
gop->flags = GNTCOPY_source_gref;
gop++;
memcpy(&netbk->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
netbk->pending_tx_info[pending_idx].vif = vif;
*((u16 *)skb->data) = pending_idx;
__skb_put(skb, data_len);
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
}
netbk->pending_cons++;
request_gop = xen_netbk_get_requests(netbk, vif,
skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
gop = request_gop;
__skb_queue_tail(&netbk->tx_queue, skb);
vif->tx.req_cons = idx;
xen_netbk_check_rx_xenvif(vif);
if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
break;
}
return gop - netbk->tx_copy_ops;
} | 0 | CVE-2013-0216 | 1,586 | benign |
CWE-20 | static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
struct sk_buff *skb;
int ret;
while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list)) {
struct xenvif *vif;
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
RING_IDX idx;
int work_to_do;
unsigned int data_len;
pending_ring_idx_t index;
/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
/* This can sometimes happen because the test of
* list_empty(net_schedule_list) at the top of the
* loop is unlocked. Just go back and have another
* look.
*/
if (!vif)
continue;
if (vif->tx.sring->req_prod - vif->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
netdev_err(vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
netbk_fatal_tx_err(vif);
continue;
}
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
continue;
}
idx = vif->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
/* Credit-based scheduling. */
if (txreq.size > vif->remaining_credit &&
tx_credit_exceeded(vif, txreq.size)) {
xenvif_put(vif);
continue;
}
vif->remaining_credit -= txreq.size;
work_to_do--;
vif->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0))
continue;
}
ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
continue;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(vif->dev,
"Bad packet size: %d\n", txreq.size);
netbk_tx_err(vif, &txreq, idx);
continue;
}
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
netbk_fatal_tx_err(vif);
continue;
}
index = pending_index(netbk->pending_cons);
pending_idx = netbk->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < MAX_SKB_FRAGS) ?
PKT_PROT_LEN : txreq.size;
skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
netbk_tx_err(vif, &txreq, idx);
break;
}
/* Packets passed to netif_rx() must have some headroom. */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
if (netbk_set_skb_gso(vif, skb, gso)) {
/* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
continue;
}
}
/* XXX could copy straight to head */
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
gop->source.u.ref = txreq.gref;
gop->source.domid = vif->domid;
gop->source.offset = txreq.offset;
gop->dest.u.gmfn = virt_to_mfn(page_address(page));
gop->dest.domid = DOMID_SELF;
gop->dest.offset = txreq.offset;
gop->len = txreq.size;
gop->flags = GNTCOPY_source_gref;
gop++;
memcpy(&netbk->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
netbk->pending_tx_info[pending_idx].vif = vif;
*((u16 *)skb->data) = pending_idx;
__skb_put(skb, data_len);
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
}
netbk->pending_cons++;
request_gop = xen_netbk_get_requests(netbk, vif,
skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
gop = request_gop;
__skb_queue_tail(&netbk->tx_queue, skb);
vif->tx.req_cons = idx;
xen_netbk_check_rx_xenvif(vif);
if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
break;
}
return gop - netbk->tx_copy_ops;
} | 1 | CVE-2013-0216 | 1,586 | vulnerable |
CWE-476 | static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
{
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
u64 logical;
u64 length;
u64 devid;
u8 uuid[BTRFS_UUID_SIZE];
int num_stripes;
int ret;
int i;
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
if (ret)
return ret;
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
read_unlock(&map_tree->map_tree.lock);
/* already mapped? */
if (em && em->start <= logical && em->start + em->len > logical) {
free_extent_map(em);
return 0;
} else if (em) {
free_extent_map(em);
}
em = alloc_extent_map();
if (!em)
return -ENOMEM;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
free_extent_map(em);
return -ENOMEM;
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
em->map_lookup = map;
em->start = logical;
em->len = length;
em->orig_start = 0;
em->block_start = 0;
em->block_len = em->len;
map->num_stripes = num_stripes;
map->io_width = btrfs_chunk_io_width(leaf, chunk);
map->io_align = btrfs_chunk_io_align(leaf, chunk);
map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
map->type = btrfs_chunk_type(leaf, chunk);
map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
map->verified_stripes = 0;
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
btrfs_stripe_offset_nr(leaf, chunk, i);
devid = btrfs_stripe_devid_nr(leaf, chunk, i);
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
devid, uuid, NULL);
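/* Note: btrfs_find_device() is called here without the extra seed-device
flag that the companion variant passes; per this record's CVE, a crafted
image can then lead to a NULL device dereference further on (CWE-476). */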
if (!map->stripes[i].dev &&
!btrfs_test_opt(fs_info, DEGRADED)) {
free_extent_map(em);
btrfs_report_missing_device(fs_info, devid, uuid, true);
return -ENOENT;
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
add_missing_dev(fs_info->fs_devices, devid,
uuid);
if (IS_ERR(map->stripes[i].dev)) {
free_extent_map(em);
btrfs_err(fs_info,
"failed to init missing dev %llu: %ld",
devid, PTR_ERR(map->stripes[i].dev));
return PTR_ERR(map->stripes[i].dev);
}
btrfs_report_missing_device(fs_info, devid, uuid, false);
}
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
&(map->stripes[i].dev->dev_state));
}
write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em, 0);
write_unlock(&map_tree->map_tree.lock);
if (ret < 0) {
btrfs_err(fs_info,
"failed to add chunk map, start=%llu len=%llu: %d",
em->start, em->len, ret);
}
free_extent_map(em);
return ret;
} | 0 | CVE-2019-18885 | 3,267 | benign |
CWE-476 | static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
{
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
u64 logical;
u64 length;
u64 devid;
u8 uuid[BTRFS_UUID_SIZE];
int num_stripes;
int ret;
int i;
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
if (ret)
return ret;
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
read_unlock(&map_tree->map_tree.lock);
/* already mapped? */
if (em && em->start <= logical && em->start + em->len > logical) {
free_extent_map(em);
return 0;
} else if (em) {
free_extent_map(em);
}
em = alloc_extent_map();
if (!em)
return -ENOMEM;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
free_extent_map(em);
return -ENOMEM;
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
em->map_lookup = map;
em->start = logical;
em->len = length;
em->orig_start = 0;
em->block_start = 0;
em->block_len = em->len;
map->num_stripes = num_stripes;
map->io_width = btrfs_chunk_io_width(leaf, chunk);
map->io_align = btrfs_chunk_io_align(leaf, chunk);
map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
map->type = btrfs_chunk_type(leaf, chunk);
map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
map->verified_stripes = 0;
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
btrfs_stripe_offset_nr(leaf, chunk, i);
devid = btrfs_stripe_devid_nr(leaf, chunk, i);
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
devid, uuid, NULL, true);
if (!map->stripes[i].dev &&
!btrfs_test_opt(fs_info, DEGRADED)) {
free_extent_map(em);
btrfs_report_missing_device(fs_info, devid, uuid, true);
return -ENOENT;
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
add_missing_dev(fs_info->fs_devices, devid,
uuid);
if (IS_ERR(map->stripes[i].dev)) {
free_extent_map(em);
btrfs_err(fs_info,
"failed to init missing dev %llu: %ld",
devid, PTR_ERR(map->stripes[i].dev));
return PTR_ERR(map->stripes[i].dev);
}
btrfs_report_missing_device(fs_info, devid, uuid, false);
}
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
&(map->stripes[i].dev->dev_state));
}
write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em, 0);
write_unlock(&map_tree->map_tree.lock);
if (ret < 0) {
btrfs_err(fs_info,
"failed to add chunk map, start=%llu len=%llu: %d",
em->start, em->len, ret);
}
free_extent_map(em);
return ret;
} | 1 | CVE-2019-18885 | 3,267 | vulnerable |
CWE-20 | jas_image_t *jpg_decode(jas_stream_t *in, char *optstr)
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
FILE *input_file;
jpg_dest_t dest_mgr_buf;
jpg_dest_t *dest_mgr = &dest_mgr_buf;
JDIMENSION num_scanlines;
jas_image_t *image;
int ret;
jpg_dec_importopts_t opts;
size_t size;
if (jpg_dec_parseopts(optstr, &opts)) {
goto error;
}
// In theory, the two memset calls that follow are not needed.
// They are only here to make the code more predictable in the event
// that the JPEG library fails to initialize a member.
memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct));
memset(dest_mgr, 0, sizeof(jpg_dest_t));
dest_mgr->data = 0;
image = 0;
input_file = 0;
if (!(input_file = tmpfile())) {
jas_eprintf("cannot make temporary file\n");
goto error;
}
if (jpg_copystreamtofile(input_file, in)) {
jas_eprintf("cannot copy stream\n");
goto error;
}
rewind(input_file);
/* Allocate and initialize a JPEG decompression object. */
JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr));
cinfo.err = jpeg_std_error(&jerr);
JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo));
jpeg_create_decompress(&cinfo);
/* Specify the data source for decompression. */
JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file));
jpeg_stdio_src(&cinfo, input_file);
/* Read the file header to obtain the image information. */
JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo));
ret = jpeg_read_header(&cinfo, TRUE);
JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret));
if (ret != JPEG_HEADER_OK) {
jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n");
}
JAS_DBGLOG(10, (
"header: image_width %d; image_height %d; num_components %d\n",
cinfo.image_width, cinfo.image_height, cinfo.num_components)
);
/* Start the decompressor. */
JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo));
ret = jpeg_start_decompress(&cinfo);
JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret));
JAS_DBGLOG(10, (
"header: output_width %d; output_height %d; output_components %d\n",
cinfo.output_width, cinfo.output_height, cinfo.output_components)
);
if (opts.max_size) {
if (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height,
&size) ||
!jas_safe_size_mul(size, cinfo.output_components, &size)) {
goto error;
}
if (size > opts.max_size) {
jas_eprintf("image is too large\n");
goto error;
}
}
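/* Note: this size limit is applied only after jpeg_start_decompress() has
already parsed the untrusted header, and it is skipped entirely when
opts.max_size is zero; the companion variant checks the raw sample count
before starting the decompressor. */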
/* Create an image object to hold the decoded data. */
if (!(image = jpg_mkimage(&cinfo))) {
jas_eprintf("jpg_mkimage failed\n");
goto error;
}
/* Initialize the data sink object. */
dest_mgr->image = image;
if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) {
jas_eprintf("jas_matrix_create failed\n");
goto error;
}
dest_mgr->start_output = jpg_start_output;
dest_mgr->put_pixel_rows = jpg_put_pixel_rows;
dest_mgr->finish_output = jpg_finish_output;
dest_mgr->buffer = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE,
cinfo.output_width * cinfo.output_components, (JDIMENSION) 1);
dest_mgr->buffer_height = 1;
dest_mgr->error = 0;
/* Process the compressed data. */
(*dest_mgr->start_output)(&cinfo, dest_mgr);
while (cinfo.output_scanline < cinfo.output_height) {
JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo,
dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height)));
num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer,
dest_mgr->buffer_height);
JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n",
JAS_CAST(unsigned long, num_scanlines)));
(*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines);
}
(*dest_mgr->finish_output)(&cinfo, dest_mgr);
/* Complete the decompression process. */
JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo));
jpeg_finish_decompress(&cinfo);
/* Destroy the JPEG decompression object. */
JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo));
jpeg_destroy_decompress(&cinfo);
jas_matrix_destroy(dest_mgr->data);
JAS_DBGLOG(10, ("fclose(%p)\n", input_file));
fclose(input_file);
input_file = 0;
if (dest_mgr->error) {
jas_eprintf("error during decoding\n");
goto error;
}
return image;
error:
if (dest_mgr->data) {
jas_matrix_destroy(dest_mgr->data);
}
if (image) {
jas_image_destroy(image);
}
if (input_file) {
fclose(input_file);
}
return 0;
} | 0 | CVE-2016-9395 | 2,373 | benign |
CWE-20 | jas_image_t *jpg_decode(jas_stream_t *in, char *optstr)
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
FILE *input_file;
jpg_dest_t dest_mgr_buf;
jpg_dest_t *dest_mgr = &dest_mgr_buf;
JDIMENSION num_scanlines;
jas_image_t *image;
int ret;
jpg_dec_importopts_t opts;
size_t num_samples;
JAS_DBGLOG(100, ("jpg_decode(%p, \"%s\")\n", in, optstr));
if (jpg_dec_parseopts(optstr, &opts)) {
goto error;
}
// In theory, the two memset calls that follow are not needed.
// They are only here to make the code more predictable in the event
// that the JPEG library fails to initialize a member.
memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct));
memset(dest_mgr, 0, sizeof(jpg_dest_t));
dest_mgr->data = 0;
image = 0;
input_file = 0;
if (!(input_file = tmpfile())) {
jas_eprintf("cannot make temporary file\n");
goto error;
}
if (jpg_copystreamtofile(input_file, in)) {
jas_eprintf("cannot copy stream\n");
goto error;
}
rewind(input_file);
/* Allocate and initialize a JPEG decompression object. */
JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr));
cinfo.err = jpeg_std_error(&jerr);
JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo));
jpeg_create_decompress(&cinfo);
/* Specify the data source for decompression. */
JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file));
jpeg_stdio_src(&cinfo, input_file);
/* Read the file header to obtain the image information. */
JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo));
ret = jpeg_read_header(&cinfo, TRUE);
JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret));
if (ret != JPEG_HEADER_OK) {
jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n");
}
JAS_DBGLOG(10, (
"header: image_width %d; image_height %d; num_components %d\n",
cinfo.image_width, cinfo.image_height, cinfo.num_components)
);
if (opts.max_samples > 0) {
if (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height,
cinfo.num_components, &num_samples)) {
goto error;
}
if (num_samples > opts.max_samples) {
jas_eprintf("image is too large (%zu > %zu)\n", num_samples,
opts.max_samples);
goto error;
}
}
/* Start the decompressor. */
JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo));
ret = jpeg_start_decompress(&cinfo);
JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret));
JAS_DBGLOG(10, (
"header: output_width %d; output_height %d; output_components %d\n",
cinfo.output_width, cinfo.output_height, cinfo.output_components)
);
/* Create an image object to hold the decoded data. */
if (!(image = jpg_mkimage(&cinfo))) {
jas_eprintf("jpg_mkimage failed\n");
goto error;
}
/* Initialize the data sink object. */
dest_mgr->image = image;
if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) {
jas_eprintf("jas_matrix_create failed\n");
goto error;
}
dest_mgr->start_output = jpg_start_output;
dest_mgr->put_pixel_rows = jpg_put_pixel_rows;
dest_mgr->finish_output = jpg_finish_output;
dest_mgr->buffer = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE,
cinfo.output_width * cinfo.output_components, (JDIMENSION) 1);
dest_mgr->buffer_height = 1;
dest_mgr->error = 0;
/* Process the compressed data. */
(*dest_mgr->start_output)(&cinfo, dest_mgr);
while (cinfo.output_scanline < cinfo.output_height) {
JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo,
dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height)));
num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer,
dest_mgr->buffer_height);
JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n",
JAS_CAST(unsigned long, num_scanlines)));
(*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines);
}
(*dest_mgr->finish_output)(&cinfo, dest_mgr);
/* Complete the decompression process. */
JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo));
jpeg_finish_decompress(&cinfo);
/* Destroy the JPEG decompression object. */
JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo));
jpeg_destroy_decompress(&cinfo);
jas_matrix_destroy(dest_mgr->data);
JAS_DBGLOG(10, ("fclose(%p)\n", input_file));
fclose(input_file);
input_file = 0;
if (dest_mgr->error) {
jas_eprintf("error during decoding\n");
goto error;
}
return image;
error:
if (dest_mgr->data) {
jas_matrix_destroy(dest_mgr->data);
}
if (image) {
jas_image_destroy(image);
}
if (input_file) {
fclose(input_file);
}
return 0;
} | 1 | CVE-2016-9395 | 2,373 | vulnerable |
CWE-190 | static int bson_append_estart( bson *b, int type, const char *name, const int dataSize ) {
const int len = strlen( name ) + 1;
if ( b->finished ) {
b->err |= BSON_ALREADY_FINISHED;
return BSON_ERROR;
}
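/* Note: 1 + len + dataSize below is evaluated in signed int arithmetic,
so a sufficiently large dataSize can wrap before bson_ensure_space()
ever sees the request (CWE-190). */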
if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) {
return BSON_ERROR;
}
if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) {
bson_builder_error( b );
return BSON_ERROR;
}
bson_append_byte( b, ( char )type );
bson_append( b, name, len );
return BSON_OK;
} | 0 | CVE-2020-12135 | 2,703 | benign |
CWE-190 | static int bson_append_estart( bson *b, int type, const char *name, const size_t dataSize ) {
const int len = strlen( name ) + 1;
if ( b->finished ) {
b->err |= BSON_ALREADY_FINISHED;
return BSON_ERROR;
}
if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) {
return BSON_ERROR;
}
if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) {
bson_builder_error( b );
return BSON_ERROR;
}
bson_append_byte( b, ( char )type );
bson_append( b, name, len );
return BSON_OK;
} | 1 | CVE-2020-12135 | 2,703 | vulnerable |
CWE-787 | void Compute(OpKernelContext* ctx) override {
const Tensor* hypothesis_indices;
const Tensor* hypothesis_values;
const Tensor* hypothesis_shape;
const Tensor* truth_indices;
const Tensor* truth_values;
const Tensor* truth_shape;
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices));
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values));
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape));
OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices));
OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values));
OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape));
OP_REQUIRES_OK(
ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values,
*hypothesis_shape, *truth_indices, *truth_values,
*truth_shape));
TensorShape hypothesis_st_shape;
OP_REQUIRES_OK(ctx,
TensorShapeUtils::MakeShape(
hypothesis_shape->vec<int64_t>().data(),
hypothesis_shape->NumElements(), &hypothesis_st_shape));
TensorShape truth_st_shape;
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(
truth_shape->vec<int64_t>().data(),
truth_shape->NumElements(), &truth_st_shape));
// Assume indices are sorted in row-major order.
std::vector<int64_t> sorted_order(truth_st_shape.dims());
std::iota(sorted_order.begin(), sorted_order.end(), 0);
sparse::SparseTensor hypothesis;
OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
*hypothesis_indices, *hypothesis_values,
hypothesis_st_shape, sorted_order, &hypothesis));
sparse::SparseTensor truth;
OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
*truth_indices, *truth_values, truth_st_shape,
sorted_order, &truth));
// Group dims 0, 1, ..., RANK - 1. The very last dim is assumed
// to store the variable length sequences.
std::vector<int64_t> group_dims(truth_st_shape.dims() - 1);
std::iota(group_dims.begin(), group_dims.end(), 0);
TensorShape output_shape;
for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) {
output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d),
truth_st_shape.dim_size(d)));
}
const auto output_elements = output_shape.num_elements();
OP_REQUIRES(
ctx, output_elements > 0,
errors::InvalidArgument("Got output shape ", output_shape.DebugString(),
" which has 0 elements"));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output));
auto output_t = output->flat<float>();
output_t.setZero();
std::vector<int64_t> output_strides(output_shape.dims());
output_strides[output_shape.dims() - 1] = 1;
for (int d = output_shape.dims() - 2; d >= 0; --d) {
output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1);
}
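// Note: each output location below is an inner product of group indices
// and strides in int64; the checks in this variant only reject
// loc >= output_elements, so an overflowed (negative) loc slips through,
// while the companion variant additionally requires 0 <= loc.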
auto hypothesis_grouper = hypothesis.group(group_dims);
auto truth_grouper = truth.group(group_dims);
auto hypothesis_iter = hypothesis_grouper.begin();
auto truth_iter = truth_grouper.begin();
auto cmp = std::equal_to<T>();
while (hypothesis_iter != hypothesis_grouper.end() &&
truth_iter != truth_grouper.end()) {
sparse::Group truth_i = *truth_iter;
sparse::Group hypothesis_j = *hypothesis_iter;
std::vector<int64_t> g_truth = truth_i.group();
std::vector<int64_t> g_hypothesis = hypothesis_j.group();
auto truth_seq = truth_i.values<T>();
auto hypothesis_seq = hypothesis_j.values<T>();
if (g_truth == g_hypothesis) {
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) =
gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp);
if (normalize_) output_t(loc) /= truth_seq.size();
++hypothesis_iter;
++truth_iter;
} else if (g_truth > g_hypothesis) { // zero-length truth
auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = hypothesis_seq.size();
if (normalize_ && output_t(loc) != 0.0f) {
output_t(loc) = std::numeric_limits<float>::infinity();
}
++hypothesis_iter;
} else { // zero-length hypothesis
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = (normalize_) ? 1.0 : truth_seq.size();
++truth_iter;
}
}
while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths
sparse::Group hypothesis_j = *hypothesis_iter;
std::vector<int64_t> g_hypothesis = hypothesis_j.group();
auto hypothesis_seq = hypothesis_j.values<T>();
auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of the "
"buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = hypothesis_seq.size();
if (normalize_ && output_t(loc) != 0.0f) {
output_t(loc) = std::numeric_limits<float>::infinity();
}
++hypothesis_iter;
}
while (truth_iter != truth_grouper.end()) { // missing hypotheses
sparse::Group truth_i = *truth_iter;
std::vector<int64_t> g_truth = truth_i.group();
auto truth_seq = truth_i.values<T>();
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require in writing to outside of the "
"buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = (normalize_) ? 1.0 : truth_seq.size();
++truth_iter;
}
} | 0 | CVE-2022-29208 | 2,973 | benign |
CWE-787 | void Compute(OpKernelContext* ctx) override {
const Tensor* hypothesis_indices;
const Tensor* hypothesis_values;
const Tensor* hypothesis_shape;
const Tensor* truth_indices;
const Tensor* truth_values;
const Tensor* truth_shape;
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices));
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values));
OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape));
OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices));
OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values));
OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape));
OP_REQUIRES_OK(
ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values,
*hypothesis_shape, *truth_indices, *truth_values,
*truth_shape));
TensorShape hypothesis_st_shape;
OP_REQUIRES_OK(ctx,
TensorShapeUtils::MakeShape(
hypothesis_shape->vec<int64_t>().data(),
hypothesis_shape->NumElements(), &hypothesis_st_shape));
TensorShape truth_st_shape;
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(
truth_shape->vec<int64_t>().data(),
truth_shape->NumElements(), &truth_st_shape));
// Assume indices are sorted in row-major order.
std::vector<int64_t> sorted_order(truth_st_shape.dims());
std::iota(sorted_order.begin(), sorted_order.end(), 0);
sparse::SparseTensor hypothesis;
OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
*hypothesis_indices, *hypothesis_values,
hypothesis_st_shape, sorted_order, &hypothesis));
sparse::SparseTensor truth;
OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
*truth_indices, *truth_values, truth_st_shape,
sorted_order, &truth));
// Group dims 0, 1, ..., RANK - 1. The very last dim is assumed
// to store the variable length sequences.
std::vector<int64_t> group_dims(truth_st_shape.dims() - 1);
std::iota(group_dims.begin(), group_dims.end(), 0);
TensorShape output_shape;
for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) {
output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d),
truth_st_shape.dim_size(d)));
}
const auto output_elements = output_shape.num_elements();
OP_REQUIRES(
ctx, output_elements > 0,
errors::InvalidArgument("Got output shape ", output_shape.DebugString(),
" which has 0 elements"));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output));
auto output_t = output->flat<float>();
output_t.setZero();
std::vector<int64_t> output_strides(output_shape.dims());
output_strides[output_shape.dims() - 1] = 1;
for (int d = output_shape.dims() - 2; d >= 0; --d) {
output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1);
}
auto hypothesis_grouper = hypothesis.group(group_dims);
auto truth_grouper = truth.group(group_dims);
auto hypothesis_iter = hypothesis_grouper.begin();
auto truth_iter = truth_grouper.begin();
auto cmp = std::equal_to<T>();
while (hypothesis_iter != hypothesis_grouper.end() &&
truth_iter != truth_grouper.end()) {
sparse::Group truth_i = *truth_iter;
sparse::Group hypothesis_j = *hypothesis_iter;
std::vector<int64_t> g_truth = truth_i.group();
std::vector<int64_t> g_hypothesis = hypothesis_j.group();
auto truth_seq = truth_i.values<T>();
auto hypothesis_seq = hypothesis_j.values<T>();
if (g_truth == g_hypothesis) {
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, 0 <= loc && loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) =
gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp);
if (normalize_) output_t(loc) /= truth_seq.size();
++hypothesis_iter;
++truth_iter;
} else if (g_truth > g_hypothesis) { // zero-length truth
auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, 0 <= loc && loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = hypothesis_seq.size();
if (normalize_ && output_t(loc) != 0.0f) {
output_t(loc) = std::numeric_limits<float>::infinity();
}
++hypothesis_iter;
} else { // zero-length hypothesis
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, 0 <= loc && loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require writing to outside of "
"the buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = (normalize_) ? 1.0 : truth_seq.size();
++truth_iter;
}
}
while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths
sparse::Group hypothesis_j = *hypothesis_iter;
std::vector<int64_t> g_hypothesis = hypothesis_j.group();
auto hypothesis_seq = hypothesis_j.values<T>();
auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, 0 <= loc && loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require writing to outside of the "
"buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = hypothesis_seq.size();
if (normalize_ && output_t(loc) != 0.0f) {
output_t(loc) = std::numeric_limits<float>::infinity();
}
++hypothesis_iter;
}
while (truth_iter != truth_grouper.end()) { // missing hypotheses
sparse::Group truth_i = *truth_iter;
std::vector<int64_t> g_truth = truth_i.group();
auto truth_seq = truth_i.values<T>();
auto loc = std::inner_product(g_truth.begin(), g_truth.end(),
output_strides.begin(), int64_t{0});
OP_REQUIRES(
ctx, 0 <= loc && loc < output_elements,
errors::Internal("Got an inner product ", loc,
" which would require writing to outside of the "
"buffer for the output tensor (max elements ",
output_elements, ")"));
output_t(loc) = (normalize_) ? 1.0 : truth_seq.size();
++truth_iter;
}
} | 1 | CVE-2022-29208 | 2,973 | vulnerable |
CWE-125 | pimv1_join_prune_print(netdissect_options *ndo,
register const u_char *bp, register u_int len)
{
int ngroups, njoin, nprune;
int njp;
/* If it's a single group and a single source, use 1-line output. */
if (ND_TTEST2(bp[0], 30) && bp[11] == 1 &&
((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) {
int hold;
ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp)));
hold = EXTRACT_16BITS(&bp[6]);
if (hold != 180) {
ND_PRINT((ndo, "Hold "));
unsigned_relts_print(ndo, hold);
}
ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune",
ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f,
ipaddr_string(ndo, &bp[12])));
if (EXTRACT_32BITS(&bp[16]) != 0xffffffff)
ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16])));
ND_PRINT((ndo, ") %s%s %s",
(bp[24] & 0x01) ? "Sparse" : "Dense",
(bp[25] & 0x80) ? " WC" : "",
(bp[25] & 0x40) ? "RP" : "SPT"));
return;
}
ND_TCHECK2(bp[0], sizeof(struct in_addr));
if (ndo->ndo_vflag > 1)
ND_PRINT((ndo, "\n"));
ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp)));
ND_TCHECK2(bp[6], 2);
if (ndo->ndo_vflag > 1)
ND_PRINT((ndo, "\n"));
ND_PRINT((ndo, " Hold time: "));
unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));
if (ndo->ndo_vflag < 2)
return;
bp += 8;
len -= 8;
ND_TCHECK2(bp[0], 4);
ngroups = bp[3];
bp += 4;
len -= 4;
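/* Note: inside this loop, bounds are enforced only against the end of the
captured data (ND_TCHECK2), never against the remaining PIM payload
length in len; the companion variant adds explicit len checks before
each field is read. */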
while (ngroups--) {
/*
* XXX - does the address have length "addrlen" and the
* mask length "maddrlen"?
*/
ND_TCHECK2(bp[0], sizeof(struct in_addr));
ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp)));
ND_TCHECK2(bp[4], sizeof(struct in_addr));
if (EXTRACT_32BITS(&bp[4]) != 0xffffffff)
ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));
ND_TCHECK2(bp[8], 4);
njoin = EXTRACT_16BITS(&bp[8]);
nprune = EXTRACT_16BITS(&bp[10]);
ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune));
bp += 12;
len -= 12;
for (njp = 0; njp < (njoin + nprune); njp++) {
const char *type;
if (njp < njoin)
type = "Join ";
else
type = "Prune";
ND_TCHECK2(bp[0], 6);
ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type,
(bp[0] & 0x01) ? "Sparse " : "Dense ",
(bp[1] & 0x80) ? "WC " : "",
(bp[1] & 0x40) ? "RP " : "SPT ",
ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));
bp += 6;
len -= 6;
}
}
return;
trunc:
ND_PRINT((ndo, "[|pim]"));
return;
} | 0 | CVE-2017-13030 | 2,355 | benign |
CWE-125 | pimv1_join_prune_print(netdissect_options *ndo,
register const u_char *bp, register u_int len)
{
int ngroups, njoin, nprune;
int njp;
/* If it's a single group and a single source, use 1-line output. */
if (ND_TTEST2(bp[0], 30) && bp[11] == 1 &&
((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) {
int hold;
ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp)));
hold = EXTRACT_16BITS(&bp[6]);
if (hold != 180) {
ND_PRINT((ndo, "Hold "));
unsigned_relts_print(ndo, hold);
}
ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune",
ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f,
ipaddr_string(ndo, &bp[12])));
if (EXTRACT_32BITS(&bp[16]) != 0xffffffff)
ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16])));
ND_PRINT((ndo, ") %s%s %s",
(bp[24] & 0x01) ? "Sparse" : "Dense",
(bp[25] & 0x80) ? " WC" : "",
(bp[25] & 0x40) ? "RP" : "SPT"));
return;
}
if (len < sizeof(struct in_addr))
goto trunc;
ND_TCHECK2(bp[0], sizeof(struct in_addr));
if (ndo->ndo_vflag > 1)
ND_PRINT((ndo, "\n"));
ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp)));
bp += 4;
len -= 4;
if (len < 4)
goto trunc;
ND_TCHECK2(bp[2], 2);
if (ndo->ndo_vflag > 1)
ND_PRINT((ndo, "\n"));
ND_PRINT((ndo, " Hold time: "));
unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));
if (ndo->ndo_vflag < 2)
return;
bp += 4;
len -= 4;
if (len < 4)
goto trunc;
ND_TCHECK2(bp[0], 4);
ngroups = bp[3];
bp += 4;
len -= 4;
while (ngroups--) {
/*
* XXX - does the address have length "addrlen" and the
* mask length "maddrlen"?
*/
if (len < 4)
goto trunc;
ND_TCHECK2(bp[0], sizeof(struct in_addr));
ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp)));
bp += 4;
len -= 4;
if (len < 4)
goto trunc;
ND_TCHECK2(bp[0], sizeof(struct in_addr));
if (EXTRACT_32BITS(&bp[0]) != 0xffffffff)
ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));
bp += 4;
len -= 4;
if (len < 4)
goto trunc;
ND_TCHECK2(bp[0], 4);
njoin = EXTRACT_16BITS(&bp[0]);
nprune = EXTRACT_16BITS(&bp[2]);
ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune));
bp += 4;
len -= 4;
for (njp = 0; njp < (njoin + nprune); njp++) {
const char *type;
if (njp < njoin)
type = "Join ";
else
type = "Prune";
if (len < 6)
goto trunc;
ND_TCHECK2(bp[0], 6);
ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type,
(bp[0] & 0x01) ? "Sparse " : "Dense ",
(bp[1] & 0x80) ? "WC " : "",
(bp[1] & 0x40) ? "RP " : "SPT ",
ipaddr_string(ndo, &bp[2]),
bp[1] & 0x3f));
bp += 6;
len -= 6;
}
}
return;
trunc:
ND_PRINT((ndo, "[|pim]"));
return;
} | 1 | CVE-2017-13030 | 2,355 | vulnerable |
CWE-125 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
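// Note: GetInput()/GetOutput() can return nullptr for an invalid tensor
// index; this variant dereferences the results without the null checks
// that GetInputSafe()/GetOutputSafe() provide in the companion copy.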
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
params->stride));
const int64_t sample_count = input->dims->data[0];
const int64_t length_minus_window = (sample_count - params->window_size);
if (length_minus_window < 0) {
params->output_height = 0;
} else {
params->output_height = 1 + (length_minus_window / params->stride);
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
output_size->data[0] = input->dims->data[1];
output_size->data[1] = params->output_height;
output_size->data[2] = params->spectrogram->output_frequency_channels();
return context->ResizeTensor(context, output, output_size);
} | 0 | CVE-2020-15211 | 2,966 | benign |
CWE-125 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
params->stride));
const int64_t sample_count = input->dims->data[0];
const int64_t length_minus_window = (sample_count - params->window_size);
if (length_minus_window < 0) {
params->output_height = 0;
} else {
params->output_height = 1 + (length_minus_window / params->stride);
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
output_size->data[0] = input->dims->data[1];
output_size->data[1] = params->output_height;
output_size->data[2] = params->spectrogram->output_frequency_channels();
return context->ResizeTensor(context, output, output_size);
} | 1 | CVE-2020-15211 | 2,966 | vulnerable |
CWE-787 | void edge_sparse_csr_reader_double( const char* i_csr_file_in,
unsigned int** o_row_idx,
unsigned int** o_column_idx,
double** o_values,
unsigned int* o_row_count,
unsigned int* o_column_count,
unsigned int* o_element_count ) {
FILE *l_csr_file_handle;
const unsigned int l_line_length = 512;
char l_line[512/*l_line_length*/+1];
unsigned int l_header_read = 0;
unsigned int* l_row_idx_id = NULL;
unsigned int l_i = 0;
l_csr_file_handle = fopen( i_csr_file_in, "r" );
if ( l_csr_file_handle == NULL ) {
fprintf( stderr, "cannot open CSR file!\n" );
return;
}
while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) {
if ( strlen(l_line) == l_line_length ) {
fprintf( stderr, "could not read file length!\n" );
return;
}
/* check if we are still reading comments header */
if ( l_line[0] == '%' ) {
continue;
} else {
/* if we are the first line after comment header, we allocate our data structures */
if ( l_header_read == 0 ) {
if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) {
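/* Note: the three parsed counts are not checked for zero here; a header
such as "0 0 0" produces zero-byte allocations that the element loop
below can then write past (the companion variant additionally requires
all three counts to be nonzero). */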
/* allocate CSC datastructure matching mtx file */
*o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count));
*o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1));
*o_values = (double*) malloc(sizeof(double) * (*o_element_count));
l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count));
/* check if mallocs were successful */
if ( ( *o_row_idx == NULL ) ||
( *o_column_idx == NULL ) ||
( *o_values == NULL ) ||
( l_row_idx_id == NULL ) ) {
fprintf( stderr, "could not allocate sp data!\n" );
return;
}
/* set everything to zero for init */
memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1));
memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count));
memset(*o_values, 0, sizeof(double)*(*o_element_count));
memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count));
/* init column idx */
for ( l_i = 0; l_i < (*o_row_count + 1); l_i++)
(*o_row_idx)[l_i] = (*o_element_count);
/* init */
(*o_row_idx)[0] = 0;
l_i = 0;
l_header_read = 1;
} else {
fprintf( stderr, "could not csr description!\n" );
return;
}
/* now we read the actual content */
} else {
unsigned int l_row, l_column;
double l_value;
/* read a line of content */
if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
fprintf( stderr, "could not read element!\n" );
return;
}
/* adjust numbers to zero termination */
l_row--;
l_column--;
/* add these values to row and value structure */
(*o_column_idx)[l_i] = l_column;
(*o_values)[l_i] = l_value;
l_i++;
/* handle columns, set id to own for this column, yeah we need to handle empty columns */
l_row_idx_id[l_row] = 1;
(*o_row_idx)[l_row+1] = l_i;
}
}
}
/* close mtx file */
fclose( l_csr_file_handle );
/* check if we read a file which was consistent */
if ( l_i != (*o_element_count) ) {
fprintf( stderr, "we were not able to read all elements!\n" );
return;
}
/* let's handle empty rows */
for ( l_i = 0; l_i < (*o_row_count); l_i++) {
if ( l_row_idx_id[l_i] == 0 ) {
(*o_row_idx)[l_i+1] = (*o_row_idx)[l_i];
}
}
/* free helper data structure */
if ( l_row_idx_id != NULL ) {
free( l_row_idx_id );
}
} | 0 | CVE-2018-20541 | 2,332 | benign |
CWE-787 | void edge_sparse_csr_reader_double( const char* i_csr_file_in,
unsigned int** o_row_idx,
unsigned int** o_column_idx,
double** o_values,
unsigned int* o_row_count,
unsigned int* o_column_count,
unsigned int* o_element_count ) {
FILE *l_csr_file_handle;
const unsigned int l_line_length = 512;
char l_line[512/*l_line_length*/+1];
unsigned int l_header_read = 0;
unsigned int* l_row_idx_id = NULL;
unsigned int l_i = 0;
l_csr_file_handle = fopen( i_csr_file_in, "r" );
if ( l_csr_file_handle == NULL ) {
fprintf( stderr, "cannot open CSR file!\n" );
return;
}
while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) {
if ( strlen(l_line) == l_line_length ) {
fprintf( stderr, "could not read file length!\n" );
return;
}
/* check if we are still reading comments header */
if ( l_line[0] == '%' ) {
continue;
} else {
/* if we are the first line after comment header, we allocate our data structures */
if ( l_header_read == 0 ) {
if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&
0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count)
{
/* allocate CSC datastructure matching mtx file */
*o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count));
*o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1));
*o_values = (double*) malloc(sizeof(double) * (*o_element_count));
l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count));
/* check if mallocs were successful */
if ( ( *o_row_idx == NULL ) ||
( *o_column_idx == NULL ) ||
( *o_values == NULL ) ||
( l_row_idx_id == NULL ) ) {
fprintf( stderr, "could not allocate sp data!\n" );
return;
}
/* set everything to zero for init */
memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1));
memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count));
memset(*o_values, 0, sizeof(double)*(*o_element_count));
memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count));
/* init column idx */
for ( l_i = 0; l_i < (*o_row_count + 1); l_i++)
(*o_row_idx)[l_i] = (*o_element_count);
/* init */
(*o_row_idx)[0] = 0;
l_i = 0;
l_header_read = 1;
} else {
fprintf( stderr, "could not csr description!\n" );
return;
}
/* now we read the actual content */
} else {
unsigned int l_row, l_column;
double l_value;
/* read a line of content */
if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
fprintf( stderr, "could not read element!\n" );
return;
}
/* adjust numbers to zero termination */
l_row--;
l_column--;
/* add these values to row and value structure */
(*o_column_idx)[l_i] = l_column;
(*o_values)[l_i] = l_value;
l_i++;
/* handle columns, set id to own for this column, yeah we need to handle empty columns */
l_row_idx_id[l_row] = 1;
(*o_row_idx)[l_row+1] = l_i;
}
}
}
/* close mtx file */
fclose( l_csr_file_handle );
/* check if we read a file which was consistent */
if ( l_i != (*o_element_count) ) {
fprintf( stderr, "we were not able to read all elements!\n" );
return;
}
/* let's handle empty rows */
for ( l_i = 0; l_i < (*o_row_count); l_i++) {
if ( l_row_idx_id[l_i] == 0 ) {
(*o_row_idx)[l_i+1] = (*o_row_idx)[l_i];
}
}
/* free helper data structure */
if ( l_row_idx_id != NULL ) {
free( l_row_idx_id );
}
} | 1 | CVE-2018-20541 | 2,332 | vulnerable |
CWE-476 | static void naludmx_queue_param_set(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 ps_type, s32 ps_id)
{
GF_List *list = NULL, *alt_list = NULL;
GF_NALUFFParam *sl;
u32 i, count;
u32 crc = gf_crc_32(data, size);
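/* Note: gf_crc_32() is reached here even when size is 0; the companion
variant returns early on !size, which suggests a zero-length parameter
set can otherwise propagate into the gf_malloc/memcpy calls below. */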
if (ctx->codecid==GF_CODECID_HEVC) {
switch (ps_type) {
case GF_HEVC_NALU_VID_PARAM:
if (!ctx->vps) ctx->vps = gf_list_new();
list = ctx->vps;
break;
case GF_HEVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_HEVC_NALU_PIC_PARAM:
list = ctx->pps;
break;
default:
assert(0);
return;
}
} else if (ctx->codecid==GF_CODECID_VVC) {
switch (ps_type) {
case GF_VVC_NALU_VID_PARAM:
if (!ctx->vps) ctx->vps = gf_list_new();
list = ctx->vps;
break;
case GF_VVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_VVC_NALU_PIC_PARAM:
list = ctx->pps;
break;
case GF_VVC_NALU_DEC_PARAM:
if (!ctx->vvc_dci) ctx->vvc_dci = gf_list_new();
list = ctx->vvc_dci;
break;
case GF_VVC_NALU_APS_PREFIX:
if (!ctx->vvc_aps_pre) ctx->vvc_aps_pre = gf_list_new();
list = ctx->vvc_aps_pre;
break;
default:
assert(0);
return;
}
} else {
switch (ps_type) {
case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
case GF_AVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_AVC_NALU_PIC_PARAM:
list = ctx->pps;
alt_list = ctx->pps_svc;
break;
case GF_AVC_NALU_SEQ_PARAM_EXT:
if (!ctx->sps_ext) ctx->sps_ext = gf_list_new();
list = ctx->sps_ext;
break;
default:
assert(0);
return;
}
}
sl = NULL;
count = gf_list_count(list);
for (i=0; i<count; i++) {
sl = gf_list_get(list, i);
if (sl->id != ps_id) {
sl = NULL;
continue;
}
//same ID, same CRC, we don't change our state
if (sl->crc == crc) return;
break;
}
//handle alt PPS list for SVC
if (!sl && alt_list) {
count = gf_list_count(alt_list);
for (i=0; i<count; i++) {
sl = gf_list_get(alt_list, i);
if (sl->id != ps_id) {
sl = NULL;
continue;
}
//same ID, same CRC, we don't change our state
if (sl->crc == crc) return;
break;
}
}
if (sl) {
//otherwise we keep this new param set
sl->data = gf_realloc(sl->data, size);
memcpy(sl->data, data, size);
sl->size = size;
sl->crc = crc;
ctx->ps_modified = GF_TRUE;
return;
}
//TODO we might want to purge the list after a while !!
GF_SAFEALLOC(sl, GF_NALUFFParam);
if (!sl) return;
sl->data = gf_malloc(sizeof(char) * size);
if (!sl->data) {
gf_free(sl);
return;
}
memcpy(sl->data, data, size);
sl->size = size;
sl->id = ps_id;
sl->crc = crc;
ctx->ps_modified = GF_TRUE;
gf_list_add(list, sl);
} | 0 | CVE-2021-40563 | 2,901 | benign |
CWE-476 | static void naludmx_queue_param_set(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 ps_type, s32 ps_id)
{
GF_List *list = NULL, *alt_list = NULL;
GF_NALUFFParam *sl;
u32 i, count, crc;
if (!size) return;
crc = gf_crc_32(data, size);
if (ctx->codecid==GF_CODECID_HEVC) {
switch (ps_type) {
case GF_HEVC_NALU_VID_PARAM:
if (!ctx->vps) ctx->vps = gf_list_new();
list = ctx->vps;
break;
case GF_HEVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_HEVC_NALU_PIC_PARAM:
list = ctx->pps;
break;
default:
assert(0);
return;
}
} else if (ctx->codecid==GF_CODECID_VVC) {
switch (ps_type) {
case GF_VVC_NALU_VID_PARAM:
if (!ctx->vps) ctx->vps = gf_list_new();
list = ctx->vps;
break;
case GF_VVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_VVC_NALU_PIC_PARAM:
list = ctx->pps;
break;
case GF_VVC_NALU_DEC_PARAM:
if (!ctx->vvc_dci) ctx->vvc_dci = gf_list_new();
list = ctx->vvc_dci;
break;
case GF_VVC_NALU_APS_PREFIX:
if (!ctx->vvc_aps_pre) ctx->vvc_aps_pre = gf_list_new();
list = ctx->vvc_aps_pre;
break;
default:
assert(0);
return;
}
} else {
switch (ps_type) {
case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
case GF_AVC_NALU_SEQ_PARAM:
list = ctx->sps;
break;
case GF_AVC_NALU_PIC_PARAM:
list = ctx->pps;
alt_list = ctx->pps_svc;
break;
case GF_AVC_NALU_SEQ_PARAM_EXT:
if (!ctx->sps_ext) ctx->sps_ext = gf_list_new();
list = ctx->sps_ext;
break;
default:
assert(0);
return;
}
}
sl = NULL;
count = gf_list_count(list);
for (i=0; i<count; i++) {
sl = gf_list_get(list, i);
if (sl->id != ps_id) {
sl = NULL;
continue;
}
//same ID, same CRC, we don't change our state
if (sl->crc == crc) return;
break;
}
//handle alt PPS list for SVC
if (!sl && alt_list) {
count = gf_list_count(alt_list);
for (i=0; i<count; i++) {
sl = gf_list_get(alt_list, i);
if (sl->id != ps_id) {
sl = NULL;
continue;
}
//same ID, same CRC, we don't change our state
if (sl->crc == crc) return;
break;
}
}
if (sl) {
//otherwise we keep this new param set
sl->data = gf_realloc(sl->data, size);
memcpy(sl->data, data, size);
sl->size = size;
sl->crc = crc;
ctx->ps_modified = GF_TRUE;
return;
}
//TODO we might want to purge the list after a while !!
GF_SAFEALLOC(sl, GF_NALUFFParam);
if (!sl) return;
sl->data = gf_malloc(sizeof(char) * size);
if (!sl->data) {
gf_free(sl);
return;
}
memcpy(sl->data, data, size);
sl->size = size;
sl->id = ps_id;
sl->crc = crc;
ctx->ps_modified = GF_TRUE;
gf_list_add(list, sl);
} | 1 | CVE-2021-40563 | 2,901 | vulnerable |
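The two CVE-2021-40563 records above differ only in the empty-input guard: one computes gf_crc_32(data, size) unconditionally, the other returns when size is 0 before touching the payload. A minimal sketch of the guard pattern, with a hypothetical checksum standing in for gf_crc_32():

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for gf_crc_32(); a djb2-style checksum. */
static uint32_t checksum_of(const uint8_t *data, size_t len) {
    uint32_t h = 5381;
    for (size_t i = 0; i < len; i++)
        h = h * 33 + data[i];
    return h;
}

/* Reject empty parameter sets before hashing: with len == 0 the
 * data pointer may legitimately be NULL and must not be used. */
static int queue_param_set(const uint8_t *data, size_t len, uint32_t *crc_out) {
    if (!len)
        return -1;              /* nothing to queue */
    *crc_out = checksum_of(data, len);
    return 0;                   /* caller dedups stored sets by crc */
}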
CWE-362 | int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
unsigned long opt),
unsigned long opt, u32 timeout, u8 *hci_status)
{
int ret;
if (!test_bit(HCI_UP, &hdev->flags))
return -ENETDOWN;
/* Serialize all requests */
hci_req_sync_lock(hdev);
ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
hci_req_sync_unlock(hdev);
return ret;
} | 0 | CVE-2021-32399 | 888 | benign |
CWE-362 | int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
unsigned long opt),
unsigned long opt, u32 timeout, u8 *hci_status)
{
int ret;
/* Serialize all requests */
hci_req_sync_lock(hdev);
/* check the state after obtaining the lock to protect the HCI_UP
* against any races from hci_dev_do_close when the controller
* gets removed.
*/
if (test_bit(HCI_UP, &hdev->flags))
ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
else
ret = -ENETDOWN;
hci_req_sync_unlock(hdev);
return ret;
} | 1 | CVE-2021-32399 | 888 | vulnerable |
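The two CVE-2021-32399 records move the HCI_UP test across hci_req_sync_lock(): tested before the lock is taken, there is a window in which the controller can be torn down, which is exactly what the comment in the second record warns about. A minimal sketch of the check-under-lock pattern, with pthread primitives standing in for the hci_dev lock:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct dev {
    pthread_mutex_t lock;
    bool up;                    /* cleared by teardown, under the same lock */
};

/* Test the flag only after the lock is held: teardown also takes
 * the lock, so the state cannot change under our feet. */
static int dev_request(struct dev *d) {
    int ret;
    pthread_mutex_lock(&d->lock);
    ret = d->up ? 0 /* ...perform the request... */ : -ENETDOWN;
    pthread_mutex_unlock(&d->lock);
    return ret;
}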
CWE-20 | parse_netscreen_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer* buf,
char *line, int *err, gchar **err_info)
{
int sec;
int dsec;
char cap_int[NETSCREEN_MAX_INT_NAME_LENGTH];
char direction[2];
guint pkt_len;
char cap_src[13];
char cap_dst[13];
guint8 *pd;
gchar *p;
int n, i = 0;
guint offset = 0;
gchar dststr[13];
phdr->rec_type = REC_TYPE_PACKET;
phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN;
if (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9u:%12s->%12s/",
&sec, &dsec, cap_int, direction, &pkt_len, cap_src, cap_dst) < 5) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: Can't parse packet-header");
return -1;
}
if (pkt_len > WTAP_MAX_PACKET_SIZE) {
/*
* Probably a corrupt capture file; don't blow up trying
* to allocate space for an immensely-large packet.
*/
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup_printf("netscreen: File has %u-byte packet, bigger than maximum of %u",
pkt_len, WTAP_MAX_PACKET_SIZE);
return FALSE;
}
/*
* If direction[0] is 'o', the direction is NETSCREEN_EGRESS,
* otherwise it's NETSCREEN_INGRESS.
*/
phdr->ts.secs = sec;
phdr->ts.nsecs = dsec * 100000000;
phdr->len = pkt_len;
/* Make sure we have enough room for the packet */
ws_buffer_assure_space(buf, pkt_len);
pd = ws_buffer_start_ptr(buf);
while(1) {
/* The last packet is not delimited by an empty line, but by EOF
* So accept EOF as a valid delimiter too
*/
if (file_gets(line, NETSCREEN_LINE_LENGTH, fh) == NULL) {
break;
}
/*
* Skip blanks.
* The number of blanks is not fixed - for wireless
* interfaces, there may be 14 extra spaces before
* the hex data.
*/
for (p = &line[0]; g_ascii_isspace(*p); p++)
;
/* packets are delimited with empty lines */
if (*p == '\0') {
break;
}
n = parse_single_hex_dump_line(p, pd, offset);
/* the smallest packet has a length of 6 bytes, if
* the first hex-data is less, then check whether
* it is an info-line and act accordingly
*/
if (offset == 0 && n < 6) {
if (info_line(line)) {
if (++i <= NETSCREEN_MAX_INFOLINES) {
continue;
}
} else {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: cannot parse hex-data");
return FALSE;
}
}
/* If there is no more data and the line was not empty,
* then there must be an error in the file
*/
if (n == -1) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: cannot parse hex-data");
return FALSE;
}
/* Adjust the offset to the data that was just added to the buffer */
offset += n;
/* If there was more hex-data than was announced in the len=x
* header, then there must be an error in the file
*/
if (offset > pkt_len) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: too much hex-data");
return FALSE;
}
}
/*
* Determine the encapsulation type, based on the
* first 4 characters of the interface name
*
* XXX convert this to a 'case' structure when adding more
* (non-ethernet) interface types
*/
if (strncmp(cap_int, "adsl", 4) == 0) {
/* The ADSL interface can be bridged with or without
* PPP encapsulation. Check whether the first six bytes
* of the hex data are the same as the destination mac
* address in the header. If they are, assume ethernet
* LinkLayer or else PPP
*/
g_snprintf(dststr, 13, "%02x%02x%02x%02x%02x%02x",
pd[0], pd[1], pd[2], pd[3], pd[4], pd[5]);
if (strncmp(dststr, cap_dst, 12) == 0)
phdr->pkt_encap = WTAP_ENCAP_ETHERNET;
else
phdr->pkt_encap = WTAP_ENCAP_PPP;
}
else if (strncmp(cap_int, "seri", 4) == 0)
phdr->pkt_encap = WTAP_ENCAP_PPP;
else
phdr->pkt_encap = WTAP_ENCAP_ETHERNET;
phdr->caplen = offset;
return TRUE;
} | 0 | CVE-2016-5357 | 1,710 | benign |
CWE-20 | parse_netscreen_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer* buf,
char *line, int *err, gchar **err_info)
{
int pkt_len;
int sec;
int dsec;
char cap_int[NETSCREEN_MAX_INT_NAME_LENGTH];
char direction[2];
char cap_src[13];
char cap_dst[13];
guint8 *pd;
gchar *p;
int n, i = 0;
int offset = 0;
gchar dststr[13];
phdr->rec_type = REC_TYPE_PACKET;
phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN;
if (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9d:%12s->%12s/",
&sec, &dsec, cap_int, direction, &pkt_len, cap_src, cap_dst) < 5) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: Can't parse packet-header");
return -1;
}
if (pkt_len < 0) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: packet header has a negative packet length");
return FALSE;
}
if (pkt_len > WTAP_MAX_PACKET_SIZE) {
/*
* Probably a corrupt capture file; don't blow up trying
* to allocate space for an immensely-large packet.
*/
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup_printf("netscreen: File has %u-byte packet, bigger than maximum of %u",
pkt_len, WTAP_MAX_PACKET_SIZE);
return FALSE;
}
/*
* If direction[0] is 'o', the direction is NETSCREEN_EGRESS,
* otherwise it's NETSCREEN_INGRESS.
*/
phdr->ts.secs = sec;
phdr->ts.nsecs = dsec * 100000000;
phdr->len = pkt_len;
/* Make sure we have enough room for the packet */
ws_buffer_assure_space(buf, pkt_len);
pd = ws_buffer_start_ptr(buf);
while(1) {
/* The last packet is not delimited by an empty line, but by EOF
* So accept EOF as a valid delimiter too
*/
if (file_gets(line, NETSCREEN_LINE_LENGTH, fh) == NULL) {
break;
}
/*
* Skip blanks.
* The number of blanks is not fixed - for wireless
* interfaces, there may be 14 extra spaces before
* the hex data.
*/
for (p = &line[0]; g_ascii_isspace(*p); p++)
;
/* packets are delimited with empty lines */
if (*p == '\0') {
break;
}
n = parse_single_hex_dump_line(p, pd, offset);
/* the smallest packet has a length of 6 bytes, if
* the first hex-data is less, then check whether
* it is an info-line and act accordingly
*/
if (offset == 0 && n < 6) {
if (info_line(line)) {
if (++i <= NETSCREEN_MAX_INFOLINES) {
continue;
}
} else {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: cannot parse hex-data");
return FALSE;
}
}
/* If there is no more data and the line was not empty,
* then there must be an error in the file
*/
if (n == -1) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: cannot parse hex-data");
return FALSE;
}
/* Adjust the offset to the data that was just added to the buffer */
offset += n;
/* If there was more hex-data than was announced in the len=x
* header, then there must be an error in the file
*/
if (offset > pkt_len) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("netscreen: too much hex-data");
return FALSE;
}
}
/*
* Determine the encapsulation type, based on the
* first 4 characters of the interface name
*
* XXX convert this to a 'case' structure when adding more
* (non-ethernet) interface types
*/
if (strncmp(cap_int, "adsl", 4) == 0) {
/* The ADSL interface can be bridged with or without
* PPP encapsulation. Check whether the first six bytes
* of the hex data are the same as the destination mac
* address in the header. If they are, assume ethernet
* LinkLayer or else PPP
*/
g_snprintf(dststr, 13, "%02x%02x%02x%02x%02x%02x",
pd[0], pd[1], pd[2], pd[3], pd[4], pd[5]);
if (strncmp(dststr, cap_dst, 12) == 0)
phdr->pkt_encap = WTAP_ENCAP_ETHERNET;
else
phdr->pkt_encap = WTAP_ENCAP_PPP;
}
else if (strncmp(cap_int, "seri", 4) == 0)
phdr->pkt_encap = WTAP_ENCAP_PPP;
else
phdr->pkt_encap = WTAP_ENCAP_ETHERNET;
phdr->caplen = offset;
return TRUE;
} | 1 | CVE-2016-5357 | 1,710 | vulnerable |
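The two CVE-2016-5357 records disagree on the type of pkt_len: one parses it as guint with %9u, the other as int with %9d plus an explicit pkt_len < 0 rejection before the WTAP_MAX_PACKET_SIZE cap. A minimal sketch of parsing a length field defensively (names and limit are illustrative):

#include <stdio.h>

#define MAX_PACKET_SIZE 65535   /* illustrative cap */

/* Parse "len=<n>" as signed so a bogus value shows up as negative,
 * then reject both underflow and oversize before any allocation. */
static int parse_len(const char *line, int *out_len) {
    int len;
    if (sscanf(line, "len=%9d", &len) != 1)
        return -1;
    if (len < 0 || len > MAX_PACKET_SIZE)
        return -1;
    *out_len = len;
    return 0;
}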
CWE-416 | static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
release_sock(sk);
return err;
} | 0 | CVE-2021-3640 | 1,001 | benign |
CWE-416 | static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
void *buf;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (memcpy_from_msg(buf, msg, len)) {
kfree(buf);
return -EFAULT;
}
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, buf, len, msg->msg_flags);
else
err = -ENOTCONN;
release_sock(sk);
kfree(buf);
return err;
} | 1 | CVE-2021-3640 | 1,001 | vulnerable |
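The two CVE-2021-3640 records differ in where the user payload is copied: one hands msg to sco_send_frame() under lock_sock(), the other stages it into a kmalloc'd buffer via memcpy_from_msg() first, so the copy (which can fault on user pages) runs before the socket lock is taken. A minimal user-space sketch of the stage-then-lock pattern (memcpy stands in for memcpy_from_msg()):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stage the payload into private memory before locking, so the
 * potentially slow or faulting copy never runs with the lock held. */
static int send_staged(const void *umsg, size_t len,
                       int (*send_frame)(const void *, size_t)) {
    void *buf = malloc(len);
    if (!buf)
        return -ENOMEM;
    memcpy(buf, umsg, len);         /* copy outside the critical section */
    pthread_mutex_lock(&sk_lock);
    int err = send_frame(buf, len);
    pthread_mutex_unlock(&sk_lock);
    free(buf);
    return err;
}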
CWE-787 | std::string encodeBase64(const std::string& input) {
using namespace boost::archive::iterators;
using b64it = base64_from_binary<transform_width<const char*, 6, 8>>;
auto data = input.data();
std::string encoded(b64it(data), b64it(data + (input.length())));
encoded.append((3 - (input.length() % 3)) % 3, '=');
return encoded;
} | 0 | CVE-2019-11921 | 55 | benign |
CWE-787 | std::string encodeBase64(const std::string& input) {
return Base64::encode(folly::ByteRange(
reinterpret_cast<const uint8_t*>(input.c_str()),
input.length()));
} | 1 | CVE-2019-11921 | 55 | vulnerable |
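The two CVE-2019-11921 records swap a hand-rolled boost iterator pipeline for folly's Base64::encode(). The hand-rolled version must append '=' padding itself, which is where the (3 - len % 3) % 3 term in the first record comes from: base64 emits 4 output characters per 3 input bytes. A runnable check of that arithmetic:

#include <stddef.h>
#include <stdio.h>

/* '=' characters a base64 encoding of len bytes needs:
 * 0 when len % 3 == 0, 2 when len % 3 == 1, 1 when len % 3 == 2. */
static size_t b64_pad(size_t len) {
    return (3 - len % 3) % 3;
}

int main(void) {
    for (size_t len = 0; len < 7; len++)
        printf("len=%zu -> pad=%zu\n", len, b64_pad(len));
    return 0;
}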
CWE-416 | static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
struct snd_card *card = file->card;
struct snd_kcontrol kctl, *_kctl;
unsigned int access;
long private_size;
struct user_element *ue;
int idx, err;
if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
down_write(&card->controls_rwsem);
_kctl = snd_ctl_find_id(card, &info->id);
err = 0;
if (_kctl) {
if (replace)
err = snd_ctl_remove(card, _kctl);
else
err = -EBUSY;
} else {
if (replace)
err = -ENOENT;
}
up_write(&card->controls_rwsem);
if (err < 0)
return err;
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
kctl.info = snd_ctl_elem_user_enum_info;
else
kctl.info = snd_ctl_elem_user_info;
if (access & SNDRV_CTL_ELEM_ACCESS_READ)
kctl.get = snd_ctl_elem_user_get;
if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
kctl.put = snd_ctl_elem_user_put;
if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
kctl.tlv.c = snd_ctl_elem_user_tlv;
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
private_size = sizeof(long);
if (info->count > 128)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
private_size = sizeof(long long);
if (info->count > 64)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
private_size = sizeof(unsigned int);
if (info->count > 128 || info->value.enumerated.items == 0)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
private_size = sizeof(unsigned char);
if (info->count > 512)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
private_size = sizeof(struct snd_aes_iec958);
if (info->count != 1)
return -EINVAL;
break;
default:
return -EINVAL;
}
private_size *= info->count;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
ue->elem_data_size = private_size;
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
kfree(ue);
return err;
}
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
kfree(ue->priv_data);
kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
if (err < 0)
return err;
down_write(&card->controls_rwsem);
card->user_ctl_count++;
up_write(&card->controls_rwsem);
return 0;
} | 0 | CVE-2014-4654 | 1,558 | benign |
CWE-416 | static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
struct snd_card *card = file->card;
struct snd_kcontrol kctl, *_kctl;
unsigned int access;
long private_size;
struct user_element *ue;
int idx, err;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
if (replace) {
err = snd_ctl_remove_user_ctl(file, &info->id);
if (err)
return err;
}
if (card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
kctl.info = snd_ctl_elem_user_enum_info;
else
kctl.info = snd_ctl_elem_user_info;
if (access & SNDRV_CTL_ELEM_ACCESS_READ)
kctl.get = snd_ctl_elem_user_get;
if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
kctl.put = snd_ctl_elem_user_put;
if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
kctl.tlv.c = snd_ctl_elem_user_tlv;
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
private_size = sizeof(long);
if (info->count > 128)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
private_size = sizeof(long long);
if (info->count > 64)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
private_size = sizeof(unsigned int);
if (info->count > 128 || info->value.enumerated.items == 0)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
private_size = sizeof(unsigned char);
if (info->count > 512)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
private_size = sizeof(struct snd_aes_iec958);
if (info->count != 1)
return -EINVAL;
break;
default:
return -EINVAL;
}
private_size *= info->count;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
ue->elem_data_size = private_size;
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
kfree(ue);
return err;
}
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
kfree(ue->priv_data);
kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
if (err < 0)
return err;
down_write(&card->controls_rwsem);
card->user_ctl_count++;
up_write(&card->controls_rwsem);
return 0;
} | 1 | CVE-2014-4654 | 1,558 | vulnerable |
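The two CVE-2014-4654 records handle replace differently: one looks up the old control with snd_ctl_find_id() and removes it inside down_write(&card->controls_rwsem), the other calls snd_ctl_remove_user_ctl() with no writer lock visible in this function. A minimal sketch of find-and-remove inside one critical section (list and lock names are hypothetical):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct ctl { int id; struct ctl *next; };
static struct ctl *ctl_list;
static pthread_mutex_t ctl_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lookup and unlink share one critical section, so no concurrent
 * caller can observe or remove the same control in between. */
static int ctl_replace_remove(int id) {
    int err = -ENOENT;
    pthread_mutex_lock(&ctl_lock);
    for (struct ctl **pp = &ctl_list; *pp; pp = &(*pp)->next) {
        if ((*pp)->id == id) {
            struct ctl *victim = *pp;
            *pp = victim->next;
            free(victim);
            err = 0;
            break;
        }
    }
    pthread_mutex_unlock(&ctl_lock);
    return err;
}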
CWE-20 | void enc28j60SelectBank(NetInterface *interface, uint16_t address)
{
uint16_t bank;
Enc28j60Context *context;
//Point to the driver context
context = (Enc28j60Context *) interface->nicContext;
//Get the bank number from the specified address
bank = address & REG_BANK_MASK;
//Rewrite the bank number only if a change is detected
if(bank != context->currentBank)
{
//Select specified bank
switch(bank)
{
case BANK_0:
//Select bank 0
enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0);
break;
case BANK_1:
//Select bank 1
enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0);
enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1);
break;
case BANK_2:
//Select bank 2
enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0);
enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1);
break;
case BANK_3:
//Select bank 3
enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0);
break;
default:
//Invalid bank
break;
}
//Save bank number
context->currentBank = bank;
}
} | 0 | CVE-2021-26788 | 1,956 | benign |
CWE-20 | void enc28j60SelectBank(NetInterface *interface, uint16_t address)
{
uint16_t bank;
Enc28j60Context *context;
//Point to the driver context
context = (Enc28j60Context *) interface->nicContext;
//Get the bank number from the specified address
bank = address & REG_BANK_MASK;
//Rewrite the bank number only if a change is detected
if(bank != context->currentBank)
{
//Select the relevant bank
if(bank == BANK_0)
{
//Select bank 0
enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1 |
ENC28J60_ECON1_BSEL0);
}
else if(bank == BANK_1)
{
//Select bank 1
enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL0);
enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1);
}
else if(bank == BANK_2)
{
//Select bank 2
enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL0);
enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1);
}
else
{
//Select bank 3
enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1 |
ENC28J60_ECON1_BSEL0);
}
//Save bank number
context->currentBank = bank;
}
} | 1 | CVE-2021-26788 | 1,956 | vulnerable |
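The two CVE-2021-26788 records are structurally different (switch vs if/else chain, renamed register constants) but both end up writing the same two ECON1 BSEL bits for banks 0 to 3. A sketch of deriving those bits from the register address; the mask value and bit position below are illustrative assumptions, not the driver's actual encoding:

#include <stdint.h>
#include <stdio.h>

#define REG_BANK_MASK 0x0300u   /* assumed: bank kept in address bits 9-8 */

/* Map a register address to the ECON1 BSEL1:BSEL0 value (0..3). */
static uint8_t bank_bits(uint16_t address) {
    return (uint8_t)((address & REG_BANK_MASK) >> 8);
}

int main(void) {
    for (uint16_t a = 0x0000; a <= 0x0300; a += 0x0100)
        printf("addr=0x%04x -> bsel=%u\n", a, bank_bits(a));
    return 0;
}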
CWE-119 | static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
const uint8_t *src, int src_size)
{
int width, height;
int hdr, zsize, npal, tidx = -1, ret;
int i, j;
const uint8_t *src_end = src + src_size;
uint8_t pal[768], transp[3];
uLongf dlen = (c->tile_width + 1) * c->tile_height;
int sub_type;
int nblocks, cblocks, bstride;
int bits, bitbuf, coded;
uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
tile_y * c->tile_height * c->framebuf_stride;
if (src_size < 2)
return AVERROR_INVALIDDATA;
width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
hdr = *src++;
sub_type = hdr >> 5;
if (sub_type == 0) {
int j;
memcpy(transp, src, 3);
src += 3;
for (j = 0; j < height; j++, dst += c->framebuf_stride)
for (i = 0; i < width; i++)
memcpy(dst + i * 3, transp, 3);
return 0;
} else if (sub_type == 1) {
return jpg_decode_data(&c->jc, width, height, src, src_end - src,
dst, c->framebuf_stride, NULL, 0, 0, 0);
}
if (sub_type != 2) {
memcpy(transp, src, 3);
src += 3;
}
npal = *src++ + 1;
memcpy(pal, src, npal * 3); src += npal * 3;
if (sub_type != 2) {
for (i = 0; i < npal; i++) {
if (!memcmp(pal + i * 3, transp, 3)) {
tidx = i;
break;
}
}
}
if (src_end - src < 2)
return 0;
zsize = (src[0] << 8) | src[1]; src += 2;
if (src_end - src < zsize)
return AVERROR_INVALIDDATA;
ret = uncompress(c->kempf_buf, &dlen, src, zsize);
if (ret)
return AVERROR_INVALIDDATA;
src += zsize;
if (sub_type == 2) {
kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
NULL, 0, width, height, pal, npal, tidx);
return 0;
}
nblocks = *src++ + 1;
cblocks = 0;
bstride = FFALIGN(width, 16) >> 4;
// blocks are coded LSB and we need normal bitreader for JPEG data
bits = 0;
for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
if (!bits) {
bitbuf = *src++;
bits = 8;
}
coded = bitbuf & 1;
bits--;
bitbuf >>= 1;
cblocks += coded;
if (cblocks > nblocks)
return AVERROR_INVALIDDATA;
c->kempf_flags[j + i * bstride] = coded;
}
}
memset(c->jpeg_tile, 0, c->tile_stride * height);
jpg_decode_data(&c->jc, width, height, src, src_end - src,
c->jpeg_tile, c->tile_stride,
c->kempf_flags, bstride, nblocks, 0);
kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
c->jpeg_tile, c->tile_stride,
width, height, pal, npal, tidx);
return 0;
} | 0 | CVE-2013-4264 | 3,273 | benign |
CWE-119 | static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
const uint8_t *src, int src_size)
{
int width, height;
int hdr, zsize, npal, tidx = -1, ret;
int i, j;
const uint8_t *src_end = src + src_size;
uint8_t pal[768], transp[3];
uLongf dlen = (c->tile_width + 1) * c->tile_height;
int sub_type;
int nblocks, cblocks, bstride;
int bits, bitbuf, coded;
uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
tile_y * c->tile_height * c->framebuf_stride;
if (src_size < 2)
return AVERROR_INVALIDDATA;
width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
hdr = *src++;
sub_type = hdr >> 5;
if (sub_type == 0) {
int j;
memcpy(transp, src, 3);
src += 3;
for (j = 0; j < height; j++, dst += c->framebuf_stride)
for (i = 0; i < width; i++)
memcpy(dst + i * 3, transp, 3);
return 0;
} else if (sub_type == 1) {
return jpg_decode_data(&c->jc, width, height, src, src_end - src,
dst, c->framebuf_stride, NULL, 0, 0, 0);
}
if (sub_type != 2) {
memcpy(transp, src, 3);
src += 3;
}
npal = *src++ + 1;
memcpy(pal, src, npal * 3); src += npal * 3;
if (sub_type != 2) {
for (i = 0; i < npal; i++) {
if (!memcmp(pal + i * 3, transp, 3)) {
tidx = i;
break;
}
}
}
if (src_end - src < 2)
return 0;
zsize = (src[0] << 8) | src[1]; src += 2;
if (src_end - src < zsize + (sub_type != 2))
return AVERROR_INVALIDDATA;
ret = uncompress(c->kempf_buf, &dlen, src, zsize);
if (ret)
return AVERROR_INVALIDDATA;
src += zsize;
if (sub_type == 2) {
kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
NULL, 0, width, height, pal, npal, tidx);
return 0;
}
nblocks = *src++ + 1;
cblocks = 0;
bstride = FFALIGN(width, 16) >> 4;
// blocks are coded LSB and we need normal bitreader for JPEG data
bits = 0;
for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
if (!bits) {
if (src >= src_end)
return AVERROR_INVALIDDATA;
bitbuf = *src++;
bits = 8;
}
coded = bitbuf & 1;
bits--;
bitbuf >>= 1;
cblocks += coded;
if (cblocks > nblocks)
return AVERROR_INVALIDDATA;
c->kempf_flags[j + i * bstride] = coded;
}
}
memset(c->jpeg_tile, 0, c->tile_stride * height);
jpg_decode_data(&c->jc, width, height, src, src_end - src,
c->jpeg_tile, c->tile_stride,
c->kempf_flags, bstride, nblocks, 0);
kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
c->jpeg_tile, c->tile_stride,
width, height, pal, npal, tidx);
return 0;
} | 1 | CVE-2013-4264 | 3,273 | vulnerable |
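The two CVE-2013-4264 records diverge in two bounds checks: the zlib-size test gains a '+ (sub_type != 2)' byte for the nblocks read that follows it, and the bit-buffer refill gains a src >= src_end test before dereferencing. A minimal sketch of the guarded refill:

/* Refill an 8-bit buffer only while input remains; a negative
 * return means truncated data, not a legitimate zero bit-buffer. */
static int refill_bitbuf(const unsigned char **src, const unsigned char *end,
                         unsigned *bitbuf, int *bits) {
    if (*src >= end)
        return -1;              /* would read past the input */
    *bitbuf = *(*src)++;
    *bits = 8;
    return 0;
}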
CWE-119 | static void dma_rx(struct b43_dmaring *ring, int *slot)
{
const struct b43_dma_ops *ops = ring->ops;
struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
struct b43_rxhdr_fw4 *rxhdr;
struct sk_buff *skb;
u16 len;
int err;
dma_addr_t dmaaddr;
desc = ops->idx2desc(ring, *slot, &meta);
sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
skb = meta->skb;
rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
len = le16_to_cpu(rxhdr->frame_len);
if (len == 0) {
int i = 0;
do {
udelay(2);
barrier();
len = le16_to_cpu(rxhdr->frame_len);
} while (len == 0 && i++ < 5);
if (unlikely(len == 0)) {
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
}
if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
/* Something went wrong with the DMA.
* The device did not touch the buffer and did not overwrite the poison. */
b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
if (unlikely(len > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
* This should never happen, as we try to allocate buffers
* big enough. So simply ignore this packet.
*/
int cnt = 0;
s32 tmp = len;
while (1) {
desc = ops->idx2desc(ring, *slot, &meta);
/* recycle the descriptor buffer. */
b43_poison_rx_buffer(ring, meta->skb);
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
*slot = next_slot(ring, *slot);
cnt++;
tmp -= ring->rx_buffersize;
if (tmp <= 0)
break;
}
b43err(ring->dev->wl, "DMA RX buffer too small "
"(len: %u, buffer: %u, nr-dropped: %d)\n",
len, ring->rx_buffersize, cnt);
goto drop;
}
dmaaddr = meta->dmaaddr;
err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
if (unlikely(err)) {
b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
goto drop_recycle_buffer;
}
unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
skb_put(skb, len + ring->frameoffset);
skb_pull(skb, ring->frameoffset);
b43_rx(ring->dev, skb, rxhdr);
drop:
return;
drop_recycle_buffer:
/* Poison and recycle the RX buffer. */
b43_poison_rx_buffer(ring, skb);
sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
} | 0 | CVE-2011-3359 | 2,057 | benign |
CWE-119 | static void dma_rx(struct b43_dmaring *ring, int *slot)
{
const struct b43_dma_ops *ops = ring->ops;
struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
struct b43_rxhdr_fw4 *rxhdr;
struct sk_buff *skb;
u16 len;
int err;
dma_addr_t dmaaddr;
desc = ops->idx2desc(ring, *slot, &meta);
sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
skb = meta->skb;
rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
len = le16_to_cpu(rxhdr->frame_len);
if (len == 0) {
int i = 0;
do {
udelay(2);
barrier();
len = le16_to_cpu(rxhdr->frame_len);
} while (len == 0 && i++ < 5);
if (unlikely(len == 0)) {
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
}
if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
/* Something went wrong with the DMA.
* The device did not touch the buffer and did not overwrite the poison. */
b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
* This should never happen, as we try to allocate buffers
* big enough. So simply ignore this packet.
*/
int cnt = 0;
s32 tmp = len;
while (1) {
desc = ops->idx2desc(ring, *slot, &meta);
/* recycle the descriptor buffer. */
b43_poison_rx_buffer(ring, meta->skb);
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
*slot = next_slot(ring, *slot);
cnt++;
tmp -= ring->rx_buffersize;
if (tmp <= 0)
break;
}
b43err(ring->dev->wl, "DMA RX buffer too small "
"(len: %u, buffer: %u, nr-dropped: %d)\n",
len, ring->rx_buffersize, cnt);
goto drop;
}
dmaaddr = meta->dmaaddr;
err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
if (unlikely(err)) {
b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
goto drop_recycle_buffer;
}
unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
skb_put(skb, len + ring->frameoffset);
skb_pull(skb, ring->frameoffset);
b43_rx(ring->dev, skb, rxhdr);
drop:
return;
drop_recycle_buffer:
/* Poison and recycle the RX buffer. */
b43_poison_rx_buffer(ring, skb);
sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
} | 1 | CVE-2011-3359 | 2,057 | vulnerable |
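The two CVE-2011-3359 records differ in one comparison: the oversize test uses len alone in one and len + ring->frameoffset in the other, while both later grow the skb by skb_put(skb, len + ring->frameoffset). The check has to bound the same quantity that is written. A minimal, overflow-safe sketch:

#include <stddef.h>

/* The buffer must hold offset + payload; rearranged as a
 * subtraction so the sum cannot wrap around on large inputs. */
static int rx_fits(size_t len, size_t frameoffset, size_t buffersize) {
    return len <= buffersize && frameoffset <= buffersize - len;
}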
CWE-119 | static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
const char *section_name = "";
const char *link_section_name = "";
char *end = NULL;
Elf_(Shdr) *link_shdr = NULL;
ut8 dfs[sizeof (Elf_(Verdef))] = {0};
Sdb *sdb;
int cnt, i;
if (shdr->sh_link > bin->ehdr.e_shnum) {
return false;
}
link_shdr = &bin->shdr[shdr->sh_link];
if (shdr->sh_size < 1) {
return false;
}
Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char));
if (!defs) {
return false;
}
if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
section_name = &bin->shstrtab[shdr->sh_name];
}
if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
link_section_name = &bin->shstrtab[link_shdr->sh_name];
}
if (!defs) {
bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n");
return NULL;
}
sdb = sdb_new0 ();
end = (char *)defs + shdr->sh_size;
sdb_set (sdb, "section_name", section_name, 0);
sdb_num_set (sdb, "entries", shdr->sh_info, 0);
sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
sdb_num_set (sdb, "link", shdr->sh_link, 0);
sdb_set (sdb, "link_section_name", link_section_name, 0);
for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) {
Sdb *sdb_verdef = sdb_new0 ();
char *vstart = ((char*)defs) + i;
char key[32] = {0};
Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart;
Elf_(Verdaux) aux = {0};
int j = 0;
int isum = 0;
r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef)));
verdef->vd_version = READ16 (dfs, j)
verdef->vd_flags = READ16 (dfs, j)
verdef->vd_ndx = READ16 (dfs, j)
verdef->vd_cnt = READ16 (dfs, j)
verdef->vd_hash = READ32 (dfs, j)
verdef->vd_aux = READ32 (dfs, j)
verdef->vd_next = READ32 (dfs, j)
int vdaux = verdef->vd_aux;
if (vdaux < 1) {
sdb_free (sdb_verdef);
goto out_error;
}
vstart += vdaux;
if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
goto out_error;
}
j = 0;
aux.vda_name = READ32 (vstart, j)
aux.vda_next = READ32 (vstart, j)
isum = i + verdef->vd_aux;
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
goto out_error;
}
sdb_num_set (sdb_verdef, "idx", i, 0);
sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0);
sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0);
sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0);
sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0);
sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0);
for (j = 1; j < verdef->vd_cnt; ++j) {
int k;
Sdb *sdb_parent = sdb_new0 ();
isum += aux.vda_next;
vstart += aux.vda_next;
if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
k = 0;
aux.vda_name = READ32 (vstart, k)
aux.vda_next = READ32 (vstart, k)
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
sdb_num_set (sdb_parent, "idx", isum, 0);
sdb_num_set (sdb_parent, "parent", j, 0);
sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0);
snprintf (key, sizeof (key), "parent%d", j - 1);
sdb_ns_set (sdb_verdef, key, sdb_parent);
}
snprintf (key, sizeof (key), "verdef%d", cnt);
sdb_ns_set (sdb, key, sdb_verdef);
if (!verdef->vd_next) {
sdb_free (sdb_verdef);
goto out_error;
}
if ((st32)verdef->vd_next < 1) {
eprintf ("Warning: Invalid vd_next in the ELF version\n");
break;
}
i += verdef->vd_next;
}
free (defs);
return sdb;
out_error:
free (defs);
sdb_free (sdb);
return NULL;
} | 0 | CVE-2017-16357 | 877 | benign |
CWE-119 | static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
const char *section_name = "";
const char *link_section_name = "";
char *end = NULL;
Elf_(Shdr) *link_shdr = NULL;
ut8 dfs[sizeof (Elf_(Verdef))] = {0};
Sdb *sdb;
int cnt, i;
if (shdr->sh_link > bin->ehdr.e_shnum) {
return false;
}
link_shdr = &bin->shdr[shdr->sh_link];
if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) {
return false;
}
Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char));
if (!defs) {
return false;
}
if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
section_name = &bin->shstrtab[shdr->sh_name];
}
if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
link_section_name = &bin->shstrtab[link_shdr->sh_name];
}
if (!defs) {
bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n");
return NULL;
}
sdb = sdb_new0 ();
end = (char *)defs + shdr->sh_size;
sdb_set (sdb, "section_name", section_name, 0);
sdb_num_set (sdb, "entries", shdr->sh_info, 0);
sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
sdb_num_set (sdb, "link", shdr->sh_link, 0);
sdb_set (sdb, "link_section_name", link_section_name, 0);
for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) {
Sdb *sdb_verdef = sdb_new0 ();
char *vstart = ((char*)defs) + i;
char key[32] = {0};
Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart;
Elf_(Verdaux) aux = {0};
int j = 0;
int isum = 0;
r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef)));
verdef->vd_version = READ16 (dfs, j)
verdef->vd_flags = READ16 (dfs, j)
verdef->vd_ndx = READ16 (dfs, j)
verdef->vd_cnt = READ16 (dfs, j)
verdef->vd_hash = READ32 (dfs, j)
verdef->vd_aux = READ32 (dfs, j)
verdef->vd_next = READ32 (dfs, j)
int vdaux = verdef->vd_aux;
if (vdaux < 1) {
sdb_free (sdb_verdef);
goto out_error;
}
vstart += vdaux;
if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
goto out_error;
}
j = 0;
aux.vda_name = READ32 (vstart, j)
aux.vda_next = READ32 (vstart, j)
isum = i + verdef->vd_aux;
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
goto out_error;
}
sdb_num_set (sdb_verdef, "idx", i, 0);
sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0);
sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0);
sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0);
sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0);
sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0);
for (j = 1; j < verdef->vd_cnt; ++j) {
int k;
Sdb *sdb_parent = sdb_new0 ();
isum += aux.vda_next;
vstart += aux.vda_next;
if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
k = 0;
aux.vda_name = READ32 (vstart, k)
aux.vda_next = READ32 (vstart, k)
if (aux.vda_name > bin->dynstr_size) {
sdb_free (sdb_verdef);
sdb_free (sdb_parent);
goto out_error;
}
sdb_num_set (sdb_parent, "idx", isum, 0);
sdb_num_set (sdb_parent, "parent", j, 0);
sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0);
snprintf (key, sizeof (key), "parent%d", j - 1);
sdb_ns_set (sdb_verdef, key, sdb_parent);
}
snprintf (key, sizeof (key), "verdef%d", cnt);
sdb_ns_set (sdb, key, sdb_verdef);
if (!verdef->vd_next) {
sdb_free (sdb_verdef);
goto out_error;
}
if ((st32)verdef->vd_next < 1) {
eprintf ("Warning: Invalid vd_next in the ELF version\n");
break;
}
i += verdef->vd_next;
}
free (defs);
return sdb;
out_error:
free (defs);
sdb_free (sdb);
return NULL;
} | 1 | CVE-2017-16357 | 877 | vulnerable |
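The two CVE-2017-16357 records differ in the size guard before calloc(shdr->sh_size, sizeof(char)): one adds shdr->sh_size > SIZE_MAX. That extra test only bites where the field is wider than size_t (a 64-bit ELF size on a 32-bit host); on an LP64 host it is vacuously false. A minimal sketch of narrowing a 64-bit on-disk size safely:

#include <stdint.h>
#include <stdlib.h>

/* Bound a 64-bit file-provided size before it is narrowed to
 * size_t, so a huge value cannot wrap in the allocator call. */
static void *alloc_section(uint64_t sh_size) {
    if (sh_size < 1 || sh_size > SIZE_MAX)
        return NULL;
    return calloc((size_t)sh_size, 1);
}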
CWE-119 | check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
{
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
int ret, off, h;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
if (e->next_offset < sizeof(struct compat_arpt_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct arpt_entry *)e);
if (ret)
return ret;
off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
entry_offset = (void *)e - (void *)base;
t = compat_arpt_get_target(e);
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
ret = PTR_ERR(target);
goto out;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
if (ret)
goto release_target;
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;
release_target:
module_put(t->u.kernel.target->me);
out:
return ret;
} | 0 | CVE-2016-4998 | 1,668 | benign |
CWE-119 | check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
{
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
int ret, off, h;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
if (e->next_offset < sizeof(struct compat_arpt_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct arpt_entry *)e);
if (ret)
return ret;
off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
entry_offset = (void *)e - (void *)base;
t = compat_arpt_get_target(e);
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
ret = PTR_ERR(target);
goto out;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
if (ret)
goto release_target;
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;
release_target:
module_put(t->u.kernel.target->me);
out:
return ret;
} | 1 | CVE-2016-4998 | 1,668 | vulnerable |
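The two CVE-2016-4998 records differ in one condition: the entry validation gains '(unsigned char *)e + e->next_offset > limit', bounding the region the entry claims and not just its header. A minimal sketch of the same check written with offsets so the additions cannot overflow:

#include <stddef.h>

/* An entry is usable only if its header fits in the blob AND the
 * span it claims via next_offset also stays inside the blob. */
static int entry_in_bounds(size_t entry_off, size_t hdr_size,
                           size_t next_offset, size_t blob_size) {
    return entry_off <= blob_size
        && hdr_size <= blob_size - entry_off
        && next_offset <= blob_size - entry_off;
}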
CWE-362 | static struct ip_options *ip_options_get_alloc(const int optlen)
{
return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
GFP_KERNEL);
} | 0 | CVE-2012-3552 | 508 | benign |
CWE-362 | static struct ip_options_rcu *ip_options_get_alloc(const int optlen)
{
return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3),
GFP_KERNEL);
} | 1 | CVE-2012-3552 | 508 | vulnerable |
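The two CVE-2012-3552 records allocate the same shape with different outer types: a header struct (ip_options vs ip_options_rcu) followed by the option bytes rounded up to a 4-byte multiple via (optlen + 3) & ~3. A runnable check of that round-up:

#include <stdio.h>

/* Round an option length up to the next multiple of 4, matching
 * the ((optlen + 3) & ~3) term in both records. */
static int opt_padded(int optlen) {
    return (optlen + 3) & ~3;
}

int main(void) {
    for (int n = 0; n <= 5; n++)
        printf("optlen=%d -> padded=%d\n", n, opt_padded(n));
    return 0;
}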
CWE-787 | MemInStream(const void* data, int len, bool deleteWhenDone_=false)
: start((const U8*)data), deleteWhenDone(deleteWhenDone_)
{
ptr = start;
end = start + len;
} | 0 | CVE-2019-15694 | 2,059 | benign |
CWE-787 | MemInStream(const void* data, size_t len, bool deleteWhenDone_=false)
: start((const U8*)data), deleteWhenDone(deleteWhenDone_)
{
ptr = start;
end = start + len;
} | 1 | CVE-2019-15694 | 2,059 | vulnerable |
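The two CVE-2019-15694 records differ only in the type of len: int versus size_t. With a signed length, a negative value makes end = start + len point before start, and every later bounds comparison against end is wrong; size_t rules that out at the type level. A minimal sketch:

#include <stddef.h>

struct mem_in {
    const unsigned char *ptr, *end;
};

/* size_t cannot be negative, so end >= ptr holds by construction;
 * a caller-side cap should still bound attacker-chosen sizes. */
static void mem_in_init(struct mem_in *s, const void *data, size_t len) {
    s->ptr = (const unsigned char *)data;
    s->end = s->ptr + len;
}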
CWE-125 | void nego_process_negotiation_request(rdpNego* nego, wStream* s)
{
BYTE flags;
UINT16 length;
Stream_Read_UINT8(s, flags);
Stream_Read_UINT16(s, length);
Stream_Read_UINT32(s, nego->RequestedProtocols);
WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols);
nego->state = NEGO_STATE_FINAL;
} | 0 | CVE-2020-11089 | 1,148 | benign |
CWE-125 | BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s)
{
BYTE flags;
UINT16 length;
if (Stream_GetRemainingLength(s) < 7)
return FALSE;
Stream_Read_UINT8(s, flags);
Stream_Read_UINT16(s, length);
Stream_Read_UINT32(s, nego->RequestedProtocols);
WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols);
nego->state = NEGO_STATE_FINAL;
return TRUE;
} | 1 | CVE-2020-11089 | 1,148 | vulnerable |
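The two CVE-2020-11089 records differ in return type (void vs BOOL) and in the Stream_GetRemainingLength(s) < 7 guard: the body read consumes exactly 1 + 2 + 4 = 7 bytes. A minimal sketch of the same guard over a raw buffer, assuming the little-endian wire order the Stream_Read_* helpers use:

#include <stddef.h>
#include <stdint.h>

/* Verify all 7 bytes are present before reading any field. */
static int read_nego_request(const uint8_t *p, size_t remaining,
                             uint8_t *flags, uint16_t *length,
                             uint32_t *protocols) {
    if (remaining < 7)
        return 0;                           /* FALSE: short buffer */
    *flags = p[0];
    *length = (uint16_t)(p[1] | (p[2] << 8));
    *protocols = (uint32_t)p[3] | ((uint32_t)p[4] << 8)
               | ((uint32_t)p[5] << 16) | ((uint32_t)p[6] << 24);
    return 1;                               /* TRUE */
}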
CWE-476 | AP4_DataAtom::AP4_DataAtom(AP4_UI32 size, AP4_ByteStream& stream) :
AP4_Atom(AP4_ATOM_TYPE_DATA, size)
{
if (size < AP4_ATOM_HEADER_SIZE+8) return;
AP4_UI32 i;
stream.ReadUI32(i); m_DataType = (DataType)i;
stream.ReadUI32(i); m_DataLang = (DataLang)i;
// the stream for the data is a substream of this source
AP4_Position data_offset;
stream.Tell(data_offset);
AP4_Size data_size = size-AP4_ATOM_HEADER_SIZE-8;
m_Source = new AP4_SubStream(stream, data_offset, data_size);
} | 0 | CVE-2017-14641 | 2,964 | benign |
CWE-476 | AP4_DataAtom::AP4_DataAtom(AP4_UI32 size, AP4_ByteStream& stream) :
AP4_Atom(AP4_ATOM_TYPE_DATA, size),
m_Source(NULL)
{
if (size < AP4_ATOM_HEADER_SIZE+8) return;
AP4_UI32 i;
stream.ReadUI32(i); m_DataType = (DataType)i;
stream.ReadUI32(i); m_DataLang = (DataLang)i;
// the stream for the data is a substream of this source
AP4_Position data_offset;
stream.Tell(data_offset);
AP4_Size data_size = size-AP4_ATOM_HEADER_SIZE-8;
m_Source = new AP4_SubStream(stream, data_offset, data_size);
} | 1 | CVE-2017-14641 | 2,964 | vulnerable |
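The two CVE-2017-14641 records differ only in the constructor's initializer list: m_Source(NULL) runs before the size < AP4_ATOM_HEADER_SIZE+8 early return, so the short-size path leaves the member NULL rather than indeterminate. The same discipline in C, with hypothetical names:

#include <stddef.h>
#include <stdlib.h>

struct data_atom {
    void *source;               /* owned sub-stream, freed on destroy */
};

/* Initialize every pointer member before any early-exit path, so
 * a later destroy/free sees NULL rather than stack garbage. */
static void data_atom_init(struct data_atom *a, size_t size, size_t min_size) {
    a->source = NULL;           /* mirrors m_Source(NULL) in the record */
    if (size < min_size)
        return;                 /* early out: source stays well-defined */
    a->source = malloc(size - min_size);
}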
CWE-125 | static void youngcollection (lua_State *L, global_State *g) {
GCObject **psurvival; /* to point to first non-dead survival object */
lua_assert(g->gcstate == GCSpropagate);
markold(g, g->survival, g->reallyold);
markold(g, g->finobj, g->finobjrold);
atomic(L);
/* sweep nursery and get a pointer to its last live element */
psurvival = sweepgen(L, g, &g->allgc, g->survival);
/* sweep 'survival' and 'old' */
sweepgen(L, g, psurvival, g->reallyold);
g->reallyold = g->old;
g->old = *psurvival; /* 'survival' survivals are old now */
g->survival = g->allgc; /* all news are survivals */
/* repeat for 'finobj' lists */
psurvival = sweepgen(L, g, &g->finobj, g->finobjsur);
/* sweep 'survival' and 'old' */
sweepgen(L, g, psurvival, g->finobjrold);
g->finobjrold = g->finobjold;
g->finobjold = *psurvival; /* 'survival' survivals are old now */
g->finobjsur = g->finobj; /* all news are survivals */
sweepgen(L, g, &g->tobefnz, NULL);
finishgencycle(L, g);
} | 0 | CVE-2020-15889 | 2,233 | benign |
CWE-125 | static void youngcollection (lua_State *L, global_State *g) {
GCObject **psurvival; /* to point to first non-dead survival object */
lua_assert(g->gcstate == GCSpropagate);
markold(g, g->allgc, g->reallyold);
markold(g, g->finobj, g->finobjrold);
atomic(L);
/* sweep nursery and get a pointer to its last live element */
psurvival = sweepgen(L, g, &g->allgc, g->survival);
/* sweep 'survival' and 'old' */
sweepgen(L, g, psurvival, g->reallyold);
g->reallyold = g->old;
g->old = *psurvival; /* 'survival' survivals are old now */
g->survival = g->allgc; /* all news are survivals */
/* repeat for 'finobj' lists */
psurvival = sweepgen(L, g, &g->finobj, g->finobjsur);
/* sweep 'survival' and 'old' */
sweepgen(L, g, psurvival, g->finobjrold);
g->finobjrold = g->finobjold;
g->finobjold = *psurvival; /* 'survival' survivals are old now */
g->finobjsur = g->finobj; /* all news are survivals */
sweepgen(L, g, &g->tobefnz, NULL);
finishgencycle(L, g);
} | 1 | CVE-2020-15889 | 2,233 | vulnerable |
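The two CVE-2020-15889 records differ in a single argument: the first markold() walk starts at g->survival in one and at g->allgc in the other, with the same g->reallyold sentinel. Given the assignments visible at the end of the function (g->survival = g->allgc), the starting head decides whether the newest objects are aged before atomic(). A minimal sketch of a head-to-sentinel walk:

struct obj { struct obj *next; int marked; };

/* Mark everything in [from, upto): choosing the wrong head either
 * skips the newest objects or re-walks already-aged ones. */
static void mark_range(struct obj *from, struct obj *upto) {
    for (struct obj *o = from; o != upto; o = o->next)
        o->marked = 1;
}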
CWE-119 | static pyc_object *get_tuple_object(RBuffer *buffer) {
pyc_object *ret = NULL;
bool error = false;
ut32 n = 0;
n = get_ut32 (buffer, &error);
if (n > ST32_MAX) {
eprintf ("bad marshal data (tuple size out of range)\n");
return NULL;
}
if (error) {
return NULL;
}
ret = get_array_object_generic (buffer, n);
if (ret) {
ret->type = TYPE_TUPLE;
return ret;
}
return NULL;
} | 0 | CVE-2022-0523 | 687 | benign |
CWE-119 | static pyc_object *get_tuple_object(RBuffer *buffer) {
bool error = false;
ut32 n = get_ut32 (buffer, &error);
if (n > ST32_MAX) {
eprintf ("bad marshal data (tuple size out of range)\n");
return NULL;
}
if (error) {
return NULL;
}
pyc_object *ret = get_array_object_generic (buffer, n);
if (ret) {
ret->type = TYPE_TUPLE;
}
return ret;
} | 1 | CVE-2022-0523 | 687 | vulnerable |
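The two CVE-2022-0523 records carry the same logic; the visible differences are declaration placement and a single-return tail. Both gate the on-disk tuple size with the same two tests before building the array. A minimal sketch of that gate:

#include <stdint.h>

#define MY_ST32_MAX 0x7fffffff  /* stand-in for radare2's ST32_MAX */

/* A count read from the file is trusted only after both the read
 * error and the signed-range bound have been checked. */
static int checked_count(uint32_t raw, int read_error, uint32_t *out) {
    if (read_error || raw > MY_ST32_MAX)
        return -1;
    *out = raw;
    return 0;
}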