code: string (lengths 46 to 2.03k)
label_name: string (15 classes)
label: int64 (values 0 to 14)
void ZydisFormatterBufferInitTokenized(ZydisFormatterBuffer* buffer, ZydisFormatterToken** first_token, void* user_buffer, ZyanUSize length) { ZYAN_ASSERT(buffer); ZYAN_ASSERT(first_token); ZYAN_ASSERT(user_buffer); ZYAN_ASSERT(length); *first_token = user_buffer; (*first_token)->type = ZYDIS_TOKEN_INVALID; (*first_token)->next = 0; user_buffer = (ZyanU8*)user_buffer + sizeof(ZydisFormatterToken); length -= sizeof(ZydisFormatterToken); buffer->is_token_list = ZYAN_TRUE; buffer->capacity = length; buffer->string.flags = ZYAN_STRING_HAS_FIXED_CAPACITY; buffer->string.vector.allocator = ZYAN_NULL; buffer->string.vector.element_size = sizeof(char); buffer->string.vector.size = 1; buffer->string.vector.capacity = length; buffer->string.vector.data = user_buffer; *(char*)user_buffer = '\0'; }
CWE-122
8
static int sof_set_get_large_ctrl_data(struct snd_sof_dev *sdev, struct sof_ipc_ctrl_data *cdata, struct sof_ipc_ctrl_data_params *sparams, bool send) { struct sof_ipc_ctrl_data *partdata; size_t send_bytes; size_t offset = 0; size_t msg_bytes; size_t pl_size; int err; int i; /* allocate max ipc size because we have at least one */ partdata = kzalloc(SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL); if (!partdata) return -ENOMEM; if (send) err = sof_get_ctrl_copy_params(cdata->type, cdata, partdata, sparams); else err = sof_get_ctrl_copy_params(cdata->type, partdata, cdata, sparams); if (err < 0) return err; msg_bytes = sparams->msg_bytes; pl_size = sparams->pl_size; /* copy the header data */ memcpy(partdata, cdata, sparams->hdr_bytes); /* Serialise IPC TX */ mutex_lock(&sdev->ipc->tx_mutex); /* copy the payload data in a loop */ for (i = 0; i < sparams->num_msg; i++) { send_bytes = min(msg_bytes, pl_size); partdata->num_elems = send_bytes; partdata->rhdr.hdr.size = sparams->hdr_bytes + send_bytes; partdata->msg_index = i; msg_bytes -= send_bytes; partdata->elems_remaining = msg_bytes; if (send) memcpy(sparams->dst, sparams->src + offset, send_bytes); err = sof_ipc_tx_message_unlocked(sdev->ipc, partdata->rhdr.hdr.cmd, partdata, partdata->rhdr.hdr.size, partdata, partdata->rhdr.hdr.size); if (err < 0) break; if (!send) memcpy(sparams->dst + offset, sparams->src, send_bytes); offset += pl_size; } mutex_unlock(&sdev->ipc->tx_mutex); kfree(partdata); return err; }
CWE-401
7
static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, int id) { struct ion_handle *handle; mutex_lock(&client->lock); handle = idr_find(&client->idr, id); if (handle) ion_handle_get(handle); mutex_unlock(&client->lock); return handle ? handle : ERR_PTR(-EINVAL); }
CWE-416
5
BGD_DECLARE(void) gdImageGifCtx(gdImagePtr im, gdIOCtxPtr out) { gdImagePtr pim = 0, tim = im; int interlace, BitsPerPixel; interlace = im->interlace; if(im->trueColor) { /* Expensive, but the only way that produces an acceptable result: mix down to a palette based temporary image. */ pim = gdImageCreatePaletteFromTrueColor(im, 1, 256); if(!pim) { return; } tim = pim; } BitsPerPixel = colorstobpp(tim->colorsTotal); /* All set, let's do it. */ GIFEncode( out, tim->sx, tim->sy, interlace, 0, tim->transparent, BitsPerPixel, tim->red, tim->green, tim->blue, tim); if(pim) { /* Destroy palette based temporary image. */ gdImageDestroy( pim); } }
CWE-415
4
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; }
CWE-415
4
void inet6_destroy_sock(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; struct ipv6_txoptions *opt; /* Release rx options */ skb = xchg(&np->pktoptions, NULL); if (skb) kfree_skb(skb); skb = xchg(&np->rxpmtu, NULL); if (skb) kfree_skb(skb); /* Free flowlabels */ fl6_free_socklist(sk); /* Free tx options */ opt = xchg(&np->opt, NULL); if (opt) sock_kfree_s(sk, opt, opt->tot_len); }
CWE-416
5
SPL_METHOD(SplDoublyLinkedList, offsetSet) { zval *zindex, *value; spl_dllist_object *intern; if (zend_parse_parameters(ZEND_NUM_ARGS(), "zz", &zindex, &value) == FAILURE) { return; } intern = Z_SPLDLLIST_P(getThis()); if (Z_TYPE_P(zindex) == IS_NULL) { /* $obj[] = ... */ spl_ptr_llist_push(intern->llist, value); } else { /* $obj[$foo] = ... */ zend_long index; spl_ptr_llist_element *element; index = spl_offset_convert_to_long(zindex); if (index < 0 || index >= intern->llist->count) { zval_ptr_dtor(value); zend_throw_exception(spl_ce_OutOfRangeException, "Offset invalid or out of range", 0); return; } element = spl_ptr_llist_offset(intern->llist, index, intern->flags & SPL_DLLIST_IT_LIFO); if (element != NULL) { /* call dtor on the old element as in spl_ptr_llist_pop */ if (intern->llist->dtor) { intern->llist->dtor(element); } /* the element is replaced, delref the old one as in * SplDoublyLinkedList::pop() */ zval_ptr_dtor(&element->data); ZVAL_COPY_VALUE(&element->data, value); /* new element, call ctor as in spl_ptr_llist_push */ if (intern->llist->ctor) { intern->llist->ctor(element); } } else { zval_ptr_dtor(value); zend_throw_exception(spl_ce_OutOfRangeException, "Offset invalid", 0); return; } } } /* }}} */
CWE-415
4
static int link_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, i = 0, nbuf; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } /* * If we have iterated all input buffers or ran out of * output room, break. */ if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) break; ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1)); nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ pipe_buf_get(ipipe, ibuf); obuf = opipe->bufs + nbuf; *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; if (obuf->len > len) obuf->len = len; opipe->nrbufs++; ret += obuf->len; len -= obuf->len; i++; } while (len); /* * return EAGAIN if we have the potential of some data in the * future, otherwise just return 0 */ if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ret = -EAGAIN; pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); return ret; }
CWE-416
5
static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); free(q); }
CWE-416
5
init_ctx_reselect(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc, OM_uint32 acc_negState, gss_OID supportedMech, gss_buffer_t *responseToken, gss_buffer_t *mechListMIC, OM_uint32 *negState, send_token_flag *tokflag) { OM_uint32 tmpmin; size_t i; generic_gss_release_oid(&tmpmin, &sc->internal_mech); gss_delete_sec_context(&tmpmin, &sc->ctx_handle, GSS_C_NO_BUFFER); /* Find supportedMech in sc->mech_set. */ for (i = 0; i < sc->mech_set->count; i++) { if (g_OID_equal(supportedMech, &sc->mech_set->elements[i])) break; } if (i == sc->mech_set->count) return GSS_S_DEFECTIVE_TOKEN; sc->internal_mech = &sc->mech_set->elements[i]; /* * Windows 2003 and earlier don't correctly send a * negState of request-mic when counter-proposing a * mechanism. They probably don't handle mechListMICs * properly either. */ if (acc_negState != REQUEST_MIC) return GSS_S_DEFECTIVE_TOKEN; sc->mech_complete = 0; sc->mic_reqd = 1; *negState = REQUEST_MIC; *tokflag = CONT_TOKEN_SEND; return GSS_S_CONTINUE_NEEDED; }
CWE-415
4
void gf_av1_reset_state(AV1State *state, Bool is_destroy) { GF_List *l1, *l2; if (state->frame_state.header_obus) { while (gf_list_count(state->frame_state.header_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.header_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } if (state->frame_state.frame_obus) { while (gf_list_count(state->frame_state.frame_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.frame_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } l1 = state->frame_state.frame_obus; l2 = state->frame_state.header_obus; memset(&state->frame_state, 0, sizeof(AV1StateFrame)); state->frame_state.is_first_frame = GF_TRUE; if (is_destroy) { gf_list_del(l1); gf_list_del(l2); if (state->bs) { if (gf_bs_get_position(state->bs)) { u32 size; gf_bs_get_content_no_truncate(state->bs, &state->frame_obus, &size, &state->frame_obus_alloc); } gf_bs_del(state->bs); } state->bs = NULL; } else { state->frame_state.frame_obus = l1; state->frame_state.header_obus = l2; if (state->bs) gf_bs_seek(state->bs, 0); } }
CWE-415
4
static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) { struct bfq_data *bfqd = container_of(timer, struct bfq_data, idle_slice_timer); struct bfq_queue *bfqq = bfqd->in_service_queue; /* * Theoretical race here: the in-service queue can be NULL or * different from the queue that was idling if a new request * arrives for the current queue and there is a full dispatch * cycle that changes the in-service queue. This can hardly * happen, but in the worst case we just expire a queue too * early. */ if (bfqq) bfq_idle_slice_timer_body(bfqq); return HRTIMER_NORESTART; }
CWE-416
5
onig_new_deluxe(regex_t** reg, const UChar* pattern, const UChar* pattern_end, OnigCompileInfo* ci, OnigErrorInfo* einfo) { int r; UChar *cpat, *cpat_end; if (IS_NOT_NULL(einfo)) einfo->par = (UChar* )NULL; if (ci->pattern_enc != ci->target_enc) { r = conv_encoding(ci->pattern_enc, ci->target_enc, pattern, pattern_end, &cpat, &cpat_end); if (r != 0) return r; } else { cpat = (UChar* )pattern; cpat_end = (UChar* )pattern_end; } *reg = (regex_t* )xmalloc(sizeof(regex_t)); if (IS_NULL(*reg)) { r = ONIGERR_MEMORY; goto err2; } r = onig_reg_init(*reg, ci->option, ci->case_fold_flag, ci->target_enc, ci->syntax); if (r != 0) goto err; r = onig_compile(*reg, cpat, cpat_end, einfo); if (r != 0) { err: onig_free(*reg); *reg = NULL; } err2: if (cpat != pattern) xfree(cpat); return r; }
CWE-416
5
void luaD_call (lua_State *L, StkId func, int nresults) { lua_CFunction f; retry: switch (ttypetag(s2v(func))) { case LUA_VCCL: /* C closure */ f = clCvalue(s2v(func))->f; goto Cfunc; case LUA_VLCF: /* light C function */ f = fvalue(s2v(func)); Cfunc: { int n; /* number of returns */ CallInfo *ci = next_ci(L); checkstackp(L, LUA_MINSTACK, func); /* ensure minimum stack size */ ci->nresults = nresults; ci->callstatus = CIST_C; ci->top = L->top + LUA_MINSTACK; ci->func = func; L->ci = ci; lua_assert(ci->top <= L->stack_last); if (L->hookmask & LUA_MASKCALL) { int narg = cast_int(L->top - func) - 1; luaD_hook(L, LUA_HOOKCALL, -1, 1, narg); } lua_unlock(L); n = (*f)(L); /* do the actual call */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); break; } case LUA_VLCL: { /* Lua function */ CallInfo *ci = next_ci(L); Proto *p = clLvalue(s2v(func))->p; int narg = cast_int(L->top - func) - 1; /* number of real arguments */ int nfixparams = p->numparams; int fsize = p->maxstacksize; /* frame size */ checkstackp(L, fsize, func); ci->nresults = nresults; ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus = 0; ci->top = func + 1 + fsize; ci->func = func; L->ci = ci; for (; narg < nfixparams; narg++) setnilvalue(s2v(L->top++)); /* complete missing arguments */ lua_assert(ci->top <= L->stack_last); luaV_execute(L, ci); /* run the function */ break; } default: { /* not a function */ checkstackp(L, 1, func); /* space for metamethod */ luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */ goto retry; /* try again with metamethod */ } } }
CWE-416
5
int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct *desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; desc = get_desc(sel); if (!desc) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc->type & BIT(3))) return -EINVAL; switch ((desc->l << 1) | desc->d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } }
CWE-416
5
BGD_DECLARE(void) gdImageWebpCtx (gdImagePtr im, gdIOCtx * outfile, int quality) { uint8_t *argb; int x, y; uint8_t *p; uint8_t *out; size_t out_size; if (im == NULL) { return; } if (!gdImageTrueColor(im)) { gd_error("Paletter image not supported by webp"); return; } if (quality == -1) { quality = 80; } if (overflow2(gdImageSX(im), 4)) { return; } if (overflow2(gdImageSX(im) * 4, gdImageSY(im))) { return; } argb = (uint8_t *)gdMalloc(gdImageSX(im) * 4 * gdImageSY(im)); if (!argb) { return; } p = argb; for (y = 0; y < gdImageSY(im); y++) { for (x = 0; x < gdImageSX(im); x++) { register int c; register char a; c = im->tpixels[y][x]; a = gdTrueColorGetAlpha(c); if (a == 127) { a = 0; } else { a = 255 - ((a << 1) + (a >> 6)); } *(p++) = gdTrueColorGetRed(c); *(p++) = gdTrueColorGetGreen(c); *(p++) = gdTrueColorGetBlue(c); *(p++) = a; } } out_size = WebPEncodeRGBA(argb, gdImageSX(im), gdImageSY(im), gdImageSX(im) * 4, quality, &out); if (out_size == 0) { gd_error("gd-webp encoding failed"); goto freeargb; } gdPutBuf(out, out_size, outfile); free(out); freeargb: gdFree(argb); }
CWE-415
4
int adis_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); const struct iio_chan_spec *chan; unsigned int scan_count; unsigned int i, j; __be16 *tx, *rx; kfree(adis->xfer); kfree(adis->buffer); if (adis->burst && adis->burst->en) return adis_update_scan_mode_burst(indio_dev, scan_mask); scan_count = indio_dev->scan_bytes / 2; adis->xfer = kcalloc(scan_count + 1, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL); if (!adis->buffer) return -ENOMEM; rx = adis->buffer; tx = rx + scan_count; spi_message_init(&adis->msg); for (j = 0; j <= scan_count; j++) { adis->xfer[j].bits_per_word = 8; if (j != scan_count) adis->xfer[j].cs_change = 1; adis->xfer[j].len = 2; adis->xfer[j].delay_usecs = adis->data->read_delay; if (j < scan_count) adis->xfer[j].tx_buf = &tx[j]; if (j >= 1) adis->xfer[j].rx_buf = &rx[j - 1]; spi_message_add_tail(&adis->xfer[j], &adis->msg); } chan = indio_dev->channels; for (i = 0; i < indio_dev->num_channels; i++, chan++) { if (!test_bit(chan->scan_index, scan_mask)) continue; if (chan->scan_type.storagebits == 32) *tx++ = cpu_to_be16((chan->address + 2) << 8); *tx++ = cpu_to_be16(chan->address << 8); } return 0; }
CWE-401
7
PHP_FUNCTION(unserialize) { char *buf = NULL; size_t buf_len; const unsigned char *p; php_unserialize_data_t var_hash; zval *options = NULL, *classes = NULL; HashTable *class_hash = NULL; if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|a", &buf, &buf_len, &options) == FAILURE) { RETURN_FALSE; } if (buf_len == 0) { RETURN_FALSE; } p = (const unsigned char*) buf; PHP_VAR_UNSERIALIZE_INIT(var_hash); if(options != NULL) { classes = zend_hash_str_find(Z_ARRVAL_P(options), "allowed_classes", sizeof("allowed_classes")-1); if(classes && (Z_TYPE_P(classes) == IS_ARRAY || !zend_is_true(classes))) { ALLOC_HASHTABLE(class_hash); zend_hash_init(class_hash, (Z_TYPE_P(classes) == IS_ARRAY)?zend_hash_num_elements(Z_ARRVAL_P(classes)):0, NULL, NULL, 0); } if(class_hash && Z_TYPE_P(classes) == IS_ARRAY) { zval *entry; zend_string *lcname; ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(classes), entry) { convert_to_string_ex(entry); lcname = zend_string_tolower(Z_STR_P(entry)); zend_hash_add_empty_element(class_hash, lcname); zend_string_release(lcname); } ZEND_HASH_FOREACH_END(); } } if (!php_var_unserialize_ex(return_value, &p, p + buf_len, &var_hash, class_hash)) { PHP_VAR_UNSERIALIZE_DESTROY(var_hash); if (class_hash) { zend_hash_destroy(class_hash); FREE_HASHTABLE(class_hash); } zval_ptr_dtor(return_value); if (!EG(exception)) { php_error_docref(NULL, E_NOTICE, "Error at offset " ZEND_LONG_FMT " of %zd bytes", (zend_long)((char*)p - buf), buf_len); } RETURN_FALSE; } /* We should keep an reference to return_value to prevent it from being dtor in case nesting calls to unserialize */ var_push_dtor(&var_hash, return_value); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); if (class_hash) { zend_hash_destroy(class_hash); FREE_HASHTABLE(class_hash); } }
CWE-416
5
static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file, struct snd_ctl_tlv __user *_tlv, int op_flag) { struct snd_card *card = file->card; struct snd_ctl_tlv tlv; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int len; int err = 0; if (copy_from_user(&tlv, _tlv, sizeof(tlv))) return -EFAULT; if (tlv.length < sizeof(unsigned int) * 2) return -EINVAL; down_read(&card->controls_rwsem); kctl = snd_ctl_find_numid(card, tlv.numid); if (kctl == NULL) { err = -ENOENT; goto __kctl_end; } if (kctl->tlv.p == NULL) { err = -ENXIO; goto __kctl_end; } vd = &kctl->vd[tlv.numid - kctl->id.numid]; if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) || (op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) || (op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) { err = -ENXIO; goto __kctl_end; } if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { if (vd->owner != NULL && vd->owner != file) { err = -EPERM; goto __kctl_end; } err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv); if (err > 0) { up_read(&card->controls_rwsem); snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id); return 0; } } else { if (op_flag) { err = -ENXIO; goto __kctl_end; } len = kctl->tlv.p[1] + 2 * sizeof(unsigned int); if (tlv.length < len) { err = -ENOMEM; goto __kctl_end; } if (copy_to_user(_tlv->tlv, kctl->tlv.p, len)) err = -EFAULT; } __kctl_end: up_read(&card->controls_rwsem); return err; }
CWE-416
5
display_dollar(colnr_T col) { colnr_T save_col; if (!redrawing()) return; cursor_off(); save_col = curwin->w_cursor.col; curwin->w_cursor.col = col; if (has_mbyte) { char_u *p; // If on the last byte of a multi-byte move to the first byte. p = ml_get_curline(); curwin->w_cursor.col -= (*mb_head_off)(p, p + col); } curs_columns(FALSE); // recompute w_wrow and w_wcol if (curwin->w_wcol < curwin->w_width) { edit_putchar('$', FALSE); dollar_vcol = curwin->w_virtcol; } curwin->w_cursor.col = save_col; }
CWE-126
9
jpeg_error_handler(j_common_ptr) { return; }
CWE-415
4
struct ipv6_txoptions *ipv6_update_options(struct sock *sk, struct ipv6_txoptions *opt) { if (inet_sk(sk)->is_icsk) { if (opt && !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) { struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); } } opt = xchg(&inet6_sk(sk)->opt, opt); sk_dst_reset(sk); return opt; }
CWE-416
5
hash_new_from_values(mrb_state *mrb, mrb_int argc, mrb_value *regs) { mrb_value hash = mrb_hash_new_capa(mrb, argc); while (argc--) { mrb_hash_set(mrb, hash, regs[0], regs[1]); regs += 2; } return hash; }
CWE-416
5
sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) { int k, at_head; Sg_device *sdp = sfp->parentdp; sg_io_hdr_t *hp = &srp->header; srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ hp->status = 0; hp->masked_status = 0; hp->msg_status = 0; hp->info = 0; hp->host_status = 0; hp->driver_status = 0; hp->resid = 0; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { if (srp->bio) blk_end_request_all(srp->rq, -EIO); sg_finish_rem_req(srp); return -ENODEV; } hp->duration = jiffies_to_msecs(jiffies); if (hp->interface_id != '\0' && /* v3 (or later) interface */ (SG_FLAG_Q_AT_TAIL & hp->flags)) at_head = 0; else at_head = 1; srp->rq->timeout = timeout; kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, srp->rq, at_head, sg_rq_end_io); return 0; }
CWE-415
4
static int af9005_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { int ret; u8 reply, *buf; buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL); if (!buf) return -ENOMEM; ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf, FW_BULKOUT_SIZE + 2); if (ret) goto err; deb_info("result of FW_CONFIG in identify state %d\n", reply); if (reply == 0x01) *cold = 1; else if (reply == 0x02) *cold = 0; else return -EIO; deb_info("Identify state cold = %d\n", *cold); err: kfree(buf); return ret; }
CWE-401
7
static int mif_process_cmpt(mif_hdr_t *hdr, char *buf) { jas_tvparser_t *tvp; mif_cmpt_t *cmpt; int id; cmpt = 0; tvp = 0; if (!(cmpt = mif_cmpt_create())) { goto error; } cmpt->tlx = 0; cmpt->tly = 0; cmpt->sampperx = 0; cmpt->samppery = 0; cmpt->width = 0; cmpt->height = 0; cmpt->prec = 0; cmpt->sgnd = -1; cmpt->data = 0; if (!(tvp = jas_tvparser_create(buf))) { goto error; } while (!(id = jas_tvparser_next(tvp))) { switch (jas_taginfo_nonull(jas_taginfos_lookup(mif_tags, jas_tvparser_gettag(tvp)))->id) { case MIF_TLX: cmpt->tlx = atoi(jas_tvparser_getval(tvp)); break; case MIF_TLY: cmpt->tly = atoi(jas_tvparser_getval(tvp)); break; case MIF_WIDTH: cmpt->width = atoi(jas_tvparser_getval(tvp)); break; case MIF_HEIGHT: cmpt->height = atoi(jas_tvparser_getval(tvp)); break; case MIF_HSAMP: cmpt->sampperx = atoi(jas_tvparser_getval(tvp)); break; case MIF_VSAMP: cmpt->samppery = atoi(jas_tvparser_getval(tvp)); break; case MIF_PREC: cmpt->prec = atoi(jas_tvparser_getval(tvp)); break; case MIF_SGND: cmpt->sgnd = atoi(jas_tvparser_getval(tvp)); break; case MIF_DATA: if (!(cmpt->data = jas_strdup(jas_tvparser_getval(tvp)))) { return -1; } break; } } jas_tvparser_destroy(tvp); if (!cmpt->sampperx || !cmpt->samppery) { goto error; } if (mif_hdr_addcmpt(hdr, hdr->numcmpts, cmpt)) { goto error; } return 0; error: if (cmpt) { mif_cmpt_destroy(cmpt); } if (tvp) { jas_tvparser_destroy(tvp); } return -1; }
CWE-416
5
find_match_text(colnr_T startcol, int regstart, char_u *match_text) { colnr_T col = startcol; int c1, c2; int len1, len2; int match; for (;;) { match = TRUE; len2 = MB_CHAR2LEN(regstart); // skip regstart for (len1 = 0; match_text[len1] != NUL; len1 += MB_CHAR2LEN(c1)) { c1 = PTR2CHAR(match_text + len1); c2 = PTR2CHAR(rex.line + col + len2); if (c1 != c2 && (!rex.reg_ic || MB_CASEFOLD(c1) != MB_CASEFOLD(c2))) { match = FALSE; break; } len2 += MB_CHAR2LEN(c2); } if (match // check that no composing char follows && !(enc_utf8 && utf_iscomposing(PTR2CHAR(rex.line + col + len2)))) { cleanup_subexpr(); if (REG_MULTI) { rex.reg_startpos[0].lnum = rex.lnum; rex.reg_startpos[0].col = col; rex.reg_endpos[0].lnum = rex.lnum; rex.reg_endpos[0].col = col + len2; } else { rex.reg_startp[0] = rex.line + col; rex.reg_endp[0] = rex.line + col + len2; } return 1L; } // Try finding regstart after the current match. col += MB_CHAR2LEN(regstart); // skip regstart if (skip_to_start(regstart, &col) == FAIL) break; } return 0L; }
CWE-122
8
void rose_stop_heartbeat(struct sock *sk) { del_timer(&sk->sk_timer); }
CWE-416
5
R_API bool r_crbtree_insert(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && cmp, false); bool inserted = false; if (tree->root == NULL) { tree->root = _node_new (data, NULL); if (tree->root == NULL) { return false; } inserted = true; goto out_exit; } RRBNode head; /* Fake tree root */ memset (&head, 0, sizeof (RRBNode)); RRBNode *g = NULL, *parent = &head; /* Grandparent & parent */ RRBNode *p = NULL, *q = tree->root; /* Iterator & parent */ int dir = 0, last = 0; /* Directions */ _set_link (parent, q, 1); for (;;) { if (!q) { /* Insert a node at first null link(also set its parent link) */ q = _node_new (data, p); if (!q) { return false; } p->link[dir] = q; inserted = true; } else if (IS_RED (q->link[0]) && IS_RED (q->link[1])) { /* Simple red violation: color flip */ q->red = 1; q->link[0]->red = 0; q->link[1]->red = 0; } if (IS_RED (q) && IS_RED (p)) { #if 0 // coverity error, parent is never null /* Hard red violation: rotate */ if (!parent) { return false; } #endif int dir2 = parent->link[1] == g; if (q == p->link[last]) { _set_link (parent, _rot_once (g, !last), dir2); } else { _set_link (parent, _rot_twice (g, !last), dir2); } } if (inserted) { break; } last = dir; dir = cmp (data, q->data, user) >= 0; if (g) { parent = g; } g = p; p = q; q = q->link[dir]; } /* Update root(it may different due to root rotation) */ tree->root = head.link[1]; out_exit: /* Invariant: root is black */ tree->root->red = 0; tree->root->parent = NULL; if (inserted) { tree->size++; } return inserted; }
CWE-416
5
void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) { usb_kill_urb(mixer->urb); usb_kill_urb(mixer->rc_urb); }
CWE-416
5
static struct desc_struct *get_desc(unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc = NULL; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) desc = &ldt->entries[sel]; mutex_unlock(&current->active_mm->context.lock); return desc; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return NULL; return (struct desc_struct *)(gdt_desc.address + desc_base); }
CWE-416
5
int genl_register_family(struct genl_family *family) { int err, i; int start = GENL_START_ALLOC, end = GENL_MAX_ID; err = genl_validate_ops(family); if (err) return err; genl_lock_all(); if (genl_family_find_byname(family->name)) { err = -EEXIST; goto errout_locked; } /* * Sadly, a few cases need to be special-cased * due to them having previously abused the API * and having used their family ID also as their * multicast group ID, so we use reserved IDs * for both to be sure we can do that mapping. */ if (family == &genl_ctrl) { /* and this needs to be special for initial family lookups */ start = end = GENL_ID_CTRL; } else if (strcmp(family->name, "pmcraid") == 0) { start = end = GENL_ID_PMCRAID; } else if (strcmp(family->name, "VFS_DQUOT") == 0) { start = end = GENL_ID_VFS_DQUOT; } if (family->maxattr && !family->parallel_ops) { family->attrbuf = kmalloc_array(family->maxattr + 1, sizeof(struct nlattr *), GFP_KERNEL); if (family->attrbuf == NULL) { err = -ENOMEM; goto errout_locked; } } else family->attrbuf = NULL; family->id = idr_alloc(&genl_fam_idr, family, start, end + 1, GFP_KERNEL); if (family->id < 0) { err = family->id; goto errout_locked; } err = genl_validate_assign_mc_groups(family); if (err) goto errout_remove; genl_unlock_all(); /* send all events */ genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0); for (i = 0; i < family->n_mcgrps; i++) genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family, &family->mcgrps[i], family->mcgrp_offset + i); return 0; errout_remove: idr_remove(&genl_fam_idr, family->id); kfree(family->attrbuf); errout_locked: genl_unlock_all(); return err; }
CWE-401
7
ignore_error_for_testing(char_u *error) { if (ignore_error_list.ga_itemsize == 0) ga_init2(&ignore_error_list, sizeof(char_u *), 1); if (STRCMP("RESET", error) == 0) ga_clear_strings(&ignore_error_list); else ga_add_string(&ignore_error_list, error); }
CWE-416
5
n_start_visual_mode(int c) { #ifdef FEAT_CONCEAL int cursor_line_was_concealed = curwin->w_p_cole > 0 && conceal_cursor_line(curwin); #endif VIsual_mode = c; VIsual_active = TRUE; VIsual_reselect = TRUE; trigger_modechanged(); // Corner case: the 0 position in a tab may change when going into // virtualedit. Recalculate curwin->w_cursor to avoid bad highlighting. if (c == Ctrl_V && (get_ve_flags() & VE_BLOCK) && gchar_cursor() == TAB) { validate_virtcol(); coladvance(curwin->w_virtcol); } VIsual = curwin->w_cursor; #ifdef FEAT_FOLDING foldAdjustVisual(); #endif setmouse(); #ifdef FEAT_CONCEAL // Check if redraw is needed after changing the state. conceal_check_cursor_line(cursor_line_was_concealed); #endif if (p_smd && msg_silent == 0) redraw_cmdline = TRUE; // show visual mode later #ifdef FEAT_CLIPBOARD // Make sure the clipboard gets updated. Needed because start and // end may still be the same, and the selection needs to be owned clip_star.vmode = NUL; #endif // Only need to redraw this line, unless still need to redraw an old // Visual area (when 'lazyredraw' is set). if (curwin->w_redr_type < INVERTED) { curwin->w_old_cursor_lnum = curwin->w_cursor.lnum; curwin->w_old_visual_lnum = curwin->w_cursor.lnum; } }
CWE-122
8
yank_copy_line(struct block_def *bd, long y_idx, int exclude_trailing_space) { char_u *pnew; if (exclude_trailing_space) bd->endspaces = 0; if ((pnew = alloc(bd->startspaces + bd->endspaces + bd->textlen + 1)) == NULL) return FAIL; y_current->y_array[y_idx] = pnew; vim_memset(pnew, ' ', (size_t)bd->startspaces); pnew += bd->startspaces; mch_memmove(pnew, bd->textstart, (size_t)bd->textlen); pnew += bd->textlen; vim_memset(pnew, ' ', (size_t)bd->endspaces); pnew += bd->endspaces; if (exclude_trailing_space) { int s = bd->textlen + bd->endspaces; while (VIM_ISWHITE(*(bd->textstart + s - 1)) && s > 0) { s = s - (*mb_head_off)(bd->textstart, bd->textstart + s - 1) - 1; pnew--; } } *pnew = NUL; return OK; }
CWE-122
8
destroyPresentationContextList(LST_HEAD ** l) { PRV_PRESENTATIONCONTEXTITEM * prvCtx; DUL_SUBITEM * subItem; if (*l == NULL) return; prvCtx = (PRV_PRESENTATIONCONTEXTITEM*)LST_Dequeue(l); while (prvCtx != NULL) { subItem = (DUL_SUBITEM*)LST_Dequeue(&prvCtx->transferSyntaxList); while (subItem != NULL) { free(subItem); subItem = (DUL_SUBITEM*)LST_Dequeue(&prvCtx->transferSyntaxList); } LST_Destroy(&prvCtx->transferSyntaxList); free(prvCtx); prvCtx = (PRV_PRESENTATIONCONTEXTITEM*)LST_Dequeue(l); } LST_Destroy(l); }
CWE-401
7
void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) { struct nci_dev *ndev = priv->ndev; if (priv->ndev->nfc_dev->fw_download_in_progress) nfcmrvl_fw_dnld_abort(priv); nfcmrvl_fw_dnld_deinit(priv); if (gpio_is_valid(priv->config.reset_n_io)) gpio_free(priv->config.reset_n_io); nci_unregister_device(ndev); nci_free_device(ndev); kfree(priv); }
CWE-416
5
mrb_realloc(mrb_state *mrb, void *p, size_t len) { void *p2; p2 = mrb_realloc_simple(mrb, p, len); if (len == 0) return p2; if (p2 == NULL) { mrb_free(mrb, p); mrb->gc.out_of_memory = TRUE; mrb_raise_nomemory(mrb); } else { mrb->gc.out_of_memory = FALSE; } return p2; }
CWE-415
4
_php_mb_regex_init_options(const char *parg, int narg, OnigOptionType *option, OnigSyntaxType **syntax, int *eval) { int n; char c; int optm = 0; *syntax = ONIG_SYNTAX_RUBY; if (parg != NULL) { n = 0; while(n < narg) { c = parg[n++]; switch (c) { case 'i': optm |= ONIG_OPTION_IGNORECASE; break; case 'x': optm |= ONIG_OPTION_EXTEND; break; case 'm': optm |= ONIG_OPTION_MULTILINE; break; case 's': optm |= ONIG_OPTION_SINGLELINE; break; case 'p': optm |= ONIG_OPTION_MULTILINE | ONIG_OPTION_SINGLELINE; break; case 'l': optm |= ONIG_OPTION_FIND_LONGEST; break; case 'n': optm |= ONIG_OPTION_FIND_NOT_EMPTY; break; case 'j': *syntax = ONIG_SYNTAX_JAVA; break; case 'u': *syntax = ONIG_SYNTAX_GNU_REGEX; break; case 'g': *syntax = ONIG_SYNTAX_GREP; break; case 'c': *syntax = ONIG_SYNTAX_EMACS; break; case 'r': *syntax = ONIG_SYNTAX_RUBY; break; case 'z': *syntax = ONIG_SYNTAX_PERL; break; case 'b': *syntax = ONIG_SYNTAX_POSIX_BASIC; break; case 'd': *syntax = ONIG_SYNTAX_POSIX_EXTENDED; break; case 'e': if (eval != NULL) *eval = 1; break; default: break; } } if (option != NULL) *option|=optm; } }
CWE-415
4
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pud_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
CWE-416
5
static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) { struct inode *inode = dentry->d_inode; int error = -EACCES; /* We don't need a base pointer in the /proc filesystem */ path_put(&nd->path); /* Are we allowed to snoop on the tasks file descriptors? */ if (!proc_fd_access_allowed(inode)) goto out; error = PROC_I(inode)->op.proc_get_link(inode, &nd->path); nd->last_type = LAST_BIND; out: return ERR_PTR(error); }
CWE-416
5
static void io_flush_timeouts(struct io_ring_ctx *ctx) __must_hold(&ctx->completion_lock) { u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); spin_lock_irq(&ctx->timeout_lock); while (!list_empty(&ctx->timeout_list)) { u32 events_needed, events_got; struct io_kiocb *req = list_first_entry(&ctx->timeout_list, struct io_kiocb, timeout.list); if (io_is_timeout_noseq(req)) break; /* * Since seq can easily wrap around over time, subtract * the last seq at which timeouts were flushed before comparing. * Assuming not more than 2^31-1 events have happened since, * these subtractions won't have wrapped, so we can check if * target is in [last_seq, current_seq] by comparing the two. */ events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush; events_got = seq - ctx->cq_last_tm_flush; if (events_got < events_needed) break; list_del_init(&req->timeout.list); io_kill_timeout(req, 0); } ctx->cq_last_tm_flush = seq; spin_unlock_irq(&ctx->timeout_lock); }
CWE-416
5
static void ext4_clamp_want_extra_isize(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; /* determine the minimum size of new large inodes, if present */ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE && sbi->s_want_extra_isize == 0) { sbi->s_want_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; if (ext4_has_feature_extra_isize(sb)) { if (sbi->s_want_extra_isize < le16_to_cpu(es->s_want_extra_isize)) sbi->s_want_extra_isize = le16_to_cpu(es->s_want_extra_isize); if (sbi->s_want_extra_isize < le16_to_cpu(es->s_min_extra_isize)) sbi->s_want_extra_isize = le16_to_cpu(es->s_min_extra_isize); } } /* Check if enough inode space is available */ if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > sbi->s_inode_size) { sbi->s_want_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; ext4_msg(sb, KERN_INFO, "required extra inode space not available"); } }
CWE-416
5
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, bool attach_req) { struct inet_request_sock *ireq = inet_rsk(req); struct ipv6_pinfo *np = inet6_sk(sk); struct flowi6 *fl6 = &fl->u.ip6; struct sk_buff *skb; int err = -ENOMEM; /* First, grab a route. */ if (!dst && (dst = inet6_csk_route_req(sk, fl6, req, IPPROTO_TCP)) == NULL) goto done; skb = tcp_make_synack(sk, dst, req, foc, attach_req); if (skb) { __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, &ireq->ir_v6_rmt_addr); fl6->daddr = ireq->ir_v6_rmt_addr; if (np->repflow && ireq->pktopts) fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); err = net_xmit_eval(err); } done: return err; }
CWE-416
5
get_function_line( exarg_T *eap, char_u **line_to_free, int indent, getline_opt_T getline_options) { char_u *theline; if (eap->getline == NULL) theline = getcmdline(':', 0L, indent, 0); else theline = eap->getline(':', eap->cookie, indent, getline_options); if (theline != NULL) { if (*eap->cmdlinep == *line_to_free) *eap->cmdlinep = theline; vim_free(*line_to_free); *line_to_free = theline; } return theline; }
CWE-416
5
int __close_fd_get_file(unsigned int fd, struct file **res) { struct files_struct *files = current->files; struct file *file; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; file = fdt->fd[fd]; if (!file) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); get_file(file); *res = file; return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); *res = NULL; return -ENOENT; }
CWE-416
5
static int ion_handle_put(struct ion_handle *handle) { struct ion_client *client = handle->client; int ret; mutex_lock(&client->lock); ret = kref_put(&handle->ref, ion_handle_destroy); mutex_unlock(&client->lock); return ret; }
CWE-416
5
BGD_DECLARE(void) gdImageWebpEx (gdImagePtr im, FILE * outFile, int quality) { gdIOCtx *out = gdNewFileCtx(outFile); if (out == NULL) { return; } gdImageWebpCtx(im, out, quality); out->gd_free(out); }
CWE-415
4
ut64 MACH0_(get_baddr)(struct MACH0_(obj_t)* bin) { int i; if (bin->hdr.filetype != MH_EXECUTE && bin->hdr.filetype != MH_DYLINKER) return 0; for (i = 0; i < bin->nsegs; ++i) if (bin->segs[i].fileoff == 0 && bin->segs[i].filesize != 0) return bin->segs[i].vmaddr; return 0; }
CWE-416
5
static void rds_tcp_kill_sock(struct net *net) { struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct socket *lsock = rtn->rds_tcp_listen_sock; rtn->rds_tcp_listen_sock = NULL; rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net || !tc->t_sock) continue; if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { list_move_tail(&tc->t_tcp_node, &tmp_list); } else { list_del(&tc->t_tcp_node); tc->t_tcp_node_detached = true; } } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) rds_conn_destroy(tc->t_cpath->cp_conn); }
CWE-416
5
void ZydisFormatterBufferInitTokenized(ZydisFormatterBuffer* buffer, ZydisFormatterToken** first_token, void* user_buffer, ZyanUSize length) { ZYAN_ASSERT(buffer); ZYAN_ASSERT(first_token); ZYAN_ASSERT(user_buffer); ZYAN_ASSERT(length); *first_token = user_buffer; (*first_token)->type = ZYDIS_TOKEN_INVALID; (*first_token)->next = 0; user_buffer = (ZyanU8*)user_buffer + sizeof(ZydisFormatterToken); length -= sizeof(ZydisFormatterToken); buffer->is_token_list = ZYAN_TRUE; buffer->capacity = length; buffer->string.flags = ZYAN_STRING_HAS_FIXED_CAPACITY; buffer->string.vector.allocator = ZYAN_NULL; buffer->string.vector.element_size = sizeof(char); buffer->string.vector.size = 1; buffer->string.vector.capacity = length; buffer->string.vector.data = user_buffer; *(char*)user_buffer = '\0'; }
CWE-457
10
get_visual_text( cmdarg_T *cap, char_u **pp, // return: start of selected text int *lenp) // return: length of selected text { if (VIsual_mode != 'V') unadjust_for_sel(); if (VIsual.lnum != curwin->w_cursor.lnum) { if (cap != NULL) clearopbeep(cap->oap); return FAIL; } if (VIsual_mode == 'V') { *pp = ml_get_curline(); *lenp = (int)STRLEN(*pp); } else { if (LT_POS(curwin->w_cursor, VIsual)) { *pp = ml_get_pos(&curwin->w_cursor); *lenp = VIsual.col - curwin->w_cursor.col + 1; } else { *pp = ml_get_pos(&VIsual); *lenp = curwin->w_cursor.col - VIsual.col + 1; } if (**pp == NUL) *lenp = 0; if (has_mbyte && *lenp > 0) // Correct the length to include all bytes of the last character. *lenp += (*mb_ptr2len)(*pp + (*lenp - 1)) - 1; } reset_VIsual_and_resel(); return OK; }
CWE-126
9
static int hgcm_call_preprocess_linaddr( const struct vmmdev_hgcm_function_parameter *src_parm, void **bounce_buf_ret, size_t *extra) { void *buf, *bounce_buf; bool copy_in; u32 len; int ret; buf = (void *)src_parm->u.pointer.u.linear_addr; len = src_parm->u.pointer.size; copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT; if (len > VBG_MAX_HGCM_USER_PARM) return -E2BIG; bounce_buf = kvmalloc(len, GFP_KERNEL); if (!bounce_buf) return -ENOMEM; if (copy_in) { ret = copy_from_user(bounce_buf, (void __user *)buf, len); if (ret) return -EFAULT; } else { memset(bounce_buf, 0, len); } *bounce_buf_ret = bounce_buf; hgcm_call_add_pagelist_size(bounce_buf, len, extra); return 0; }
CWE-401
7
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) { struct super_block *sb; int err; int rc; if (!ext4_handle_valid(handle)) { ext4_put_nojournal(handle); return 0; } if (!handle->h_transaction) { err = jbd2_journal_stop(handle); return handle->h_err ? handle->h_err : err; } sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = jbd2_journal_stop(handle); if (!err) err = rc; if (err) __ext4_std_error(sb, where, line, err); return err; }
CWE-416
5
RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); NE_image_segment_entry *se = &bin->segment_entries[i]; if (!bs) { return segments; } bs->size = se->length; bs->vsize = se->minAllocSz ? se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; }
CWE-129
6
int get_evtchn_to_irq(evtchn_port_t evtchn) { if (evtchn >= xen_evtchn_max_channels()) return -1; if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) return -1; return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]; }
CWE-416
5
static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); assert(q->slot); q->slot = sd_bus_slot_unref(q->slot); assert(!q->reply); q->reply = sd_bus_message_ref(reply); /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the * whole message processing again, and thus re-validating and re-retrieving the "userdata" field * again. * * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again, * i.e. after the second time the message is processed is complete. */ assert(!q->defer_event_source); r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q); if (r < 0) goto fail; r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE); if (r < 0) goto fail; r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT); if (r < 0) goto fail; r = sd_bus_message_rewind(q->request, true); if (r < 0) goto fail; r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request); if (r < 0) goto fail; return 1; fail: log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m"); (void) sd_bus_reply_method_errno(q->request, r, NULL); async_polkit_query_free(q); return r; }
CWE-416
5
static int kvm_ioctl_create_device(struct kvm *kvm, struct kvm_create_device *cd) { struct kvm_device_ops *ops = NULL; struct kvm_device *dev; bool test = cd->flags & KVM_CREATE_DEVICE_TEST; int ret; if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) return -ENODEV; ops = kvm_device_ops_table[cd->type]; if (ops == NULL) return -ENODEV; if (test) return 0; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->ops = ops; dev->kvm = kvm; mutex_lock(&kvm->lock); ret = ops->create(dev, cd->type); if (ret < 0) { mutex_unlock(&kvm->lock); kfree(dev); return ret; } list_add(&dev->vm_node, &kvm->devices); mutex_unlock(&kvm->lock); if (ops->init) ops->init(dev); ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); if (ret < 0) { ops->destroy(dev); mutex_lock(&kvm->lock); list_del(&dev->vm_node); mutex_unlock(&kvm->lock); return ret; } kvm_get_kvm(kvm); cd->fd = ret; return 0; }
CWE-416
5
GF_Err afra_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; ISOM_DECREASE_SIZE(ptr, 9) ptr->long_ids = gf_bs_read_int(bs, 1); ptr->long_offsets = gf_bs_read_int(bs, 1); ptr->global_entries = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 5); ptr->time_scale = gf_bs_read_u32(bs); ptr->entry_count = gf_bs_read_u32(bs); if (ptr->size / ( (ptr->long_offsets ? 16 : 12) ) < ptr->entry_count) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->entry_count; i++) { GF_AfraEntry *ae = gf_malloc(sizeof(GF_AfraEntry)); if (!ae) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 8) ae->offset = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->offset = gf_bs_read_u32(bs); } gf_list_insert(ptr->local_access_entries, ae, i); } if (ptr->global_entries) { ISOM_DECREASE_SIZE(ptr, 4) ptr->global_entry_count = gf_bs_read_u32(bs); for (i=0; i<ptr->global_entry_count; i++) { GF_GlobalAfraEntry *ae = gf_malloc(sizeof(GF_GlobalAfraEntry)); if (!ae) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_ids) { ISOM_DECREASE_SIZE(ptr, 8) ae->segment = gf_bs_read_u32(bs); ae->fragment = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->segment = gf_bs_read_u16(bs); ae->fragment = gf_bs_read_u16(bs); } if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 16) ae->afra_offset = gf_bs_read_u64(bs); ae->offset_from_afra = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8) ae->afra_offset = gf_bs_read_u32(bs); ae->offset_from_afra = gf_bs_read_u32(bs); } gf_list_insert(ptr->global_access_entries, ae, i); } } return GF_OK; }
CWE-401
7
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { get_page(buf->page); }
CWE-416
5
ASC_destroyAssociation(T_ASC_Association ** association) { OFCondition cond = EC_Normal; /* don't worry if already destroyed */ if (association == NULL) return EC_Normal; if (*association == NULL) return EC_Normal; if ((*association)->DULassociation != NULL) { ASC_dropAssociation(*association); } if ((*association)->params != NULL) { cond = ASC_destroyAssociationParameters(&(*association)->params); if (cond.bad()) return cond; } if ((*association)->sendPDVBuffer != NULL) free((*association)->sendPDVBuffer); free(*association); *association = NULL; return EC_Normal; }
CWE-401
7
static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) { struct bfq_data *bfqd = bfqq->bfqd; enum bfqq_expiration reason; unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); bfq_clear_bfqq_wait_request(bfqq); if (bfqq != bfqd->in_service_queue) { spin_unlock_irqrestore(&bfqd->lock, flags); return; } if (bfq_bfqq_budget_timeout(bfqq)) /* * Also here the queue can be safely expired * for budget timeout without wasting * guarantees */ reason = BFQQE_BUDGET_TIMEOUT; else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) /* * The queue may not be empty upon timer expiration, * because we may not disable the timer when the * first request of the in-service queue arrives * during disk idling. */ reason = BFQQE_TOO_IDLE; else goto schedule_dispatch; bfq_bfqq_expire(bfqd, bfqq, true, reason); schedule_dispatch: spin_unlock_irqrestore(&bfqd->lock, flags); bfq_schedule_dispatch(bfqd); }
CWE-416
5
int read_file(struct sc_card *card, char *str_path, unsigned char **data, size_t *data_len) { struct sc_path path; struct sc_file *file; unsigned char *p; int ok = 0; int r; size_t len; sc_format_path(str_path, &path); if (SC_SUCCESS != sc_select_file(card, &path, &file)) { goto err; } len = file ? file->size : 4096; p = realloc(*data, len); if (!p) { goto err; } *data = p; *data_len = len; r = sc_read_binary(card, 0, p, len, 0); if (r < 0) goto err; *data_len = r; ok = 1; err: sc_file_free(file); return ok; }
CWE-415
4
max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, int force_toggles) { struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); int old_epnum, same_ep, rcvtog, sndtog; struct usb_device *old_dev; u8 hctl; old_dev = max3421_hcd->loaded_dev; old_epnum = max3421_hcd->loaded_epnum; same_ep = (dev == old_dev && epnum == old_epnum); if (same_ep && !force_toggles) return; if (old_dev && !same_ep) { /* save the old end-points toggles: */ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; /* no locking: HCD (i.e., we) own toggles, don't we? */ usb_settoggle(old_dev, old_epnum, 0, rcvtog); usb_settoggle(old_dev, old_epnum, 1, sndtog); } /* setup new endpoint's toggle bits: */ rcvtog = usb_gettoggle(dev, epnum, 0); sndtog = usb_gettoggle(dev, epnum, 1); hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); max3421_hcd->loaded_epnum = epnum; spi_wr8(hcd, MAX3421_REG_HCTL, hctl); /* * Note: devnum for one and the same device can change during * address-assignment so it's best to just always load the * address whenever the end-point changed/was forced. */ max3421_hcd->loaded_dev = dev; spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); }
CWE-416
5
static void put_crypt_info(struct fscrypt_info *ci) { if (!ci) return; key_put(ci->ci_keyring_key); crypto_free_skcipher(ci->ci_ctfm); kmem_cache_free(fscrypt_info_cachep, ci); }
CWE-416
5
static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); int err = 0; mutex_lock(&swhash->hlist_mutex); if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { struct swevent_hlist *hlist; hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); if (!hlist) { err = -ENOMEM; goto exit; } rcu_assign_pointer(swhash->swevent_hlist, hlist); } swhash->hlist_refcount++; exit: mutex_unlock(&swhash->hlist_mutex); return err; }
CWE-416
5
int __mdiobus_register(struct mii_bus *bus, struct module *owner) { struct mdio_device *mdiodev; int i, err; struct gpio_desc *gpiod; if (NULL == bus || NULL == bus->name || NULL == bus->read || NULL == bus->write) return -EINVAL; BUG_ON(bus->state != MDIOBUS_ALLOCATED && bus->state != MDIOBUS_UNREGISTERED); bus->owner = owner; bus->dev.parent = bus->parent; bus->dev.class = &mdio_bus_class; bus->dev.groups = NULL; dev_set_name(&bus->dev, "%s", bus->id); err = device_register(&bus->dev); if (err) { pr_err("mii_bus %s failed to register\n", bus->id); put_device(&bus->dev); return -EINVAL; } mutex_init(&bus->mdio_lock); /* de-assert bus level PHY GPIO reset */ gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod)) { dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", bus->id); device_del(&bus->dev); return PTR_ERR(gpiod); } else if (gpiod) { bus->reset_gpiod = gpiod; gpiod_set_value_cansleep(gpiod, 1); udelay(bus->reset_delay_us); gpiod_set_value_cansleep(gpiod, 0); } if (bus->reset) bus->reset(bus); for (i = 0; i < PHY_MAX_ADDR; i++) { if ((bus->phy_mask & (1 << i)) == 0) { struct phy_device *phydev; phydev = mdiobus_scan(bus, i); if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) { err = PTR_ERR(phydev); goto error; } } } mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device); bus->state = MDIOBUS_REGISTERED; pr_info("%s: probed\n", bus->name); return 0; error: while (--i >= 0) { mdiodev = bus->mdio_map[i]; if (!mdiodev) continue; mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); } /* Put PHYs in RESET to save power */ if (bus->reset_gpiod) gpiod_set_value_cansleep(bus->reset_gpiod, 1); device_del(&bus->dev); return err; }
CWE-416
5
static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); }
CWE-415
4
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, int port) { unsigned long flags; struct snd_seq_client_port *new_port, *p; int num = -1; /* sanity check */ if (snd_BUG_ON(!client)) return NULL; if (client->num_ports >= SNDRV_SEQ_MAX_PORTS) { pr_warn("ALSA: seq: too many ports for client %d\n", client->number); return NULL; } /* create a new port */ new_port = kzalloc(sizeof(*new_port), GFP_KERNEL); if (!new_port) return NULL; /* failure, out of memory */ /* init port data */ new_port->addr.client = client->number; new_port->addr.port = -1; new_port->owner = THIS_MODULE; sprintf(new_port->name, "port-%d", num); snd_use_lock_init(&new_port->use_lock); port_subs_info_init(&new_port->c_src); port_subs_info_init(&new_port->c_dest); num = port >= 0 ? port : 0; mutex_lock(&client->ports_mutex); write_lock_irqsave(&client->ports_lock, flags); list_for_each_entry(p, &client->ports_list_head, list) { if (p->addr.port > num) break; if (port < 0) /* auto-probe mode */ num = p->addr.port + 1; } /* insert the new port */ list_add_tail(&new_port->list, &p->list); client->num_ports++; new_port->addr.port = num; /* store the port number in the port */ write_unlock_irqrestore(&client->ports_lock, flags); mutex_unlock(&client->ports_mutex); sprintf(new_port->name, "port-%d", num); return new_port; }
CWE-416
5
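A plausible reading of the CWE-416 label above (an inference from the code shape, consistent with the ALSA sequencer race later tracked as CVE-2017-15265): the port is linked into client->ports_list_head and becomes visible to concurrent lookups before its final fields are settled, so a racing delete can free it while it is still in use. A minimal sketch of the fix pattern, initialize fully and publish last:

    /* sketch, assuming the ports_lock is held: finish all setup first */
    new_port->addr.port = num;
    sprintf(new_port->name, "port-%d", num);
    list_add_tail(&new_port->list, &p->list);   /* publish only when complete */
    client->num_ports++;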
static int read_private_key(RSA *rsa)
{
    int r;
    sc_path_t path;
    sc_file_t *file;
    const sc_acl_entry_t *e;
    u8 buf[2048], *p = buf;
    size_t bufsize, keysize;

    r = select_app_df();
    if (r)
        return 1;
    sc_format_path("I0012", &path);
    r = sc_select_file(card, &path, &file);
    if (r) {
        fprintf(stderr, "Unable to select private key file: %s\n",
                sc_strerror(r));
        return 2;
    }
    e = sc_file_get_acl_entry(file, SC_AC_OP_READ);
    if (e == NULL || e->method == SC_AC_NEVER)
        return 10;
    bufsize = file->size;
    sc_file_free(file);
    r = sc_read_binary(card, 0, buf, bufsize, 0);
    if (r < 0) {
        fprintf(stderr, "Unable to read private key file: %s\n",
                sc_strerror(r));
        return 2;
    }
    bufsize = r;

    do {
        if (bufsize < 4)
            return 3;
        keysize = (p[0] << 8) | p[1];
        if (keysize == 0)
            break;
        if (keysize < 3)
            return 3;
        if (p[2] == opt_key_num)
            break;
        p += keysize;
        bufsize -= keysize;
    } while (1);

    if (keysize == 0) {
        printf("Key number %d not found.\n", opt_key_num);
        return 2;
    }

    return parse_private_key(p, keysize, rsa);
}
CWE-415
4
void dump_mm(const struct mm_struct *mm)
{
    pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU
        "get_unmapped_area %px\n"
#endif
        "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
        "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
        "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
        "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
        "start_code %lx end_code %lx start_data %lx end_data %lx\n"
        "start_brk %lx brk %lx start_stack %lx\n"
        "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
        "binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
        "ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
        "owner %px "
#endif
        "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
        "mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
        "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
        "tlb_flush_pending %d\n"
        "def_flags: %#lx(%pGv)\n",
        mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
        mm->get_unmapped_area,
#endif
        mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
        mm->pgd, atomic_read(&mm->mm_users),
        atomic_read(&mm->mm_count),
        mm_pgtables_bytes(mm),
        mm->map_count,
        mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
        mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
        mm->start_code, mm->end_code, mm->start_data, mm->end_data,
        mm->start_brk, mm->brk, mm->start_stack,
        mm->arg_start, mm->arg_end,
        mm->env_start, mm->env_end,
        mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
        mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
        mm->owner,
#endif
        mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
        mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
        mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
        atomic_read(&mm->tlb_flush_pending),
        mm->def_flags, &mm->def_flags);
}
CWE-416
5
int bnxt_re_create_srq(struct ib_srq *ib_srq,
                       struct ib_srq_init_attr *srq_init_attr,
                       struct ib_udata *udata)
{
    struct ib_pd *ib_pd = ib_srq->pd;
    struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
    struct bnxt_re_dev *rdev = pd->rdev;
    struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
    struct bnxt_re_srq *srq =
            container_of(ib_srq, struct bnxt_re_srq, ib_srq);
    struct bnxt_qplib_nq *nq = NULL;
    int rc, entries;

    if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
        dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
        rc = -EINVAL;
        goto exit;
    }

    if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
        rc = -EOPNOTSUPP;
        goto exit;
    }

    srq->rdev = rdev;
    srq->qplib_srq.pd = &pd->qplib_pd;
    srq->qplib_srq.dpi = &rdev->dpi_privileged;
    /* Allocate 1 more than what's provided so posting max doesn't
     * mean empty
     */
    entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
    if (entries > dev_attr->max_srq_wqes + 1)
        entries = dev_attr->max_srq_wqes + 1;

    srq->qplib_srq.max_wqe = entries;
    srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
    srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
    srq->srq_limit = srq_init_attr->attr.srq_limit;
    srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
    nq = &rdev->nq[0];

    if (udata) {
        rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
        if (rc)
            goto fail;
    }

    rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
    if (rc) {
        dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
        goto fail;
    }

    if (udata) {
        struct bnxt_re_srq_resp resp;

        resp.srqid = srq->qplib_srq.id;
        rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (rc) {
            dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
            bnxt_qplib_destroy_srq(&rdev->qplib_res,
                                   &srq->qplib_srq);
            goto exit;
        }
    }
    if (nq)
        nq->budget++;
    atomic_inc(&rdev->srq_count);

    return 0;

fail:
    ib_umem_release(srq->umem);
exit:
    return rc;
}
CWE-401
7
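The CWE-401 label above is easiest to anchor in the udata error path: when ib_copy_to_udata() fails, the code tears down the hardware SRQ but jumps to exit, skipping the ib_umem_release() that only the fail label performs, so the pinned user memory leaks. Sketch of the one-line fix:

    if (rc) {
        dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
        bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq);
        goto fail;   /* was: goto exit, which leaked srq->umem */
    }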
static int my_login(pTHX_ SV* dbh, imp_dbh_t *imp_dbh)
{
    SV* sv;
    HV* hv;
    char* dbname;
    char* host;
    char* port;
    char* user;
    char* password;
    char* mysql_socket;
    int result;
    D_imp_xxh(dbh);

    /* TODO- resolve this so that it is set only if DBI is 1.607 */
#define TAKE_IMP_DATA_VERSION 1
#if TAKE_IMP_DATA_VERSION
    if (DBIc_has(imp_dbh, DBIcf_IMPSET))
    {   /* eg from take_imp_data() */
        if (DBIc_has(imp_dbh, DBIcf_ACTIVE))
        {
            if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
                PerlIO_printf(DBIc_LOGPIO(imp_xxh), "my_login skip connect\n");
            /* tell our parent we've adopted an active child */
            ++DBIc_ACTIVE_KIDS(DBIc_PARENT_COM(imp_dbh));
            return TRUE;
        }
        if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "my_login IMPSET but not ACTIVE so connect not skipped\n");
    }
#endif

    sv = DBIc_IMP_DATA(imp_dbh);

    if (!sv || !SvROK(sv))
        return FALSE;

    hv = (HV*) SvRV(sv);
    if (SvTYPE(hv) != SVt_PVHV)
        return FALSE;

    host = safe_hv_fetch(aTHX_ hv, "host", 4);
    port = safe_hv_fetch(aTHX_ hv, "port", 4);
    user = safe_hv_fetch(aTHX_ hv, "user", 4);
    password = safe_hv_fetch(aTHX_ hv, "password", 8);
    dbname = safe_hv_fetch(aTHX_ hv, "database", 8);
    mysql_socket = safe_hv_fetch(aTHX_ hv, "mysql_socket", 12);

    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
        PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                      "imp_dbh->my_login : dbname = %s, uid = %s, pwd = %s,"
                      "host = %s, port = %s\n",
                      dbname ? dbname : "NULL",
                      user ? user : "NULL",
                      password ? password : "NULL",
                      host ? host : "NULL",
                      port ? port : "NULL");

    if (!imp_dbh->pmysql) {
        Newz(908, imp_dbh->pmysql, 1, MYSQL);
    }
    result = mysql_dr_connect(dbh, imp_dbh->pmysql, mysql_socket, host, port,
                              user, password, dbname, imp_dbh) ? TRUE : FALSE;
    if (!result)
        Safefree(imp_dbh->pmysql);
    return result;
}
CWE-416
5
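One way this matches CWE-416 (hedged; it lines up with the DBD::mysql report tracked as CVE-2017-10788, an attribution I am inferring): Safefree(imp_dbh->pmysql) releases the handle but leaves the pointer dangling, and later driver entry points such as disconnect or DESTROY can dereference or free it again. Minimal sketch:

    if (!result) {
        Safefree(imp_dbh->pmysql);
        imp_dbh->pmysql = NULL;   /* keep later calls from touching freed memory */
    }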
ga_add_string(garray_T *gap, char_u *p)
{
    char_u *cp = vim_strsave(p);

    if (cp == NULL)
        return FAIL;

    if (ga_grow(gap, 1) == FAIL)
    {
        vim_free(cp);
        return FAIL;
    }
    ((char_u **)(gap->ga_data))[gap->ga_len++] = cp;
    return OK;
}
CWE-416
5
static int cac_get_serial_nr_from_CUID(sc_card_t* card, sc_serial_number_t* serial)
{
    cac_private_data_t * priv = CAC_DATA(card);

    SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL);
    if (card->serialnr.len) {
        *serial = card->serialnr;
        SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
    }
    if (priv->cac_id_len) {
        serial->len = MIN(priv->cac_id_len, SC_MAX_SERIALNR);
        memcpy(serial->value, priv->cac_id, priv->cac_id_len);
        SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS);
    }
    SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_NOT_FOUND);
}
CWE-415
4
static struct file *path_openat(int dfd, struct filename *pathname,
                                struct nameidata *nd,
                                const struct open_flags *op, int flags)
{
    struct file *file;
    struct path path;
    int opened = 0;
    int error;

    file = get_empty_filp();
    if (IS_ERR(file))
        return file;

    file->f_flags = op->open_flag;

    if (unlikely(file->f_flags & __O_TMPFILE)) {
        error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
        goto out;
    }

    error = path_init(dfd, pathname, flags, nd);
    if (unlikely(error))
        goto out;

    error = do_last(nd, &path, file, op, &opened, pathname);
    while (unlikely(error > 0)) { /* trailing symlink */
        struct path link = path;
        void *cookie;

        if (!(nd->flags & LOOKUP_FOLLOW)) {
            path_put_conditional(&path, nd);
            path_put(&nd->path);
            error = -ELOOP;
            break;
        }
        error = may_follow_link(&link, nd);
        if (unlikely(error))
            break;
        nd->flags |= LOOKUP_PARENT;
        nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
        error = follow_link(&link, nd, &cookie);
        if (unlikely(error))
            break;
        error = do_last(nd, &path, file, op, &opened, pathname);
        put_link(nd, &link, cookie);
    }
out:
    path_cleanup(nd);
    if (!(opened & FILE_OPENED)) {
        BUG_ON(!error);
        put_filp(file);
    }
    if (unlikely(error)) {
        if (error == -EOPENSTALE) {
            if (flags & LOOKUP_RCU)
                error = -ECHILD;
            else
                error = -ESTALE;
        }
        file = ERR_PTR(error);
    }
    return file;
}
CWE-416
5
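A hedged guess at this CWE-416 label: the shape matches CVE-2015-5706, where a failing do_tmpfile() could drop the file reference itself, after which the common out: block calls put_filp() on the same object, a double release that surfaces as use-after-free on the struct file. The fix direction is to keep exactly one release point; conceptually:

    if (unlikely(file->f_flags & __O_TMPFILE)) {
        error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
        /* invariant to restore (assumed): on failure do_tmpfile() must not
         * drop 'file'; the put_filp() under out: is the sole owner */
        goto out;
    }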
static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
{
    struct pcie_service_card *card = adapter->card;
    struct sk_buff *skb;

    /* Allocate memory for receiving command response data */
    skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
    if (!skb) {
        mwifiex_dbg(adapter, ERROR,
                    "Unable to allocate skb for command response data.\n");
        return -ENOMEM;
    }
    skb_put(skb, MWIFIEX_UPLD_SIZE);
    if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
                               PCI_DMA_FROMDEVICE))
        return -1;

    card->cmdrsp_buf = skb;

    return 0;
}
CWE-401
7
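The CWE-401 label here is concrete: when mwifiex_map_pci_memory() fails, the freshly allocated skb is never released. Sketch of the fix:

    if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
                               PCI_DMA_FROMDEVICE)) {
        kfree_skb(skb);   /* was leaked on the mapping-failure path */
        return -1;
    }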
static int perf_swevent_add(struct perf_event *event, int flags)
{
    struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
    struct hw_perf_event *hwc = &event->hw;
    struct hlist_head *head;

    if (is_sampling_event(event)) {
        hwc->last_period = hwc->sample_period;
        perf_swevent_set_period(event);
    }

    hwc->state = !(flags & PERF_EF_START);

    head = find_swevent_head(swhash, event);
    if (!head) {
        /*
         * We can race with cpu hotplug code. Do not
         * WARN if the cpu just got unplugged.
         */
        WARN_ON_ONCE(swhash->online);
        return -EINVAL;
    }

    hlist_add_head_rcu(&event->hlist_entry, head);
    perf_event_update_userpage(event);

    return 0;
}
CWE-416
5
static void smp_task_timedout(struct timer_list *t)
{
    struct sas_task_slow *slow = from_timer(slow, t, timer);
    struct sas_task *task = slow->task;
    unsigned long flags;

    spin_lock_irqsave(&task->task_state_lock, flags);
    if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    complete(&task->slow_task->completion);
}
CWE-416
5
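A hedged reading of the CWE-416 label, matching the known libsas race fix: the SMP response can arrive at the same moment the timer fires, and because complete() sits outside the guarded region both paths signal completion, letting the task be freed while the other side still dereferences it. The upstream-style correction moves complete() under the state check:

    spin_lock_irqsave(&task->task_state_lock, flags);
    if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
        complete(&task->slow_task->completion);   /* signal only if we won the race */
    }
    spin_unlock_irqrestore(&task->task_state_lock, flags);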
void *gdImageJpegPtr (gdImagePtr im, int *size, int quality)
{
    void *rv;
    gdIOCtx *out = gdNewDynamicCtx (2048, NULL);

    gdImageJpegCtx (im, out, quality);
    rv = gdDPExtractData (out, size);
    out->gd_free (out);
    return rv;
}
CWE-415
4
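Two things stand out against the CWE-415 label (hedged): gdNewDynamicCtx() is not checked for NULL, and a failure inside gdImageJpegCtx() can leave the dynamic context's buffer in a state where extraction and teardown both dispose of it, the same double-free family as libgd's gdImageWebpPtr bug. A defensive sketch; the error-returning Ctx variant is an assumption modeled on how later libgd releases report encoder failure:

    gdIOCtx *out = gdNewDynamicCtx(2048, NULL);
    if (out == NULL)
        return NULL;                      /* missing in the original */
    if (gdImageJpegCtx(im, out, quality)) /* assumed int-returning variant */
        rv = NULL;                        /* do not extract a dead buffer */
    else
        rv = gdDPExtractData(out, size);
    out->gd_free(out);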
void Curl_detach_connnection(struct Curl_easy *data)
{
    struct connectdata *conn = data->conn;

    if (conn)
        Curl_llist_remove(&conn->easyq, &data->conn_queue, NULL);
    data->conn = NULL;
}
CWE-416
5
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
    struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)&cb->args[1];
    struct net *net = sock_net(cb->skb->sk);

    xfrm_policy_walk_done(walk, net);
    return 0;
}
CWE-416
5
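The CWE-416 label fits the known netlink dump bug (CVE-2017-16939 is my inferred attribution): if the dump is torn down before the dump callback ever initialized the walk stored in cb->args, xfrm_policy_walk_done() unlinks an entry that was never added, corrupting the walker list and enabling use-after-free. Upstream moved initialization to a .start callback; a guard on the started flag sketches the same idea:

    static int xfrm_dump_policy_done(struct netlink_callback *cb)
    {
        struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)&cb->args[1];
        struct net *net = sock_net(cb->skb->sk);

        if (cb->args[0])                 /* only if the walk was started */
            xfrm_policy_walk_done(walk, net);
        return 0;
    }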
archive_read_format_rar_read_data(struct archive_read *a, const void **buff,
                                  size_t *size, int64_t *offset)
{
    struct rar *rar = (struct rar *)(a->format->data);
    int ret;

    if (rar->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) {
        rar->has_encrypted_entries = 0;
    }

    if (rar->bytes_unconsumed > 0) {
        /* Consume as much as the decompressor actually used. */
        __archive_read_consume(a, rar->bytes_unconsumed);
        rar->bytes_unconsumed = 0;
    }

    *buff = NULL;
    if (rar->entry_eof || rar->offset_seek >= rar->unp_size) {
        *size = 0;
        *offset = rar->offset;
        if (*offset < rar->unp_size)
            *offset = rar->unp_size;
        return (ARCHIVE_EOF);
    }

    switch (rar->compression_method) {
    case COMPRESS_METHOD_STORE:
        ret = read_data_stored(a, buff, size, offset);
        break;

    case COMPRESS_METHOD_FASTEST:
    case COMPRESS_METHOD_FAST:
    case COMPRESS_METHOD_NORMAL:
    case COMPRESS_METHOD_GOOD:
    case COMPRESS_METHOD_BEST:
        ret = read_data_compressed(a, buff, size, offset);
        if (ret != ARCHIVE_OK && ret != ARCHIVE_WARN)
            __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context);
        break;

    default:
        archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                          "Unsupported compression method for RAR file.");
        ret = ARCHIVE_FATAL;
        break;
    }
    return (ret);
}
CWE-416
5
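Loosely hedged: this resembles the libarchive RAR use-after-free later tracked as CVE-2019-18408 (my attribution), where the Ppmd7 context freed on a decode error could be reused by a later read because the decoder state still claimed it was live. The fix direction is to invalidate the state next to the free; the exact flag used here is an assumption:

    if (ret != ARCHIVE_OK && ret != ARCHIVE_WARN) {
        __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context);
        rar->start_new_table = 1;   /* assumed reset so the freed context
                                       is rebuilt, never reused */
    }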
static int get_gate_page(struct mm_struct *mm, unsigned long address,
                         unsigned int gup_flags, struct vm_area_struct **vma,
                         struct page **page)
{
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    int ret = -EFAULT;

    /* user gate pages are read-only */
    if (gup_flags & FOLL_WRITE)
        return -EFAULT;
    if (address > TASK_SIZE)
        pgd = pgd_offset_k(address);
    else
        pgd = pgd_offset_gate(mm, address);
    BUG_ON(pgd_none(*pgd));
    p4d = p4d_offset(pgd, address);
    BUG_ON(p4d_none(*p4d));
    pud = pud_offset(p4d, address);
    BUG_ON(pud_none(*pud));
    pmd = pmd_offset(pud, address);
    if (!pmd_present(*pmd))
        return -EFAULT;
    VM_BUG_ON(pmd_trans_huge(*pmd));
    pte = pte_offset_map(pmd, address);
    if (pte_none(*pte))
        goto unmap;
    *vma = get_gate_vma(mm);
    if (!page)
        goto out;
    *page = vm_normal_page(*vma, address, *pte);
    if (!*page) {
        if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
            goto unmap;
        *page = pte_page(*pte);

        /*
         * This should never happen (a device public page in the gate
         * area).
         */
        if (is_device_public_page(*page))
            goto unmap;
    }
    get_page(*page);
out:
    ret = 0;
unmap:
    pte_unmap(pte);
    return ret;
}
CWE-416
5
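Hedged reading of the CWE-416 label: get_page() performs an unchecked refcount increment, and with attacker-arranged reference counts it can overflow (the CVE-2019-11487 family), after which the page is freed while mappings survive. The hardened kernels switched this call site to the fallible variant:

    if (unlikely(!try_get_page(*page))) {   /* fails instead of overflowing */
        ret = -ENOMEM;
        goto unmap;
    }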
static inline void pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
{
    buf->ops->get(pipe, buf);
}
CWE-416
5
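Same family, almost certainly (still an inference): the op ends up bumping a page refcount with no overflow check, so enough pipe buffers aimed at one page wrap the count and the page is freed early, a classic CWE-416. Upstream made the operation fallible:

    static inline bool pipe_buf_get(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
    {
        return buf->ops->get(pipe, buf);   /* ops->get now reports failure */
    }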
int read_file(struct sc_card *card, char *str_path, unsigned char **data,
              size_t *data_len)
{
    struct sc_path path;
    struct sc_file *file;
    unsigned char *p;
    int ok = 0;
    int r;
    size_t len;

    sc_format_path(str_path, &path);
    if (SC_SUCCESS != sc_select_file(card, &path, &file)) {
        goto err;
    }

    len = file ? file->size : 4096;
    p = realloc(*data, len);
    if (!p) {
        goto err;
    }
    *data = p;
    *data_len = len;

    r = sc_read_binary(card, 0, p, len, 0);
    if (r < 0)
        goto err;

    *data_len = r;

    ok = 1;

err:
    sc_file_free(file);
    return ok;
}
CWE-415
4
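Here the CWE-415 label likely keys off file: it is never initialized, so when sc_select_file() fails the err: path hands an indeterminate pointer to sc_file_free(), an invalid or double free depending on what happened to be on the stack. One-line sketch of the fix:

    struct sc_file *file = NULL;   /* sc_file_free(NULL) in err: becomes a no-op */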
check_lnums(int do_curwin)
{
    win_T     *wp;
    tabpage_T *tp;

    FOR_ALL_TAB_WINDOWS(tp, wp)
        if ((do_curwin || wp != curwin) && wp->w_buffer == curbuf)
        {
            // save the original cursor position and topline
            wp->w_save_cursor.w_cursor_save = wp->w_cursor;
            wp->w_save_cursor.w_topline_save = wp->w_topline;

            if (wp->w_cursor.lnum > curbuf->b_ml.ml_line_count)
                wp->w_cursor.lnum = curbuf->b_ml.ml_line_count;
            if (wp->w_topline > curbuf->b_ml.ml_line_count)
                wp->w_topline = curbuf->b_ml.ml_line_count;

            // save the corrected cursor position and topline
            wp->w_save_cursor.w_cursor_corr = wp->w_cursor;
            wp->w_save_cursor.w_topline_corr = wp->w_topline;
        }
}
CWE-122
8
compile_lock_unlock(
    lval_T  *lvp,
    char_u  *name_end,
    exarg_T *eap,
    int     deep,
    void    *coookie)
{
    cctx_T    *cctx = coookie;
    int       cc = *name_end;
    char_u    *p = lvp->ll_name;
    int       ret = OK;
    size_t    len;
    char_u    *buf;
    isntype_T isn = ISN_EXEC;

    if (cctx->ctx_skip == SKIP_YES)
        return OK;

    // Cannot use :lockvar and :unlockvar on local variables.
    if (p[1] != ':')
    {
        char_u *end = find_name_end(p, NULL, NULL, FNE_CHECK_START);

        if (lookup_local(p, end - p, NULL, cctx) == OK)
        {
            char_u *s = p;

            if (*end != '.' && *end != '[')
            {
                emsg(_(e_cannot_lock_unlock_local_variable));
                return FAIL;
            }

            // For "d.member" put the local variable on the stack, it will be
            // passed to ex_lockvar() indirectly.
            if (compile_load(&s, end, cctx, FALSE, FALSE) == FAIL)
                return FAIL;
            isn = ISN_LOCKUNLOCK;
        }
    }

    // Checking is done at runtime.
    *name_end = NUL;
    len = name_end - p + 20;
    buf = alloc(len);
    if (buf == NULL)
        ret = FAIL;
    else
    {
        char *cmd = eap->cmdidx == CMD_lockvar ? "lockvar" : "unlockvar";

        if (deep < 0)
            vim_snprintf((char *)buf, len, "%s! %s", cmd, p);
        else
            vim_snprintf((char *)buf, len, "%s %d %s", cmd, deep, p);
        ret = generate_EXEC_copy(cctx, isn, buf);

        vim_free(buf);
        *name_end = cc;
    }
    return ret;
}
CWE-122
8
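My arithmetic reading of the CWE-122 label: buf is sized name_end - p + 20, but the formatted command can need more. "unlockvar" is 9 bytes, and a blank plus a full int depth of up to 11 digits plus another blank already reaches 22 bytes beyond the name before the terminating NUL, so the fixed margin is not obviously sufficient. Widening it is one plausible fix; the exact upstream patch may differ:

    len = name_end - p + 30;   /* assumed margin: "unlockvar " plus an
                                  11-digit depth, a blank, and the NUL */
    buf = alloc(len);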
destroyPresentationContextList(LST_HEAD **lst)
{
    DUL_PRESENTATIONCONTEXT *pc;
    DUL_TRANSFERSYNTAX *ts;

    if ((lst == NULL) || (*lst == NULL))
        return;
    while ((pc = (DUL_PRESENTATIONCONTEXT*) LST_Dequeue(lst)) != NULL) {
        if (pc->proposedTransferSyntax != NULL) {
            while ((ts = (DUL_TRANSFERSYNTAX*) LST_Dequeue(&pc->proposedTransferSyntax)) != NULL) {
                free(ts);
            }
            LST_Destroy(&pc->proposedTransferSyntax);
        }
        free(pc);
    }
    LST_Destroy(lst);
}
CWE-401
7
njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval)
{
    njs_int_t                 ret;
    njs_value_t               ctor;
    njs_native_frame_t        *frame;
    njs_promise_capability_t  *capability;

    frame = vm->top_frame;
    frame->retval = retval;

    njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]);

    capability = njs_promise_new_capability(vm, &ctor);
    if (njs_slow_path(capability == NULL)) {
        return NJS_ERROR;
    }

    frame->function->context = capability;

    ret = njs_function_lambda_call(vm);

    if (ret == NJS_OK) {
        ret = njs_function_call(vm, njs_function(&capability->resolve),
                                &njs_value_undefined, retval, 1, &vm->retval);

    } else if (ret == NJS_AGAIN) {
        ret = NJS_OK;

    } else if (ret == NJS_ERROR) {
        if (njs_is_memory_error(vm, &vm->retval)) {
            return NJS_ERROR;
        }

        ret = njs_function_call(vm, njs_function(&capability->reject),
                                &njs_value_undefined, &vm->retval, 1,
                                &vm->retval);
    }

    *retval = capability->promise;

    return ret;
}
CWE-416
5
get_visual_text(
    cmdarg_T *cap,
    char_u   **pp,     // return: start of selected text
    int      *lenp)    // return: length of selected text
{
    if (VIsual_mode != 'V')
        unadjust_for_sel();
    if (VIsual.lnum != curwin->w_cursor.lnum)
    {
        if (cap != NULL)
            clearopbeep(cap->oap);
        return FAIL;
    }
    if (VIsual_mode == 'V')
    {
        *pp = ml_get_curline();
        *lenp = (int)STRLEN(*pp);
    }
    else
    {
        if (LT_POS(curwin->w_cursor, VIsual))
        {
            *pp = ml_get_pos(&curwin->w_cursor);
            *lenp = VIsual.col - curwin->w_cursor.col + 1;
        }
        else
        {
            *pp = ml_get_pos(&VIsual);
            *lenp = curwin->w_cursor.col - VIsual.col + 1;
        }
        if (has_mbyte)
            // Correct the length to include the whole last character.
            *lenp += (*mb_ptr2len)(*pp + (*lenp - 1)) - 1;
    }
    reset_VIsual_and_resel();
    return OK;
}
CWE-122
8
ga_concat_shorten_esc(garray_T *gap, char_u *str)
{
    char_u *p;
    char_u *s;
    int    c;
    int    clen;
    char_u buf[NUMBUFLEN];
    int    same_len;

    if (str == NULL)
    {
        ga_concat(gap, (char_u *)"NULL");
        return;
    }

    for (p = str; *p != NUL; ++p)
    {
        same_len = 1;
        s = p;
        c = mb_ptr2char_adv(&s);
        clen = s - p;
        while (*s != NUL && c == mb_ptr2char(s))
        {
            ++same_len;
            s += clen;
        }
        if (same_len > 20)
        {
            ga_concat(gap, (char_u *)"\\[");
            ga_concat_esc(gap, p, clen);
            ga_concat(gap, (char_u *)" occurs ");
            vim_snprintf((char *)buf, NUMBUFLEN, "%d", same_len);
            ga_concat(gap, buf);
            ga_concat(gap, (char_u *)" times]");
            p = s - 1;
        }
        else
            ga_concat_esc(gap, p, clen);
    }
}
CWE-121
11
ut64 MACH0_(get_main)(struct MACH0_(obj_t)* bin) {
    ut64 addr = 0LL;
    struct symbol_t *symbols;
    int i;

    if (!(symbols = MACH0_(get_symbols) (bin))) {
        return 0;
    }
    for (i = 0; !symbols[i].last; i++) {
        if (!strcmp (symbols[i].name, "_main")) {
            addr = symbols[i].addr;
            break;
        }
    }
    free (symbols);

    if (!addr && bin->main_cmd.cmd == LC_MAIN) {
        addr = bin->entry + bin->baddr;
    }
    if (!addr) {
        ut8 b[128];
        ut64 entry = addr_to_offset(bin, bin->entry);
        // XXX: X86 only and hacky!
        if (entry > bin->size || entry + sizeof (b) > bin->size)
            return 0;
        i = r_buf_read_at (bin->b, entry, b, sizeof (b));
        if (i < 1) {
            return 0;
        }
        for (i = 0; i < 64; i++) {
            if (b[i] == 0xe8 && !b[i+3] && !b[i+4]) {
                int delta = b[i+1] | (b[i+2] << 8) | (b[i+3] << 16) | (b[i+4] << 24);
                return bin->entry + i + 5 + delta;
            }
        }
    }
    return addr;
}
CWE-416
5
BGD_DECLARE(void *) gdImageWebpPtrEx (gdImagePtr im, int *size, int quality)
{
    void *rv;
    gdIOCtx *out = gdNewDynamicCtx(2048, NULL);

    if (out == NULL) {
        return NULL;
    }
    gdImageWebpCtx(im, out, quality);
    rv = gdDPExtractData(out, size);
    out->gd_free(out);
    return rv;
}
CWE-415
4
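This is recognizably the gdImageWebpPtr double-free shape (CVE-2016-6912 is my inferred attribution): if gdImageWebpCtx() fails mid-encode, extraction and context teardown can free the same dynamic buffer. The upstream fix checked the encoder's result before extracting; the int-returning variant below is an assumption modeled on that change:

    if (gdImageWebpCtx(im, out, quality)) {  /* assumed error-returning variant */
        rv = NULL;                           /* skip extraction on failure */
    } else {
        rv = gdDPExtractData(out, size);
    }
    out->gd_free(out);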
bfad_im_get_stats(struct Scsi_Host *shost)
{
    struct bfad_im_port_s *im_port =
            (struct bfad_im_port_s *) shost->hostdata[0];
    struct bfad_s *bfad = im_port->bfad;
    struct bfad_hal_comp fcomp;
    union bfa_port_stats_u *fcstats;
    struct fc_host_statistics *hstats;
    bfa_status_t rc;
    unsigned long flags;

    fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
    if (fcstats == NULL)
        return NULL;

    hstats = &bfad->link_stats;

    init_completion(&fcomp.comp);
    spin_lock_irqsave(&bfad->bfad_lock, flags);
    memset(hstats, 0, sizeof(struct fc_host_statistics));
    rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
                            fcstats, bfad_hcb_comp, &fcomp);
    spin_unlock_irqrestore(&bfad->bfad_lock, flags);
    if (rc != BFA_STATUS_OK)
        return NULL;

    wait_for_completion(&fcomp.comp);

    /* Fill the fc_host_statistics structure */
    hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
    hstats->tx_frames = fcstats->fc.tx_frames;
    hstats->tx_words = fcstats->fc.tx_words;
    hstats->rx_frames = fcstats->fc.rx_frames;
    hstats->rx_words = fcstats->fc.rx_words;
    hstats->lip_count = fcstats->fc.lip_count;
    hstats->nos_count = fcstats->fc.nos_count;
    hstats->error_frames = fcstats->fc.error_frames;
    hstats->dumped_frames = fcstats->fc.dropped_frames;
    hstats->link_failure_count = fcstats->fc.link_failures;
    hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
    hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
    hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
    hstats->invalid_crc_count = fcstats->fc.invalid_crcs;
    kfree(fcstats);

    return hstats;
}
CWE-401
7
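The CWE-401 label has a visible anchor: fcstats is not freed when bfa_port_get_stats() returns an error, so every failed query leaks the allocation. Sketch:

    if (rc != BFA_STATUS_OK) {
        kfree(fcstats);   /* was leaked on this early return */
        return NULL;
    }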
static GF_Err BM_ParseGlobalQuantizer(GF_BifsDecoder *codec, GF_BitStream *bs,
                                      GF_List *com_list)
{
    GF_Node *node;
    GF_Command *com;
    GF_CommandField *inf;

    node = gf_bifs_dec_node(codec, bs, NDT_SFWorldNode);
    if (!node)
        return GF_NON_COMPLIANT_BITSTREAM;

    /*reset global QP*/
    if (codec->scenegraph->global_qp) {
        gf_node_unregister(codec->scenegraph->global_qp, NULL);
    }
    codec->ActiveQP = NULL;
    codec->scenegraph->global_qp = NULL;

    if (gf_node_get_tag(node) != TAG_MPEG4_QuantizationParameter) {
        gf_node_unregister(node, NULL);
        return GF_NON_COMPLIANT_BITSTREAM;
    }

    /*register global QP*/
    codec->ActiveQP = (M_QuantizationParameter *) node;
    codec->ActiveQP->isLocal = 0;
    codec->scenegraph->global_qp = node;

    /*register TWICE: once for the command, and for the scenegraph globalQP*/
    node->sgprivate->num_instances = 2;

    com = gf_sg_command_new(codec->current_graph, GF_SG_GLOBAL_QUANTIZER);
    inf = gf_sg_command_field_new(com);
    inf->new_node = node;
    inf->field_ptr = &inf->new_node;
    inf->fieldType = GF_SG_VRML_SFNODE;
    gf_list_add(com_list, com);
    return GF_OK;
}
CWE-416
5
int cx23888_ir_probe(struct cx23885_dev *dev)
{
    struct cx23888_ir_state *state;
    struct v4l2_subdev *sd;
    struct v4l2_subdev_ir_parameters default_params;
    int ret;

    state = kzalloc(sizeof(struct cx23888_ir_state), GFP_KERNEL);
    if (state == NULL)
        return -ENOMEM;

    spin_lock_init(&state->rx_kfifo_lock);
    if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
        return -ENOMEM;

    state->dev = dev;
    sd = &state->sd;
    v4l2_subdev_init(sd, &cx23888_ir_controller_ops);
    v4l2_set_subdevdata(sd, state);
    /* FIXME - fix the formatting of dev->v4l2_dev.name and use it */
    snprintf(sd->name, sizeof(sd->name), "%s/888-ir", dev->name);
    sd->grp_id = CX23885_HW_888_IR;

    ret = v4l2_device_register_subdev(&dev->v4l2_dev, sd);
    if (ret == 0) {
        /*
         * Ensure no interrupts arrive from '888 specific conditions,
         * since we ignore them in this driver to have commonality with
         * similar IR controller cores.
         */
        cx23888_ir_write4(dev, CX23888_IR_IRQEN_REG, 0);

        mutex_init(&state->rx_params_lock);
        default_params = default_rx_params;
        v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);

        mutex_init(&state->tx_params_lock);
        default_params = default_tx_params;
        v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
    } else {
        kfifo_free(&state->rx_kfifo);
    }
    return ret;
}
CWE-401
7
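The CWE-401 label here has an obvious anchor: when kfifo_alloc() fails, the function returns without freeing state. Sketch:

    if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) {
        kfree(state);   /* was leaked on the kfifo failure path */
        return -ENOMEM;
    }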
static size_t _php_mb_regex_get_option_string(char *str, size_t len,
                                              OnigOptionType option,
                                              OnigSyntaxType *syntax)
{
    size_t len_left = len;
    size_t len_req = 0;
    char *p = str;
    char c;

    if ((option & ONIG_OPTION_IGNORECASE) != 0) {
        if (len_left > 0) {
            --len_left;
            *(p++) = 'i';
        }
        ++len_req;
    }

    if ((option & ONIG_OPTION_EXTEND) != 0) {
        if (len_left > 0) {
            --len_left;
            *(p++) = 'x';
        }
        ++len_req;
    }

    if ((option & (ONIG_OPTION_MULTILINE | ONIG_OPTION_SINGLELINE)) ==
            (ONIG_OPTION_MULTILINE | ONIG_OPTION_SINGLELINE)) {
        if (len_left > 0) {
            --len_left;
            *(p++) = 'p';
        }
        ++len_req;
    } else {
        if ((option & ONIG_OPTION_MULTILINE) != 0) {
            if (len_left > 0) {
                --len_left;
                *(p++) = 'm';
            }
            ++len_req;
        }
        if ((option & ONIG_OPTION_SINGLELINE) != 0) {
            if (len_left > 0) {
                --len_left;
                *(p++) = 's';
            }
            ++len_req;
        }
    }

    if ((option & ONIG_OPTION_FIND_LONGEST) != 0) {
        if (len_left > 0) {
            --len_left;
            *(p++) = 'l';
        }
        ++len_req;
    }

    if ((option & ONIG_OPTION_FIND_NOT_EMPTY) != 0) {
        if (len_left > 0) {
            --len_left;
            *(p++) = 'n';
        }
        ++len_req;
    }

    c = 0;
    if (syntax == ONIG_SYNTAX_JAVA) {
        c = 'j';
    } else if (syntax == ONIG_SYNTAX_GNU_REGEX) {
        c = 'u';
    } else if (syntax == ONIG_SYNTAX_GREP) {
        c = 'g';
    } else if (syntax == ONIG_SYNTAX_EMACS) {
        c = 'c';
    } else if (syntax == ONIG_SYNTAX_RUBY) {
        c = 'r';
    } else if (syntax == ONIG_SYNTAX_PERL) {
        c = 'z';
    } else if (syntax == ONIG_SYNTAX_POSIX_BASIC) {
        c = 'b';
    } else if (syntax == ONIG_SYNTAX_POSIX_EXTENDED) {
        c = 'd';
    }
    if (c != 0) {
        if (len_left > 0) {
            --len_left;
            *(p++) = c;
        }
        ++len_req;
    }

    if (len_left > 0) {
        --len_left;
        *(p++) = '\0';
    }
    ++len_req;
    if (len < len_req) {
        return len_req;
    }

    return 0;
}
CWE-415
4
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
{
    struct buffer_ref *ref = (struct buffer_ref *)buf->private;

    ref->ref++;
}
CWE-416
5
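Last entry, same refcount-overflow family as the pipe_buf_get row above (an inference): ref->ref++ is an unchecked int increment, and wrapping it lets the buffer be released while readers still hold it. The hardened form, close to the actual tracing fix, makes the get fallible and bounds the count:

    static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
    {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;

        if (ref->ref > INT_MAX / 2)   /* refuse the increment near overflow */
            return false;
        ref->ref++;
        return true;
    }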