Schema (one record per function; value ranges as listed):

    column      dtype     range
    ----------  --------  -------------------------
    func        string    lengths 0 - 484k
    target      int64     0 - 1
    cwe         sequence  -
    project     string    lengths 2 - 29
    commit_id   string    length 40 (fixed)
    hash        float64   ~1.22e33 - ~3.40e38
    size        int64     1 - 24k
    message     string    lengths 0 - 13.3k
bool HeaderMapImpl::operator==(const HeaderMapImpl& rhs) const { if (size() != rhs.size()) { return false; } for (auto i = headers_.begin(), j = rhs.headers_.begin(); i != headers_.end(); ++i, ++j) { if (i->key() != j->key().getStringView() || i->value() != j->value().getStringView()) { return false; } } return true; }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
326,393,149,736,214,730,000,000,000,000,000,000,000
13
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <asraa@google.com>
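A minimal C model of the caching scheme this message describes (a byte-size cache invalidated by any mutable access and rebuilt by one full pass); all names here are hypothetical, and Envoy's actual implementation is C++:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct header { const char *key; const char *value; };

    struct header_map {
        struct header entries[64];
        size_t count;
        bool   size_valid;   /* cleared whenever a mutable entry escapes */
        size_t cached_size;  /* meaningful only while size_valid is true */
    };

    /* Handing out a non-const entry may change key/value lengths,
     * so the cache must be treated as stale from this point on. */
    static struct header *header_map_mutable_entry(struct header_map *m, size_t i)
    {
        m->size_valid = false;
        return &m->entries[i];
    }

    /* refreshByteSize() analogue: one O(n) pass re-sums the lengths. */
    static size_t header_map_byte_size(struct header_map *m)
    {
        if (!m->size_valid) {
            size_t total = 0;
            for (size_t i = 0; i < m->count; i++)
                total += strlen(m->entries[i].key) + strlen(m->entries[i].value);
            m->cached_size = total;
            m->size_valid = true;
        }
        return m->cached_size;
    }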
static int read_next_command(void) { static int stdin_eof = 0; if (stdin_eof) { unread_command_buf = 0; return EOF; } for (;;) { const char *p; if (unread_command_buf) { unread_command_buf = 0; } else { struct recent_command *rc; strbuf_detach(&command_buf, NULL); stdin_eof = strbuf_getline(&command_buf, stdin, '\n'); if (stdin_eof) return EOF; if (!seen_data_command && !starts_with(command_buf.buf, "feature ") && !starts_with(command_buf.buf, "option ")) { parse_argv(); } rc = rc_free; if (rc) rc_free = rc->next; else { rc = cmd_hist.next; cmd_hist.next = rc->next; cmd_hist.next->prev = &cmd_hist; free(rc->buf); } rc->buf = command_buf.buf; rc->prev = cmd_tail; rc->next = cmd_hist.prev; rc->prev->next = rc; cmd_tail = rc; } if (skip_prefix(command_buf.buf, "get-mark ", &p)) { parse_get_mark(p); continue; } if (skip_prefix(command_buf.buf, "cat-blob ", &p)) { parse_cat_blob(p); continue; } if (command_buf.buf[0] == '#') continue; return 0; } }
0
[ "CWE-119", "CWE-787" ]
git
34fa79a6cde56d6d428ab0d3160cb094ebad3305
272,512,472,013,987,930,000,000,000,000,000,000,000
57
prefer memcpy to strcpy When we already know the length of a string (e.g., because we just malloc'd to fit it), it's nicer to use memcpy than strcpy, as it makes it more obvious that we are not going to overflow the buffer (because the size we pass matches the size in the allocation). This also eliminates calls to strcpy, which make auditing the code base harder. Signed-off-by: Jeff King <peff@peff.net> Signed-off-by: Junio C Hamano <gitster@pobox.com>
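The pattern this message advocates, as a compilable sketch (helper name hypothetical, not git's code): when the length is already known from the allocation, memcpy with that exact count makes the bound visible at the call site.

    #include <stdlib.h>
    #include <string.h>

    /* Duplicate a string with the bound made explicit: the count passed
     * to memcpy is visibly the same len + 1 we just allocated, whereas
     * strcpy(dst, src) copies the same bytes but hides that relation. */
    static char *dup_with_explicit_length(const char *src)
    {
        size_t len = strlen(src);
        char *dst = malloc(len + 1);
        if (!dst)
            return NULL;
        memcpy(dst, src, len + 1);   /* includes the terminating NUL */
        return dst;
    }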
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev) { EVBUFFER_LOCK(buf); buf->parent = bev; EVBUFFER_UNLOCK(buf); }
0
[ "CWE-189" ]
libevent
20d6d4458bee5d88bda1511c225c25b2d3198d6c
225,685,700,869,578,600,000,000,000,000,000,000,000
6
Fix CVE-2014-6272 in Libevent 2.0 For this fix, we need to make sure that passing too-large inputs to the evbuffer functions can't make us do bad things with the heap. Also, lower the maximum chunk size to the lower of off_t, size_t maximum. This is necessary since otherwise we could get into an infinite loop if we make a chunk that 'misalign' cannot index into.
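A hedged sketch of the size-capping idea in this message (helper name hypothetical, not libevent's code): choose the chunk ceiling as the lower of what a signed off_t and an unsigned size_t can represent, so no chunk can exceed what the buffer's misalign field is able to index. Allocation paths would then reject any request above this limit before touching the heap.

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/types.h>

    static size_t evbuffer_chunk_limit(void)
    {
        /* off_t is signed: max = 2^(bits-1) - 1, computed in uint64_t
         * to avoid overflow. */
        uint64_t off_max  = (((uint64_t)1) << (sizeof(off_t) * 8 - 1)) - 1;
        uint64_t size_max = (sizeof(size_t) >= sizeof(uint64_t))
                          ? UINT64_MAX
                          : ((((uint64_t)1) << (sizeof(size_t) * 8)) - 1);
        return (size_t)(off_max < size_max ? off_max : size_max);
    }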
Discovered_table_list::Discovered_table_list(THD *thd_arg, Dynamic_array<LEX_CSTRING*> *tables_arg, const LEX_CSTRING *wild_arg) : thd(thd_arg), with_temps(false), tables(tables_arg) { if (wild_arg->str && wild_arg->str[0]) { wild= wild_arg->str; wend= wild + wild_arg->length; } else wild= 0; }
0
[ "CWE-416" ]
server
af810407f78b7f792a9bb8c47c8c532eb3b3a758
52,647,209,355,936,910,000,000,000,000,000,000,000
13
MDEV-28098 incorrect key in "dup value" error after long unique reset errkey after using it, so that it wouldn't affect the next error message in the next statement
static ZIPARCHIVE_METHOD(extractTo) { struct zip *intern; zval *self = getThis(); zval *zval_files = NULL; zval *zval_file = NULL; php_stream_statbuf ssb; char *pathto; size_t pathto_len; int ret, i; int nelems; if (!self) { RETURN_FALSE; } if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|z", &pathto, &pathto_len, &zval_files) == FAILURE) { return; } if (pathto_len < 1) { RETURN_FALSE; } if (php_stream_stat_path_ex(pathto, PHP_STREAM_URL_STAT_QUIET, &ssb, NULL) < 0) { ret = php_stream_mkdir(pathto, 0777, PHP_STREAM_MKDIR_RECURSIVE, NULL); if (!ret) { RETURN_FALSE; } } ZIP_FROM_OBJECT(intern, self); if (zval_files && (Z_TYPE_P(zval_files) != IS_NULL)) { switch (Z_TYPE_P(zval_files)) { case IS_STRING: if (!php_zip_extract_file(intern, pathto, Z_STRVAL_P(zval_files), Z_STRLEN_P(zval_files))) { RETURN_FALSE; } break; case IS_ARRAY: nelems = zend_hash_num_elements(Z_ARRVAL_P(zval_files)); if (nelems == 0 ) { RETURN_FALSE; } for (i = 0; i < nelems; i++) { if ((zval_file = zend_hash_index_find(Z_ARRVAL_P(zval_files), i)) != NULL) { switch (Z_TYPE_P(zval_file)) { case IS_LONG: break; case IS_STRING: if (!php_zip_extract_file(intern, pathto, Z_STRVAL_P(zval_file), Z_STRLEN_P(zval_file))) { RETURN_FALSE; } break; } } } break; case IS_LONG: default: php_error_docref(NULL, E_WARNING, "Invalid argument, expect string or array of strings"); break; } } else { /* Extract all files */ int filecount = zip_get_num_files(intern); if (filecount == -1) { php_error_docref(NULL, E_WARNING, "Illegal archive"); RETURN_FALSE; } for (i = 0; i < filecount; i++) { char *file = (char*)zip_get_name(intern, i, ZIP_FL_UNCHANGED); if (!file || !php_zip_extract_file(intern, pathto, file, strlen(file))) { RETURN_FALSE; } } } RETURN_TRUE; }
0
[ "CWE-190" ]
php-src
3b8d4de300854b3517c7acb239b84f7726c1353c
47,247,376,167,792,425,000,000,000,000,000,000,000
83
Fix bug #71923 - integer overflow in ZipArchive::getFrom*
int mp_montgomery_calc_normalization(mp_int *a, mp_int *b) { fp_montgomery_calc_normalization(a, b); return MP_OKAY; }
0
[ "CWE-326", "CWE-203" ]
wolfssl
1de07da61f0c8e9926dcbd68119f73230dae283f
300,868,012,475,258,140,000,000,000,000,000,000,000
5
Constant time EC map to affine for private operations For fast math, use a constant time modular inverse when mapping to affine when operation involves a private key - key gen, calc shared secret, sign.
static void vhost_net_busy_poll(struct vhost_net *net, struct vhost_virtqueue *rvq, struct vhost_virtqueue *tvq, bool *busyloop_intr, bool poll_rx) { unsigned long busyloop_timeout; unsigned long endtime; struct socket *sock; struct vhost_virtqueue *vq = poll_rx ? tvq : rvq; /* Try to hold the vq mutex of the paired virtqueue. We can't * use mutex_lock() here since we could not guarantee a * consistenet lock ordering. */ if (!mutex_trylock(&vq->mutex)) return; vhost_disable_notify(&net->dev, vq); sock = rvq->private_data; busyloop_timeout = poll_rx ? rvq->busyloop_timeout: tvq->busyloop_timeout; preempt_disable(); endtime = busy_clock() + busyloop_timeout; while (vhost_can_busy_poll(endtime)) { if (vhost_has_work(&net->dev)) { *busyloop_intr = true; break; } if ((sock_has_rx_data(sock) && !vhost_vq_avail_empty(&net->dev, rvq)) || !vhost_vq_avail_empty(&net->dev, tvq)) break; cpu_relax(); } preempt_enable(); if (poll_rx || sock_has_rx_data(sock)) vhost_net_busy_poll_try_queue(net, vq); else if (!poll_rx) /* On tx here, sock has no rx data. */ vhost_enable_notify(&net->dev, rvq); mutex_unlock(&vq->mutex); }
0
[ "CWE-787" ]
linux
42d84c8490f9f0931786f1623191fcab397c3d64
260,664,298,696,531,180,000,000,000,000,000,000,000
50
vhost: Check docket sk_family instead of call getname Doing so, we save one call to get data we already have in the struct. Also, since there is no guarantee that getname use sockaddr_ll parameter beyond its size, we add a little bit of security here. It should do not do beyond MAX_ADDR_LEN, but syzbot found that ax25_getname writes more (72 bytes, the size of full_sockaddr_ax25, versus 20 + 32 bytes of sockaddr_ll + MAX_ADDR_LEN in syzbot repro). Fixes: 3a4d5c94e9593 ("vhost_net: a kernel-level virtio server") Reported-by: syzbot+f2a62d07a5198c819c7b@syzkaller.appspotmail.com Signed-off-by: Eugenio Pérez <eperezma@redhat.com> Acked-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
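A userspace model of the check this message prefers (struct names hypothetical): read the address family straight from the socket's own state instead of round-tripping through getname() into a caller-sized buffer.

    #include <sys/socket.h>   /* AF_PACKET on Linux */

    struct sock_model   { int sk_family; };
    struct socket_model { struct sock_model *sk; };

    /* The family is already recorded on the struct when the socket is
     * created; rebuilding an address via getname() writes into a
     * caller-sized buffer that some protocols (the ax25 case above)
     * overrun. Reading the field needs no call and no buffer. */
    static int is_packet_socket(const struct socket_model *sock)
    {
        return sock->sk->sk_family == AF_PACKET;
    }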
int n_ssl3_mac(SSL *ssl, unsigned char *md, int send) { SSL3_RECORD *rec; unsigned char *mac_sec,*seq; EVP_MD_CTX md_ctx; const EVP_MD_CTX *hash; unsigned char *p,rec_char; size_t md_size; int npad; int t; if (send) { rec= &(ssl->s3->wrec); mac_sec= &(ssl->s3->write_mac_secret[0]); seq= &(ssl->s3->write_sequence[0]); hash=ssl->write_hash; } else { rec= &(ssl->s3->rrec); mac_sec= &(ssl->s3->read_mac_secret[0]); seq= &(ssl->s3->read_sequence[0]); hash=ssl->read_hash; } t=EVP_MD_CTX_size(hash); if (t < 0) return -1; md_size=t; npad=(48/md_size)*md_size; if (!send && EVP_CIPHER_CTX_mode(ssl->enc_read_ctx) == EVP_CIPH_CBC_MODE && ssl3_cbc_record_digest_supported(hash)) { /* This is a CBC-encrypted record. We must avoid leaking any * timing-side channel information about how many blocks of * data we are hashing because that gives an attacker a * timing-oracle. */ /* npad is, at most, 48 bytes and that's with MD5: * 16 + 48 + 8 (sequence bytes) + 1 + 2 = 75. * * With SHA-1 (the largest hash speced for SSLv3) the hash size * goes up 4, but npad goes down by 8, resulting in a smaller * total size. */ unsigned char header[75]; unsigned j = 0; memcpy(header+j, mac_sec, md_size); j += md_size; memcpy(header+j, ssl3_pad_1, npad); j += npad; memcpy(header+j, seq, 8); j += 8; header[j++] = rec->type; header[j++] = rec->length >> 8; header[j++] = rec->length & 0xff; ssl3_cbc_digest_record( hash, md, &md_size, header, rec->input, rec->length + md_size, rec->orig_len, mac_sec, md_size, 1 /* is SSLv3 */); } else { unsigned int md_size_u; /* Chop the digest off the end :-) */ EVP_MD_CTX_init(&md_ctx); EVP_MD_CTX_copy_ex( &md_ctx,hash); EVP_DigestUpdate(&md_ctx,mac_sec,md_size); EVP_DigestUpdate(&md_ctx,ssl3_pad_1,npad); EVP_DigestUpdate(&md_ctx,seq,8); rec_char=rec->type; EVP_DigestUpdate(&md_ctx,&rec_char,1); p=md; s2n(rec->length,p); EVP_DigestUpdate(&md_ctx,md,2); EVP_DigestUpdate(&md_ctx,rec->input,rec->length); EVP_DigestFinal_ex( &md_ctx,md,NULL); EVP_MD_CTX_copy_ex( &md_ctx,hash); EVP_DigestUpdate(&md_ctx,mac_sec,md_size); EVP_DigestUpdate(&md_ctx,ssl3_pad_2,npad); EVP_DigestUpdate(&md_ctx,md,md_size); EVP_DigestFinal_ex( &md_ctx,md,&md_size_u); md_size = md_size_u; EVP_MD_CTX_cleanup(&md_ctx); } ssl3_record_sequence_update(seq); return(md_size); }
0
[ "CWE-310" ]
openssl
e5420be6cd09af2550b128575a675490cfba0483
128,722,472,772,765,070,000,000,000,000,000,000,000
98
Make CBC decoding constant time. This patch makes the decoding of SSLv3 and TLS CBC records constant time. Without this, a timing side-channel can be used to build a padding oracle and mount Vaudenay's attack. This patch also disables the stitched AESNI+SHA mode pending a similar fix to that code. In order to be easy to backport, this change is implemented in ssl/, rather than as a generic AEAD mode. In the future this should be changed around so that HMAC isn't in ssl/, but crypto/ as FIPS expects. (cherry picked from commit e130841bccfc0bb9da254dc84e23bc6a1c78a64e) Conflicts: crypto/evp/c_allc.c ssl/ssl_algs.c ssl/ssl_locl.h ssl/t1_enc.c
static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v) { struct vhost_dev *dev = &v->vdev; vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1); kfree(dev->iotlb); dev->iotlb = NULL; }
0
[ "CWE-416" ]
linux
f6bbf0010ba004f5e90c7aefdebc0ee4bd3283b9
323,974,736,029,287,980,000,000,000,000,000,000,000
8
vhost-vdpa: fix use-after-free of v->config_ctx When the 'v->config_ctx' eventfd_ctx reference is released we didn't set it to NULL. So if the same character device (e.g. /dev/vhost-vdpa-0) is re-opened, the 'v->config_ctx' is invalid and calling again vhost_vdpa_config_put() causes use-after-free issues like the following refcount_t underflow: refcount_t: underflow; use-after-free. WARNING: CPU: 2 PID: 872 at lib/refcount.c:28 refcount_warn_saturate+0xae/0xf0 RIP: 0010:refcount_warn_saturate+0xae/0xf0 Call Trace: eventfd_ctx_put+0x5b/0x70 vhost_vdpa_release+0xcd/0x150 [vhost_vdpa] __fput+0x8e/0x240 ____fput+0xe/0x10 task_work_run+0x66/0xa0 exit_to_user_mode_prepare+0x118/0x120 syscall_exit_to_user_mode+0x21/0x50 ? __x64_sys_close+0x12/0x40 do_syscall_64+0x45/0x50 entry_SYSCALL_64_after_hwframe+0x44/0xae Fixes: 776f395004d8 ("vhost_vdpa: Support config interrupt in vdpa") Cc: lingshan.zhu@intel.com Cc: stable@vger.kernel.org Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> Link: https://lore.kernel.org/r/20210311135257.109460-2-sgarzare@redhat.com Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Zhu Lingshan <lingshan.zhu@intel.com> Acked-by: Jason Wang <jasowang@redhat.com>
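The message pins the bug on a stale pointer; a minimal userspace model of the release-and-clear discipline it describes (types and names hypothetical):

    #include <stdlib.h>

    struct eventfd_ctx_model { int refcount; };

    static void ctx_put(struct eventfd_ctx_model *c)
    {
        if (--c->refcount == 0)
            free(c);
    }

    struct vdpa_state_model { struct eventfd_ctx_model *config_ctx; };

    /* Dropping the reference and clearing the pointer together means a
     * later release on a re-opened device sees NULL instead of a stale
     * context, so the refcount can never be decremented twice. */
    static void config_put(struct vdpa_state_model *v)
    {
        if (v->config_ctx) {
            ctx_put(v->config_ctx);
            v->config_ctx = NULL;
        }
    }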
bool ms_can_fast_dispatch(const Message *m) const override { switch (m->get_type()) { case CEPH_MSG_OSD_OP: case CEPH_MSG_OSD_BACKOFF: case MSG_OSD_SUBOP: case MSG_OSD_REPOP: case MSG_OSD_SUBOPREPLY: case MSG_OSD_REPOPREPLY: case MSG_OSD_PG_PUSH: case MSG_OSD_PG_PULL: case MSG_OSD_PG_PUSH_REPLY: case MSG_OSD_PG_SCAN: case MSG_OSD_PG_BACKFILL: case MSG_OSD_PG_BACKFILL_REMOVE: case MSG_OSD_EC_WRITE: case MSG_OSD_EC_WRITE_REPLY: case MSG_OSD_EC_READ: case MSG_OSD_EC_READ_REPLY: case MSG_OSD_SCRUB_RESERVE: case MSG_OSD_REP_SCRUB: case MSG_OSD_REP_SCRUBMAP: case MSG_OSD_PG_UPDATE_LOG_MISSING: case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY: case MSG_OSD_PG_RECOVERY_DELETE: case MSG_OSD_PG_RECOVERY_DELETE_REPLY: return true; default: return false; } }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
62,387,647,309,762,970,000,000,000,000,000,000,000
30
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <sage@redhat.com> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
string t_cpp_generator::local_reflection_name(const char* prefix, t_type* ttype, bool external) { ttype = get_true_type(ttype); // We have to use the program name as part of the identifier because // if two thrift "programs" are compiled into one actual program // you would get a symbol collision if they both defined list<i32>. // trlo = Thrift Reflection LOcal. string prog; string name; string nspace; // TODO(dreiss): Would it be better to pregenerate the base types // and put them in Thrift.{h,cpp} ? if (ttype->is_base_type()) { prog = program_->get_name(); name = ttype->get_ascii_fingerprint(); } else if (ttype->is_enum()) { assert(ttype->get_program() != NULL); prog = ttype->get_program()->get_name(); name = ttype->get_ascii_fingerprint(); } else if (ttype->is_container()) { prog = program_->get_name(); name = ttype->get_ascii_fingerprint(); } else { assert(ttype->is_struct() || ttype->is_xception()); assert(ttype->get_program() != NULL); prog = ttype->get_program()->get_name(); name = ttype->get_ascii_fingerprint(); } if (external && ttype->get_program() != NULL && ttype->get_program() != program_) { nspace = namespace_prefix(ttype->get_program()->get_namespace("cpp")); } return nspace + "trlo_" + prefix + "_" + prog + "_" + name; }
0
[ "CWE-20" ]
thrift
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
213,514,037,625,477,900,000,000,000,000,000,000,000
37
THRIFT-3231 CPP: Limit recursion depth to 64 Client: cpp Patch: Ben Craig <bencraig@apache.org>
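A self-contained sketch of the recursion cap named in this message, applied to a toy nested-bracket parser (all names hypothetical; the Thrift patch guards its own decoder the same way):

    #include <stdio.h>

    enum { MAX_DEPTH = 64 };

    /* Accepts nested '[' ... ']' but refuses to recurse past MAX_DEPTH,
     * so maliciously deep input becomes a bounded error instead of a
     * stack overflow. Returns 0 on success, -1 on rejection. */
    static int parse_nested(const char **p, int depth)
    {
        if (depth >= MAX_DEPTH)
            return -1;
        while (**p) {
            if (**p == '[') {
                (*p)++;
                if (parse_nested(p, depth + 1) < 0)
                    return -1;
            } else if (**p == ']') {
                (*p)++;
                return 0;
            } else {
                (*p)++;
            }
        }
        return depth == 0 ? 0 : -1;   /* unterminated nesting fails */
    }

    int main(void)
    {
        const char *ok = "[[a][b]]";
        printf("%d\n", parse_nested(&ok, 0));   /* prints 0 */
        return 0;
    }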
BGD_DECLARE(gdImagePtr) gdImageCreateFromWBMP(FILE *inFile) { gdImagePtr im; gdIOCtx *in = gdNewFileCtx(inFile); if (in == NULL) return NULL; im = gdImageCreateFromWBMPCtx(in); in->gd_free(in); return im; }
0
[ "CWE-415" ]
libgd
553702980ae89c83f2d6e254d62cf82e204956d0
259,573,925,859,434,000,000,000,000,000,000,000,000
9
Fix #492: Potential double-free in gdImage*Ptr() Whenever `gdImage*Ptr()` calls `gdImage*Ctx()` and the latter fails, we must not call `gdDPExtractData()`; otherwise a double-free would happen. Since `gdImage*Ctx()` are void functions, and we can't change that for BC reasons, we're introducing static helpers which are used internally. We're adding a regression test for `gdImageJpegPtr()`, but not for `gdImageGifPtr()` and `gdImageWbmpPtr()` since we don't know how to trigger failure of the respective `gdImage*Ctx()` calls. This potential security issue has been reported by Solmaz Salimi (aka. Rooney).
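A hedged sketch of the refactor this message outlines (all names hypothetical, not libgd's code): the void Ctx-style function stays for compatibility, both it and the Ptr variant call an internal helper that can report failure, and data is extracted only on success.

    #include <stdlib.h>
    #include <string.h>

    typedef struct { char *data; int len; } dyn_ctx;  /* stand-in for a dynamic gdIOCtx */

    /* Internal static helper: unlike the void public function, it
     * tells the caller whether encoding actually succeeded. */
    static int image_write_ctx_helper(const char *pixels, dyn_ctx *out)
    {
        out->len = (int)strlen(pixels);
        out->data = malloc((size_t)out->len);
        if (out->data == NULL)
            return -1;
        memcpy(out->data, pixels, (size_t)out->len);
        return 0;
    }

    /* Void wrapper kept so existing callers keep compiling. */
    void image_write_ctx(const char *pixels, dyn_ctx *out)
    {
        (void)image_write_ctx_helper(pixels, out);
    }

    /* Ptr variant: extracting the buffer only on success means failure
     * paths never free (or hand out) the same block twice. */
    char *image_write_ptr(const char *pixels, int *size)
    {
        dyn_ctx out = { NULL, 0 };
        if (image_write_ctx_helper(pixels, &out) != 0)
            return NULL;
        *size = out.len;
        return out.data;
    }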
coderange_scan(const char *p, long len, rb_encoding *enc) { const char *e = p + len; if (rb_enc_to_index(enc) == 0) { /* enc is ASCII-8BIT. ASCII-8BIT string never be broken. */ p = search_nonascii(p, e); return p ? ENC_CODERANGE_VALID : ENC_CODERANGE_7BIT; } if (rb_enc_asciicompat(enc)) { p = search_nonascii(p, e); if (!p) { return ENC_CODERANGE_7BIT; } while (p < e) { int ret = rb_enc_precise_mbclen(p, e, enc); if (!MBCLEN_CHARFOUND_P(ret)) { return ENC_CODERANGE_BROKEN; } p += MBCLEN_CHARFOUND_LEN(ret); if (p < e) { p = search_nonascii(p, e); if (!p) { return ENC_CODERANGE_VALID; } } } if (e < p) { return ENC_CODERANGE_BROKEN; } return ENC_CODERANGE_VALID; } while (p < e) { int ret = rb_enc_precise_mbclen(p, e, enc); if (!MBCLEN_CHARFOUND_P(ret)) { return ENC_CODERANGE_BROKEN; } p += MBCLEN_CHARFOUND_LEN(ret); } if (e < p) { return ENC_CODERANGE_BROKEN; } return ENC_CODERANGE_VALID; }
0
[ "CWE-119" ]
ruby
1c2ef610358af33f9ded3086aa2d70aac03dcac5
149,995,418,371,283,700,000,000,000,000,000,000,000
47
* string.c (rb_str_justify): CVE-2009-4124. Fixes a bug reported by Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London; Patch by nobu. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
void *arpt_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(arpt, ARPT); }
0
[ "CWE-200" ]
linux-2.6
42eab94fff18cb1091d3501cd284d6bd6cc9c143
133,724,299,329,120,470,000,000,000,000,000,000,000
4
netfilter: arp_tables: fix infoleak to userspace Structures ipt_replace, compat_ipt_replace, and xt_get_revision are copied from userspace. Fields of these structs that are zero-terminated strings are not checked. When they are used as argument to a format string containing "%s" in request_module(), some sensitive information is leaked to userspace via argument of spawned modprobe process. The first bug was introduced before the git epoch; the second is introduced by 6b7d31fc (v2.6.15-rc1); the third is introduced by 6b7d31fc (v2.6.15-rc1). To trigger the bug one should have CAP_NET_ADMIN. Signed-off-by: Vasiliy Kulikov <segoon@openwall.com> Signed-off-by: Patrick McHardy <kaber@trash.net>
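A compilable model of the missing check this message describes (names hypothetical): a name field copied from an untrusted source must be proven NUL-terminated before it reaches a "%s" format.

    #include <stdio.h>
    #include <string.h>

    #define TABLE_NAME_LEN 32   /* fixed-size field copied from userspace */

    /* A name field copied from an untrusted struct may lack the NUL
     * terminator; proving termination first keeps "%s" from walking
     * past the field and leaking adjacent memory into the argv of a
     * spawned process. */
    static int table_name_ok(const char name[TABLE_NAME_LEN])
    {
        return strnlen(name, TABLE_NAME_LEN) < TABLE_NAME_LEN;
    }

    static void request_table_module(const char name[TABLE_NAME_LEN])
    {
        if (!table_name_ok(name))
            return;                          /* reject instead of leaking */
        printf("modprobe arpt_%s\n", name);  /* stand-in for request_module() */
    }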
test_empty (void) { g_autoptr(FlatpakBwrap) bwrap = flatpak_bwrap_new (NULL); g_autoptr(FlatpakExports) exports = flatpak_exports_new (); gsize i; g_assert_false (flatpak_exports_path_is_visible (exports, "/run")); g_assert_cmpint (flatpak_exports_path_get_mode (exports, "/tmp"), ==, FLATPAK_FILESYSTEM_MODE_NONE); flatpak_bwrap_add_arg (bwrap, "bwrap"); flatpak_exports_append_bwrap_args (exports, bwrap); flatpak_bwrap_finish (bwrap); print_bwrap (bwrap); i = 0; g_assert_cmpuint (i, <, bwrap->argv->len); g_assert_cmpstr (bwrap->argv->pdata[i++], ==, "bwrap"); i = assert_next_is_os_release (bwrap, i); g_assert_cmpuint (i, <, bwrap->argv->len); g_assert_cmpstr (bwrap->argv->pdata[i++], ==, NULL); g_assert_cmpuint (i, ==, bwrap->argv->len); }
0
[ "CWE-74" ]
flatpak
4108e022452303093d8b90c838695a0476cb09c7
217,246,563,691,179,500,000,000,000,000,000,000,000
25
context: Add --unset-env option and a corresponding override This follows up from GHSA-4ppf-fxf6-vxg2 to fix missing functionality that I noticed while resolving that vulnerability, but is not required for fixing the vulnerability. Signed-off-by: Simon McVittie <smcv@collabora.com>
int X509V3_add_value_bool(const char *name, int asn1_bool, STACK_OF(CONF_VALUE) **extlist) { if(asn1_bool) return X509V3_add_value(name, "TRUE", extlist); return X509V3_add_value(name, "FALSE", extlist); }
0
[]
openssl
a70da5b3ecc3160368529677006801c58cb369db
322,481,417,326,249,670,000,000,000,000,000,000,000
6
New functions to check a hostname email or IP address against a certificate. Add options to s_client, s_server and x509 utilities to print results of checks.
parser_parse_unary_expression (parser_context_t *context_p, /**< context */ size_t *grouping_level_p) /**< grouping level */ { bool new_was_seen = false; /* Collect unary operators. */ while (true) { /* Convert plus and minus binary operators to unary operators. */ switch (context_p->token.type) { case LEXER_ADD: { context_p->token.type = LEXER_PLUS; break; } case LEXER_SUBTRACT: { context_p->token.type = LEXER_NEGATE; break; } #if JERRY_ESNEXT case LEXER_KEYW_AWAIT: { #if JERRY_MODULE_SYSTEM if ((context_p->global_status_flags & ECMA_PARSE_MODULE) && !(context_p->status_flags & PARSER_IS_ASYNC_FUNCTION)) { parser_raise_error (context_p, PARSER_ERR_AWAIT_NOT_ALLOWED); } #endif /* JERRY_MODULE_SYSTEM */ if (JERRY_UNLIKELY (context_p->token.lit_location.has_escape)) { parser_raise_error (context_p, PARSER_ERR_INVALID_KEYWORD); } break; } #endif /* JERRY_ESNEXT */ #if JERRY_MODULE_SYSTEM case LEXER_KEYW_IMPORT: { if (new_was_seen) { parser_raise_error (context_p, PARSER_ERR_IMPORT_AFTER_NEW); } break; } #endif /* JERRY_MODULE_SYSTEM */ } /* Bracketed expressions are primary expressions. At this * point their left paren is pushed onto the stack and * they are processed when their closing paren is reached. */ if (context_p->token.type == LEXER_LEFT_PAREN) { #if JERRY_ESNEXT if (context_p->next_scanner_info_p->source_p == context_p->source_p) { JERRY_ASSERT (context_p->next_scanner_info_p->type == SCANNER_TYPE_FUNCTION); break; } #endif /* JERRY_ESNEXT */ (*grouping_level_p) += PARSER_GROUPING_LEVEL_INCREASE; new_was_seen = false; } else if (context_p->token.type == LEXER_KEYW_NEW) { /* After 'new' unary operators are not allowed. */ new_was_seen = true; #if JERRY_ESNEXT /* Check if "new.target" is written here. */ if (scanner_try_scan_new_target (context_p)) { if (!(context_p->status_flags & PARSER_ALLOW_NEW_TARGET)) { parser_raise_error (context_p, PARSER_ERR_NEW_TARGET_NOT_ALLOWED); } parser_emit_cbc_ext (context_p, CBC_EXT_PUSH_NEW_TARGET); lexer_next_token (context_p); /* Found "new.target" return here */ return false; } #endif /* JERRY_ESNEXT */ } else if (new_was_seen || (*grouping_level_p == PARSE_EXPR_LEFT_HAND_SIDE) || !LEXER_IS_UNARY_OP_TOKEN (context_p->token.type)) { break; } parser_stack_push_uint8 (context_p, context_p->token.type); lexer_next_token (context_p); } /* Parse primary expression. */ switch (context_p->token.type) { #if JERRY_ESNEXT case LEXER_TEMPLATE_LITERAL: { if (context_p->source_p[-1] != LIT_CHAR_GRAVE_ACCENT) { parser_parse_template_literal (context_p); break; } /* The string is a normal string literal. 
*/ /* FALLTHRU */ } #endif /* JERRY_ESNEXT */ case LEXER_LITERAL: { #if JERRY_ESNEXT if (JERRY_UNLIKELY (context_p->next_scanner_info_p->source_p == context_p->source_p)) { JERRY_ASSERT (context_p->next_scanner_info_p->type == SCANNER_TYPE_FUNCTION); uint32_t arrow_status_flags = (PARSER_IS_FUNCTION | PARSER_IS_ARROW_FUNCTION | (context_p->status_flags & PARSER_INSIDE_CLASS_FIELD)); if (context_p->next_scanner_info_p->u8_arg & SCANNER_FUNCTION_ASYNC) { JERRY_ASSERT (lexer_token_is_async (context_p)); JERRY_ASSERT (!(context_p->next_scanner_info_p->u8_arg & SCANNER_FUNCTION_STATEMENT)); uint32_t saved_status_flags = context_p->status_flags; context_p->status_flags |= PARSER_IS_ASYNC_FUNCTION | PARSER_DISALLOW_AWAIT_YIELD; lexer_next_token (context_p); context_p->status_flags = saved_status_flags; if (context_p->token.type == LEXER_KEYW_FUNCTION) { uint32_t status_flags = (PARSER_FUNCTION_CLOSURE | PARSER_IS_FUNC_EXPRESSION | PARSER_IS_ASYNC_FUNCTION | PARSER_DISALLOW_AWAIT_YIELD); parser_parse_function_expression (context_p, status_flags); break; } arrow_status_flags = (PARSER_IS_FUNCTION | PARSER_IS_ARROW_FUNCTION | PARSER_IS_ASYNC_FUNCTION | PARSER_DISALLOW_AWAIT_YIELD); } parser_check_assignment_expr (context_p); parser_parse_function_expression (context_p, arrow_status_flags); return parser_abort_parsing_after_assignment_expression (context_p); } #endif /* JERRY_ESNEXT */ uint8_t type = context_p->token.lit_location.type; if (type == LEXER_IDENT_LITERAL || type == LEXER_STRING_LITERAL) { lexer_construct_literal_object (context_p, &context_p->token.lit_location, context_p->token.lit_location.type); } else if (type == LEXER_NUMBER_LITERAL) { bool is_negative_number = false; if ((context_p->stack_top_uint8 == LEXER_PLUS || context_p->stack_top_uint8 == LEXER_NEGATE) && !lexer_check_post_primary_exp (context_p)) { do { if (context_p->stack_top_uint8 == LEXER_NEGATE) { is_negative_number = !is_negative_number; } #if JERRY_BUILTIN_BIGINT else if (JERRY_LIKELY (context_p->token.extra_value == LEXER_NUMBER_BIGINT)) { break; } #endif /* JERRY_BUILTIN_BIGINT */ parser_stack_pop_uint8 (context_p); } while (context_p->stack_top_uint8 == LEXER_PLUS || context_p->stack_top_uint8 == LEXER_NEGATE); } if (lexer_construct_number_object (context_p, true, is_negative_number)) { JERRY_ASSERT (context_p->lit_object.index <= CBC_PUSH_NUMBER_BYTE_RANGE_END); parser_emit_cbc_push_number (context_p, is_negative_number); break; } } cbc_opcode_t opcode = CBC_PUSH_LITERAL; if (context_p->token.keyword_type != LEXER_KEYW_EVAL) { if (context_p->last_cbc_opcode == CBC_PUSH_LITERAL) { context_p->last_cbc_opcode = CBC_PUSH_TWO_LITERALS; context_p->last_cbc.value = context_p->lit_object.index; context_p->last_cbc.literal_type = context_p->token.lit_location.type; context_p->last_cbc.literal_keyword_type = context_p->token.keyword_type; break; } if (context_p->last_cbc_opcode == CBC_PUSH_TWO_LITERALS) { context_p->last_cbc_opcode = CBC_PUSH_THREE_LITERALS; context_p->last_cbc.third_literal_index = context_p->lit_object.index; context_p->last_cbc.literal_type = context_p->token.lit_location.type; context_p->last_cbc.literal_keyword_type = context_p->token.keyword_type; break; } if (context_p->last_cbc_opcode == CBC_PUSH_THIS) { context_p->last_cbc_opcode = PARSER_CBC_UNAVAILABLE; opcode = CBC_PUSH_THIS_LITERAL; } } parser_emit_cbc_literal_from_token (context_p, (uint16_t) opcode); break; } case LEXER_KEYW_FUNCTION: { parser_parse_function_expression (context_p, PARSER_FUNCTION_CLOSURE | PARSER_IS_FUNC_EXPRESSION); break; } 
case LEXER_LEFT_BRACE: { #if JERRY_ESNEXT if (context_p->next_scanner_info_p->source_p == context_p->source_p && context_p->next_scanner_info_p->type == SCANNER_TYPE_INITIALIZER) { if (parser_is_assignment_expr (context_p)) { uint32_t flags = PARSER_PATTERN_NO_OPTS; if (context_p->next_scanner_info_p->u8_arg & SCANNER_LITERAL_OBJECT_HAS_REST) { flags |= PARSER_PATTERN_HAS_REST_ELEMENT; } parser_parse_object_initializer (context_p, flags); return parser_abort_parsing_after_assignment_expression (context_p); } scanner_release_next (context_p, sizeof (scanner_location_info_t)); } #endif /* JERRY_ESNEXT */ parser_parse_object_literal (context_p); break; } case LEXER_LEFT_SQUARE: { #if JERRY_ESNEXT if (context_p->next_scanner_info_p->source_p == context_p->source_p) { if (context_p->next_scanner_info_p->type == SCANNER_TYPE_INITIALIZER) { if (parser_is_assignment_expr (context_p)) { parser_parse_array_initializer (context_p, PARSER_PATTERN_NO_OPTS); return parser_abort_parsing_after_assignment_expression (context_p); } scanner_release_next (context_p, sizeof (scanner_location_info_t)); } else { JERRY_ASSERT (context_p->next_scanner_info_p->type == SCANNER_TYPE_LITERAL_FLAGS && context_p->next_scanner_info_p->u8_arg == SCANNER_LITERAL_NO_DESTRUCTURING); scanner_release_next (context_p, sizeof (scanner_info_t)); } } #endif /* JERRY_ESNEXT */ parser_parse_array_literal (context_p); break; } case LEXER_DIVIDE: case LEXER_ASSIGN_DIVIDE: { lexer_construct_regexp_object (context_p, false); uint16_t literal_index = (uint16_t) (context_p->literal_count - 1); if (context_p->last_cbc_opcode == CBC_PUSH_LITERAL) { context_p->last_cbc_opcode = CBC_PUSH_TWO_LITERALS; context_p->last_cbc.value = literal_index; } else if (context_p->last_cbc_opcode == CBC_PUSH_TWO_LITERALS) { context_p->last_cbc_opcode = CBC_PUSH_THREE_LITERALS; context_p->last_cbc.third_literal_index = literal_index; } else { parser_emit_cbc_literal (context_p, CBC_PUSH_LITERAL, literal_index); } context_p->last_cbc.literal_type = LEXER_REGEXP_LITERAL; context_p->last_cbc.literal_keyword_type = LEXER_EOS; break; } case LEXER_KEYW_THIS: { #if JERRY_ESNEXT if (context_p->status_flags & PARSER_ALLOW_SUPER_CALL) { parser_emit_cbc_ext (context_p, CBC_EXT_RESOLVE_LEXICAL_THIS); } else { #endif /* JERRY_ESNEXT */ parser_emit_cbc (context_p, CBC_PUSH_THIS); #if JERRY_ESNEXT } #endif /* JERRY_ESNEXT */ break; } case LEXER_LIT_TRUE: { parser_emit_cbc (context_p, CBC_PUSH_TRUE); break; } case LEXER_LIT_FALSE: { parser_emit_cbc (context_p, CBC_PUSH_FALSE); break; } case LEXER_LIT_NULL: { parser_emit_cbc (context_p, CBC_PUSH_NULL); break; } #if JERRY_ESNEXT case LEXER_KEYW_CLASS: { parser_parse_class (context_p, false); return false; } case LEXER_KEYW_SUPER: { if (context_p->status_flags & PARSER_ALLOW_SUPER) { if (lexer_check_next_characters (context_p, LIT_CHAR_DOT, LIT_CHAR_LEFT_SQUARE)) { parser_emit_cbc_ext (context_p, CBC_EXT_PUSH_SUPER); break; } if (lexer_check_next_character (context_p, LIT_CHAR_LEFT_PAREN) && (context_p->status_flags & PARSER_ALLOW_SUPER_CALL)) { parser_emit_cbc_ext (context_p, CBC_EXT_PUSH_SUPER_CONSTRUCTOR); break; } } parser_raise_error (context_p, PARSER_ERR_UNEXPECTED_SUPER_KEYWORD); } case LEXER_LEFT_PAREN: { JERRY_ASSERT (context_p->next_scanner_info_p->source_p == context_p->source_p && context_p->next_scanner_info_p->type == SCANNER_TYPE_FUNCTION); parser_check_assignment_expr (context_p); uint32_t arrow_status_flags = (PARSER_IS_FUNCTION | PARSER_IS_ARROW_FUNCTION | (context_p->status_flags & 
PARSER_INSIDE_CLASS_FIELD)); parser_parse_function_expression (context_p, arrow_status_flags); return parser_abort_parsing_after_assignment_expression (context_p); } case LEXER_KEYW_YIELD: { JERRY_ASSERT ((context_p->status_flags & PARSER_IS_GENERATOR_FUNCTION) && !(context_p->status_flags & PARSER_DISALLOW_AWAIT_YIELD)); if (context_p->token.lit_location.has_escape) { parser_raise_error (context_p, PARSER_ERR_INVALID_KEYWORD); } parser_check_assignment_expr (context_p); lexer_next_token (context_p); cbc_ext_opcode_t opcode = ((context_p->status_flags & PARSER_IS_ASYNC_FUNCTION) ? CBC_EXT_ASYNC_YIELD : CBC_EXT_YIELD); if (!lexer_check_yield_no_arg (context_p)) { if (context_p->token.type == LEXER_MULTIPLY) { lexer_next_token (context_p); opcode = ((context_p->status_flags & PARSER_IS_ASYNC_FUNCTION) ? CBC_EXT_ASYNC_YIELD_ITERATOR : CBC_EXT_YIELD_ITERATOR); } parser_parse_expression (context_p, PARSE_EXPR_NO_COMMA); } else { parser_emit_cbc (context_p, CBC_PUSH_UNDEFINED); } parser_emit_cbc_ext (context_p, opcode); return (context_p->token.type != LEXER_RIGHT_PAREN && context_p->token.type != LEXER_COMMA); } #endif /* JERRY_ESNEXT */ #if JERRY_MODULE_SYSTEM case LEXER_KEYW_IMPORT: { lexer_next_token (context_p); if (context_p->token.type != LEXER_LEFT_PAREN) { parser_raise_error (context_p, PARSER_ERR_LEFT_PAREN_EXPECTED); } lexer_next_token (context_p); parser_parse_expression (context_p, PARSE_EXPR_NO_COMMA); if (context_p->token.type != LEXER_RIGHT_PAREN) { parser_raise_error (context_p, PARSER_ERR_RIGHT_PAREN_EXPECTED); } parser_emit_cbc_ext (context_p, CBC_EXT_MODULE_IMPORT); break; } #endif /* JERRY_MODULE_SYSTEM */ default: { bool is_left_hand_side = (*grouping_level_p == PARSE_EXPR_LEFT_HAND_SIDE); parser_raise_error (context_p, (is_left_hand_side ? PARSER_ERR_LEFT_HAND_SIDE_EXP_EXPECTED : PARSER_ERR_PRIMARY_EXP_EXPECTED)); break; } } lexer_next_token (context_p); return false; } /* parser_parse_unary_expression */
1
[ "CWE-416" ]
jerryscript
3bcd48f72d4af01d1304b754ef19fe1a02c96049
32,347,926,419,598,707,000,000,000,000,000,000,000
456
Improve parse_identifier (#4691) Ascii string length is no longer computed during string allocation. JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz batizjob@gmail.com
Formattable::~Formattable() { dispose(); }
0
[ "CWE-190" ]
icu
53d8c8f3d181d87a6aa925b449b51c4a2c922a51
135,861,320,188,060,630,000,000,000,000,000,000,000
4
ICU-20246 Fixing another integer overflow in number parsing.
void hstrlenCommand(client *c) { robj *o; if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL || checkType(c,o,OBJ_HASH)) return; addReplyLongLong(c,hashTypeGetValueLength(o,c->argv[2]->ptr)); }
0
[ "CWE-190" ]
redis
f6a40570fa63d5afdd596c78083d754081d80ae3
256,120,571,656,443,440,000,000,000,000,000,000,000
7
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628) - fix possible heap corruption in ziplist and listpack resulting by trying to allocate more than the maximum size of 4GB. - prevent ziplist (hash and zset) from reaching size of above 1GB, will be converted to HT encoding, that's not a useful size. - prevent listpack (stream) from reaching size of above 1GB. - XADD will start a new listpack if the new record may cause the previous listpack to grow over 1GB. - XADD will respond with an error if a single stream record is over 1GB - List type (ziplist in quicklist) was truncating strings that were over 4GB, now it'll respond with an error.
TIFFWriteDirectoryTagLong8Array(TIFF* tif, uint32* ndir, TIFFDirEntry* dir, uint16 tag, uint32 count, uint64* value) { if (dir==NULL) { (*ndir)++; return(1); } return(TIFFWriteDirectoryTagCheckedLong8Array(tif,ndir,dir,tag,count,value)); }
0
[ "CWE-617" ]
libtiff
de144fd228e4be8aa484c3caf3d814b6fa88c6d9
26,974,338,801,450,537,000,000,000,000,000,000,000
9
TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963
static void free_recv_msg_list(struct list_head *q) { struct ipmi_recv_msg *msg, *msg2; list_for_each_entry_safe(msg, msg2, q, link) { list_del(&msg->link); ipmi_free_recv_msg(msg); } }
0
[ "CWE-416", "CWE-284" ]
linux
77f8269606bf95fcb232ee86f6da80886f1dfae8
171,961,343,521,113,000,000,000,000,000,000,000,000
9
ipmi: fix use-after-free of user->release_barrier.rda When we do the following test, we got oops in ipmi_msghandler driver while((1)) do service ipmievd restart & service ipmievd restart done --------------------------------------------------------------- [ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008 [ 294.230188] Mem abort info: [ 294.230190] ESR = 0x96000004 [ 294.230191] Exception class = DABT (current EL), IL = 32 bits [ 294.230193] SET = 0, FnV = 0 [ 294.230194] EA = 0, S1PTW = 0 [ 294.230195] Data abort info: [ 294.230196] ISV = 0, ISS = 0x00000004 [ 294.230197] CM = 0, WnR = 0 [ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a [ 294.230201] [0000803fea6ea008] pgd=0000000000000000 [ 294.230204] Internal error: Oops: 96000004 [#1] SMP [ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio [ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113 [ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017 [ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO) [ 294.297695] pc : __srcu_read_lock+0x38/0x58 [ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.307853] sp : ffff00001001bc80 [ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000 [ 294.316594] x27: 0000000000000000 x26: dead000000000100 [ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800 [ 294.327366] x23: 0000000000000000 x22: 0000000000000000 [ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018 [ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000 [ 294.343523] x17: 0000000000000000 x16: 0000000000000000 [ 294.348908] x15: 0000000000000000 x14: 0000000000000002 [ 294.354293] x13: 0000000000000000 x12: 0000000000000000 [ 294.359679] x11: 0000000000000000 x10: 0000000000100000 [ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004 [ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678 [ 294.375836] x5 : 000000000000000c x4 : 0000000000000000 [ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000 [ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001 [ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293) [ 294.398791] Call trace: [ 294.401266] __srcu_read_lock+0x38/0x58 [ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler] [ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler] [ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler] [ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler] [ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler] [ 294.451618] tasklet_action_common.isra.5+0x88/0x138 [ 294.460661] tasklet_action+0x2c/0x38 [ 294.468191] __do_softirq+0x120/0x2f8 [ 294.475561] irq_exit+0x134/0x140 [ 294.482445] __handle_domain_irq+0x6c/0xc0 [ 294.489954] gic_handle_irq+0xb8/0x178 [ 294.497037] el1_irq+0xb0/0x140 [ 294.503381] arch_cpu_idle+0x34/0x1a8 [ 294.510096] do_idle+0x1d4/0x290 [ 294.516322] cpu_startup_entry+0x28/0x30 [ 294.523230] secondary_start_kernel+0x184/0x1d0 [ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 
(c85f7c25) [ 294.539746] ---[ end trace 8a7a880dee570b29 ]--- [ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt [ 294.556837] SMP: stopping secondary CPUs [ 294.563996] Kernel Offset: disabled [ 294.570515] CPU features: 0x002,21006008 [ 294.577638] Memory Limit: none [ 294.587178] Starting crashdump kernel... [ 294.594314] Bye! Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda in __srcu_read_lock(), it causes oops. Fix this by calling cleanup_srcu_struct() when the refcount is zero. Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove") Cc: stable@vger.kernel.org # 4.18 Signed-off-by: Yang Yingliang <yangyingliang@huawei.com> Signed-off-by: Corey Minyard <cminyard@mvista.com>
asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode) { struct nameidata nd; int old_fsuid, old_fsgid; kernel_cap_t old_cap; int res; if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */ return -EINVAL; old_fsuid = current->fsuid; old_fsgid = current->fsgid; old_cap = current->cap_effective; current->fsuid = current->uid; current->fsgid = current->gid; /* * Clear the capabilities if we switch to a non-root user * * FIXME: There is a race here against sys_capset. The * capabilities can change yet we will restore the old * value below. We should hold task_capabilities_lock, * but we cannot because user_path_walk can sleep. */ if (current->uid) cap_clear(current->cap_effective); else current->cap_effective = current->cap_permitted; res = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW|LOOKUP_ACCESS, &nd); if (res) goto out; res = vfs_permission(&nd, mode); /* SuS v2 requires we report a read only fs too */ if(res || !(mode & S_IWOTH) || special_file(nd.dentry->d_inode->i_mode)) goto out_path_release; if(IS_RDONLY(nd.dentry->d_inode)) res = -EROFS; out_path_release: path_release(&nd); out: current->fsuid = old_fsuid; current->fsgid = old_fsgid; current->cap_effective = old_cap; return res; }
0
[ "CWE-264" ]
linux-2.6
7b82dc0e64e93f430182f36b46b79fcee87d3532
113,272,852,082,053,620,000,000,000,000,000,000,000
52
Remove suid/sgid bits on [f]truncate() .. to match what we do on write(). This way, people who write to files by using [f]truncate + writable mmap have the same semantics as if they were using the write() family of system calls. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static int do_end_io(struct multipath *m, struct request *clone, int error, struct dm_mpath_io *mpio) { /* * We don't queue any clone request inside the multipath target * during end I/O handling, since those clone requests don't have * bio clones. If we queue them inside the multipath target, * we need to make bio clones, that requires memory allocation. * (See drivers/md/dm.c:end_clone_bio() about why the clone requests * don't have bio clones.) * Instead of queueing the clone request here, we queue the original * request into dm core, which will remake a clone request and * clone bios for it and resubmit it later. */ int r = DM_ENDIO_REQUEUE; unsigned long flags; if (!error && !clone->errors) return 0; /* I/O complete */ if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ) return error; if (mpio->pgpath) fail_path(mpio->pgpath); spin_lock_irqsave(&m->lock, flags); if (!m->nr_valid_paths) { if (!m->queue_if_no_path) { if (!__must_push_back(m)) r = -EIO; } else { if (error == -EBADE) r = error; } } spin_unlock_irqrestore(&m->lock, flags); return r; }
0
[ "CWE-284", "CWE-264" ]
linux
ec8013beddd717d1740cfefb1a9b900deef85462
178,674,022,076,269,000,000,000,000,000,000,000,000
40
dm: do not forward ioctls from logical volumes to the underlying device A logical volume can map to just part of underlying physical volume. In this case, it must be treated like a partition. Based on a patch from Alasdair G Kergon. Cc: Alasdair G Kergon <agk@redhat.com> Cc: dm-devel@redhat.com Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/* Set or fetch imap timeout */ PHP_FUNCTION(imap_timeout) { zend_long ttype, timeout=-1; int timeout_type; if (zend_parse_parameters(ZEND_NUM_ARGS(), "l|l", &ttype, &timeout) == FAILURE) { RETURN_FALSE; } if (timeout == -1) { switch (ttype) { case 1: timeout_type = GET_OPENTIMEOUT; break; case 2: timeout_type = GET_READTIMEOUT; break; case 3: timeout_type = GET_WRITETIMEOUT; break; case 4: timeout_type = GET_CLOSETIMEOUT; break; default: RETURN_FALSE; break; } timeout = (zend_long) mail_parameters(NIL, timeout_type, NIL); RETURN_LONG(timeout); } else if (timeout >= 0) { switch (ttype) { case 1: timeout_type = SET_OPENTIMEOUT; break; case 2: timeout_type = SET_READTIMEOUT; break; case 3: timeout_type = SET_WRITETIMEOUT; break; case 4: timeout_type = SET_CLOSETIMEOUT; break; default: RETURN_FALSE; break; } timeout = (zend_long) mail_parameters(NIL, timeout_type, (void *) timeout); RETURN_TRUE; } else { RETURN_FALSE; } }
0
[ "CWE-88" ]
php-src
336d2086a9189006909ae06c7e95902d7d5ff77e
13,513,265,289,644,548,000,000,000,000,000,000,000
55
Disable rsh/ssh functionality in imap by default (bug #77153)
static char *ask_new_field(struct chfn_control *ctl, const char *question, char *def_val) { int len; char *buf = NULL; /* leave initialized to NULL or getline segfaults */ size_t dummy = 0; if (!def_val) def_val = ""; while (true) { printf("%s [%s]:", question, def_val); __fpurge(stdin); putchar(' '); fflush(stdout); if (getline(&buf, &dummy, stdin) < 0) errx(EXIT_FAILURE, _("Aborted.")); /* remove white spaces from string end */ ltrim_whitespace((unsigned char *) buf); len = rtrim_whitespace((unsigned char *) buf); if (len == 0) { free(buf); return xstrdup(def_val); } if (!strcasecmp(buf, "none")) { free(buf); ctl->changed = 1; return xstrdup(""); } if (check_gecos_string(question, buf) >= 0) break; } ctl->changed = 1; return buf; }
0
[ "CWE-209" ]
util-linux
faa5a3a83ad0cb5e2c303edbfd8cd823c9d94c17
116,951,210,502,340,210,000,000,000,000,000,000,000
38
chsh, chfn: remove readline support [CVE-2022-0563] The readline library uses INPUTRC= environment variable to get a path to the library config file. When the library cannot parse the specified file, it prints an error message containing data from the file. Unfortunately, the library does not use secure_getenv() (or a similar concept) to avoid vulnerabilities that could occur if set-user-ID or set-group-ID programs. Reported-by: Rory Mackie <rory.mackie@trailofbits.com> Signed-off-by: Karel Zak <kzak@redhat.com>
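The concept this message names, in compilable form: secure_getenv() returns NULL in set-user-ID or set-group-ID context, so an attacker-supplied INPUTRC-style variable is ignored exactly when privileges are elevated (glibc-specific; _GNU_SOURCE required).

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Unlike getenv(), secure_getenv() returns NULL whenever the
         * process runs set-user-ID or set-group-ID, so a hostile
         * environment variable cannot steer a privileged binary. */
        const char *inputrc = secure_getenv("INPUTRC");
        printf("INPUTRC: %s\n", inputrc ? inputrc : "(unset or ignored)");
        return 0;
    }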
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) { struct napi_struct *napi; napi = container_of(timer, struct napi_struct, timer); if (napi->gro_list) napi_schedule(napi); return HRTIMER_NORESTART; }
0
[ "CWE-400", "CWE-703" ]
linux
fac8e0f579695a3ecbc4d3cac369139d7f819971
260,367,093,010,787,550,000,000,000,000,000,000,000
10
tunnels: Don't apply GRO to multiple layers of encapsulation. When drivers express support for TSO of encapsulated packets, they only mean that they can do it for one layer of encapsulation. Supporting additional levels would mean updating, at a minimum, more IP length fields and they are unaware of this. No encapsulation device expresses support for handling offloaded encapsulated packets, so we won't generate these types of frames in the transmit path. However, GRO doesn't have a check for multiple levels of encapsulation and will attempt to build them. UDP tunnel GRO actually does prevent this situation but it only handles multiple UDP tunnels stacked on top of each other. This generalizes that solution to prevent any kind of tunnel stacking that would cause problems. Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack") Signed-off-by: Jesse Gross <jesse@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
static bool is_domain_in_set( const uint32_t domain_id, const Domains& domains) { bool returned_value = false; for (auto range : domains.ranges) { if (range.second == 0) { if (domain_id == range.first) { returned_value = true; break; } } else { if (domain_id >= range.first && domain_id <= range.second) { returned_value = true; break; } } } return returned_value; }
0
[ "CWE-284" ]
Fast-DDS
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
199,377,171,246,473,500,000,000,000,000,000,000,000
29
check remote permissions (#1387) * Refs 5346. Blackbox test Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. one-way string compare Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Do not add partition separator on last partition Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Access control unit testing It only covers Partition and Topic permissions Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs #3680. Fix partition check on Permissions plugin. Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Fix tests on mac Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Fix windows tests Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Avoid memory leak on test Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Proxy data mocks should not return temporary objects Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * refs 3680. uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> Co-authored-by: Miguel Company <MiguelCompany@eprosima.com>
static bool ok_jpg_read_sos(ok_jpg_decoder *decoder) { // JPEG spec: Table B.3 ok_jpg *jpg = decoder->jpg; uint8_t buffer[16]; const size_t header_size = 3; if (!ok_read(decoder, buffer, header_size)) { return false; } uint16_t length = readBE16(buffer); decoder->num_scan_components = buffer[2]; if (decoder->num_scan_components > decoder->num_components) { ok_jpg_error(jpg, OK_JPG_ERROR_INVALID, "Invalid SOS segment (Ns)"); return false; } const size_t expected_size = 3 + (size_t)decoder->num_scan_components * 2; if (length != expected_size + header_size) { ok_jpg_error(jpg, OK_JPG_ERROR_INVALID, "Invalid SOS segment (L)"); return false; } if (sizeof(buffer) < expected_size) { ok_jpg_error(jpg, OK_JPG_ERROR_INVALID, "Too many components for buffer"); return false; } if (!ok_read(decoder, buffer, expected_size)) { return false; } uint8_t *src = buffer; for (int i = 0; i < decoder->num_scan_components; i++, src += 2) { int C = src[0]; bool component_found = false; for (int j = 0; j < decoder->num_components; j++) { ok_jpg_component *c = decoder->components + j; if (c->id == C) { decoder->scan_components[i] = j; component_found = true; } } if (!component_found) { ok_jpg_error(jpg, OK_JPG_ERROR_INVALID, "Invalid SOS segment (C)"); return false; } // Make sure component is unique for (int j = 0; j < i; j++) { if (decoder->scan_components[i] == decoder->scan_components[j]) { ok_jpg_error(jpg, OK_JPG_ERROR_INVALID, "Invalid SOS segment (component ids)"); return false; } } ok_jpg_component *comp = decoder->components + decoder->scan_components[i]; comp->Td = src[1] >> 4; comp->Ta = src[1] & 0x0f; if (comp->Td > 3 || comp->Ta > 3) { ok_jpg_error(jpg, OK_JPG_ERROR_INVALID, "Invalid SOS segment (Td/Ta)"); return false; } } decoder->scan_start = src[0]; decoder->scan_end = src[1]; decoder->scan_prev_scale = src[2] >> 4; decoder->scan_scale = src[2] & 0x0f; if (decoder->progressive) { if (decoder->scan_start < 0 || decoder->scan_start > 63 || decoder->scan_end < decoder->scan_start || decoder->scan_end > 63 || decoder->scan_prev_scale < 0 || decoder->scan_prev_scale > 13 || decoder->scan_scale < 0 || decoder->scan_scale > 13) { ok_jpg_error(jpg, OK_JPG_ERROR_INVALID, "Invalid progressive SOS segment (Ss/Se/Ah/Al)"); return false; } } else { // Sequential if (decoder->scan_start != 0 || decoder->scan_end != 63 || decoder->scan_prev_scale != 0 || decoder->scan_scale != 0) { ok_jpg_error(jpg, OK_JPG_ERROR_INVALID, "Invalid SOS segment (Ss/Se/Ah/Al)"); return false; } } #if 0 printf("SOS: Scan components: ("); for (int i = 0; i < decoder->num_scan_components; i++) { ok_jpg_component *comp = decoder->components + decoder->scan_components[i]; if (i != 0) { printf(","); } printf("%i", comp->id); } printf(") "); for (int i = decoder->num_scan_components; i < decoder->num_components; i++) { printf(" "); } printf("\trange: %i...%i \tah: %i al: %i\n", decoder->scan_start, decoder->scan_end, decoder->scan_prev_scale, decoder->scan_scale); #endif return ok_jpg_decode_scan(decoder); }
0
[ "CWE-787" ]
ok-file-formats
a9cc1711dd4ed6a215038f1c5c03af0ef52c3211
65,771,290,652,425,700,000,000,000,000,000,000,000
103
ok_jpg: Fix invalid DHT (#11)
int __hnbad(const char *dotted) { unsigned char c, n, *cp; unsigned char buf[NS_MAXCDNAME]; cp = (unsigned char *)dotted; while ((c = *cp++)) if (c < 0x21 || c > 0x7E) return (1); if (ns_name_pton(dotted, buf, sizeof(buf)) < 0) return (2); if (buf[0] > 0 && buf[1] == '-') return (3); cp = buf; while ((n = *cp++)) { if (n > 63) return (4); while (n--) { c = *cp++; if (c < '-' || (c > '-' && c < '0') || (c > '9' && c < 'A') || (c > 'Z' && c < '_') || (c > '_' && c < 'a') || c > 'z') return (5); } } return (0); }
0
[ "CWE-79" ]
uclibc-ng
0f822af0445e5348ce7b7bd8ce1204244f31d174
177,572,401,761,957,170,000,000,000,000,000,000,000
30
libc/inet/resolv.c: add __hnbad to check DNS entries for validity… … using the same rules glibc does also call __hnbad in some places to check answers
static int cmd_match(const char *cmd, const char *str) { /* See if cmd, written into a sysfs file, matches * str. They must either be the same, or cmd can * have a trailing newline */ while (*cmd && *str && *cmd == *str) { cmd++; str++; } if (*cmd == '\n') cmd++; if (*str || *cmd) return 0; return 1; }
0
[ "CWE-200" ]
linux
b6878d9e03043695dbf3fa1caa6dfc09db225b16
298,990,157,567,172,940,000,000,000,000,000,000,000
16
md: use kzalloc() when bitmap is disabled In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a mdu_bitmap_file_t called "file". 5769 file = kmalloc(sizeof(*file), GFP_NOIO); 5770 if (!file) 5771 return -ENOMEM; This structure is copied to user space at the end of the function. 5786 if (err == 0 && 5787 copy_to_user(arg, file, sizeof(*file))) 5788 err = -EFAULT But if bitmap is disabled only the first byte of "file" is initialized with zero, so it's possible to read some bytes (up to 4095) of kernel space memory from user space. This is an information leak. 5775 /* bitmap disabled, zero the first byte and copy out */ 5776 if (!mddev->bitmap_info.file) 5777 file->pathname[0] = '\0'; Signed-off-by: Benjamin Randazzo <benjamin@randazzo.fr> Signed-off-by: NeilBrown <neilb@suse.com>
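A userspace model of the fix in this title (names hypothetical, calloc standing in for kzalloc): zero-initialize any buffer that is later copied out wholesale, so untouched bytes cannot leak heap contents.

    #include <stdlib.h>
    #include <string.h>

    struct bitmap_file_model { char pathname[4096]; };

    static int get_bitmap_file_model(struct bitmap_file_model *out, int bitmap_enabled)
    {
        /* calloc (kzalloc in the kernel) zeroes the whole structure up
         * front; with plain malloc, writing only pathname[0] below would
         * leave up to 4095 stale heap bytes to be copied to the caller. */
        struct bitmap_file_model *file = calloc(1, sizeof(*file));
        if (!file)
            return -1;
        if (!bitmap_enabled)
            file->pathname[0] = '\0';
        memcpy(out, file, sizeof(*file));   /* stand-in for copy_to_user() */
        free(file);
        return 0;
    }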
static inline s16 fixp_mult(s16 a, s16 b) { a = ((s32)a * 0x100) / 0x7fff; return ((s32)(a * b)) >> FRAC_N; }
0
[ "CWE-416" ]
linux
fa3a5a1880c91bb92594ad42dfe9eedad7996b86
219,189,792,942,650,700,000,000,000,000,000,000,000
5
Input: ff-memless - kill timer in destroy() No timer must be left running when the device goes away. Signed-off-by: Oliver Neukum <oneukum@suse.com> Reported-and-tested-by: syzbot+b6c55daa701fc389e286@syzkaller.appspotmail.com Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/1573726121.17351.3.camel@suse.com Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
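A userspace sketch of the rule stated in this message (POSIX timers modeling the kernel timer; names hypothetical): a destructor must stop any timer that could still fire and touch the object it is about to free.

    #include <stdlib.h>
    #include <time.h>

    struct ff_state_model {
        timer_t timer;   /* periodic effect timer */
        int    *effects;
    };

    /* Deleting (and thereby disarming) the timer before freeing the
     * state guarantees no late callback can dereference freed memory,
     * which is the use-after-free the report describes. */
    static void ff_destroy_model(struct ff_state_model *ff)
    {
        timer_delete(ff->timer);
        free(ff->effects);
        free(ff);
    }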
DestroyJNG(unsigned char *chunk,Image **color_image, ImageInfo **color_image_info,Image **alpha_image,ImageInfo **alpha_image_info) { (void) RelinquishMagickMemory(chunk); if (color_image_info && *color_image_info) { DestroyImageInfo(*color_image_info); *color_image_info = (ImageInfo *)NULL; } if (alpha_image_info && *alpha_image_info) { DestroyImageInfo(*alpha_image_info); *alpha_image_info = (ImageInfo *)NULL; } if (color_image && *color_image) { DestroyImage(*color_image); *color_image = (Image *)NULL; } if (alpha_image && *alpha_image) { DestroyImage(*alpha_image); *alpha_image = (Image *)NULL; } }
0
[ "CWE-835" ]
ImageMagick6
9eda4b36a8695e4a0cd27bea28b9c173c68a01ec
62,280,414,334,980,750,000,000,000,000,000,000,000
25
Fixed infinite loop (#1095).
SdMmcHcPowerControl ( IN EFI_PCI_IO_PROTOCOL *PciIo, IN UINT8 Slot, IN UINT8 PowerCtrl ) { EFI_STATUS Status; // // Clr SD Bus Power // PowerCtrl &= (UINT8)~BIT0; Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_POWER_CTRL, FALSE, sizeof (PowerCtrl), &PowerCtrl); if (EFI_ERROR (Status)) { return Status; } // // Set SD Bus Voltage Select and SD Bus Power fields in Power Control Register // PowerCtrl |= BIT0; Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_POWER_CTRL, FALSE, sizeof (PowerCtrl), &PowerCtrl); return Status; }
0
[]
edk2
e36d5ac7d10a6ff5becb0f52fdfd69a1752b0d14
212,440,923,779,137,930,000,000,000,000,000,000,000
25
MdeModulePkg/SdMmcPciHcDxe: Fix double PciIo Unmap in TRB creation (CVE-2019-14587) REF:https://bugzilla.tianocore.org/show_bug.cgi?id=1989 The commit will avoid unmapping the same resource in error handling logic for function BuildAdmaDescTable() and SdMmcCreateTrb(). For the error handling in BuildAdmaDescTable(): The error is directly related with the corresponding Map() operation (mapped address beyond 4G, which is not supported in ADMA), so the Unmap() operation is done in the error handling logic, and then setting 'Trb->AdmaMap' to NULL to avoid double Unmap. For the error handling in SdMmcCreateTrb(): The error is not directly related with the corresponding Map() operation, so the commit will update the code to left SdMmcFreeTrb() for the Unmap operation to avoid double Unmap. Cc: Jian J Wang <jian.j.wang@intel.com> Cc: Ray Ni <ray.ni@intel.com> Signed-off-by: Hao A Wu <hao.a.wu@intel.com> Reviewed-by: Jian J Wang <jian.j.wang@intel.com>
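A minimal model of the double-unmap guard this message describes (names hypothetical, not the EDK2 code): after an error path releases a mapping, clear the handle so the shared cleanup routine skips it.

    #include <stdlib.h>

    struct trb_model { void *adma_map; };

    static void unmap_model(void *m) { free(m); }

    /* Error path inside table construction: release the mapping, then
     * clear the handle so the shared cleanup below cannot unmap the
     * same resource a second time. */
    static int build_desc_table_model(struct trb_model *t, int map_unusable)
    {
        if (map_unusable) {              /* e.g. mapped above 4 GiB */
            unmap_model(t->adma_map);
            t->adma_map = NULL;
            return -1;
        }
        return 0;
    }

    /* Shared cleanup: unmaps only handles that are still live. */
    static void free_trb_model(struct trb_model *t)
    {
        if (t->adma_map)
            unmap_model(t->adma_map);
    }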
static int validate_slab(struct kmem_cache *s, struct page *page, unsigned long *map) { void *p; void *addr = page_address(page); if (!check_slab(s, page) || !on_freelist(s, page, NULL)) return 0; /* Now we know that a valid freelist exists */ bitmap_zero(map, page->objects); for_each_free_object(p, s, page->freelist) { set_bit(slab_index(p, s, addr), map); if (!check_object(s, page, p, 0)) return 0; } for_each_object(p, s, addr, page->objects) if (!test_bit(slab_index(p, s, addr), map)) if (!check_object(s, page, p, 1)) return 0; return 1; }
0
[ "CWE-189" ]
linux
f8bd2258e2d520dff28c855658bd24bdafb5102d
222,784,563,880,556,800,000,000,000,000,000,000,000
25
remove div_long_long_rem x86 is the only arch right now, which provides an optimized for div_long_long_rem and it has the downside that one has to be very careful that the divide doesn't overflow. The API is a little akward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs. There is little incentive to keep this API alive, so this converts the few users to the new API. Signed-off-by: Roman Zippel <zippel@linux-m68k.org> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: john stultz <johnstul@us.ibm.com> Cc: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); }
0
[]
linux
bc8a76a152c5f9ef3b48104154a65a68a8b76946
159,521,327,316,952,270,000,000,000,000,000,000,000
4
drm/i915/gen9: Clear residual context state on context switch Intel ID: PSIRT-TA-201910-001 CVEID: CVE-2019-14615 Intel GPU Hardware prior to Gen11 does not clear EU state during a context switch. This can result in information leakage between contexts. For Gen8 and Gen9, hardware provides a mechanism for fast cleardown of the EU state, by issuing a PIPE_CONTROL with bit 27 set. We can use this in a context batch buffer to explicitly cleardown the state on every context switch. As this workaround is already in place for gen8, we can borrow the code verbatim for Gen9. Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> Signed-off-by: Akeem G Abodunrin <akeem.g.abodunrin@intel.com> Cc: Kumar Valsan Prathap <prathap.kumar.valsan@intel.com> Cc: Chris Wilson <chris.p.wilson@intel.com> Cc: Balestrieri Francesco <francesco.balestrieri@intel.com> Cc: Bloomfield Jon <jon.bloomfield@intel.com> Cc: Dutt Sudeep <sudeep.dutt@intel.com>
ldns_rr2buffer_str(ldns_buffer *output, const ldns_rr *rr) { return ldns_rr2buffer_str_fmt(output, ldns_output_format_default, rr); }
0
[ "CWE-415" ]
ldns
070b4595981f48a21cc6b4f5047fdc2d09d3da91
110,657,315,804,234,810,000,000,000,000,000,000,000
4
CAA and URI
struct regulator *regulator_get_exclusive(struct device *dev, const char *id) { return _regulator_get(dev, id, true, false); }
0
[ "CWE-416" ]
linux
60a2362f769cf549dc466134efe71c8bf9fbaaba
8,011,957,973,666,654,000,000,000,000,000,000,000
4
regulator: core: Fix regulator_ena_gpio_free not to access pin after freeing After freeing a pin from regulator_ena_gpio_free, the loop can still access the pin. So this patch fixes the loop not to access the pin after freeing. Signed-off-by: Seung-Woo Kim <sw0312.kim@samsung.com> Signed-off-by: Mark Brown <broonie@kernel.org>
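The bug class behind this fix is advancing a list walk through a node that was just freed. A minimal userspace sketch of the safe idiom — read the next pointer before freeing — using a stand-in list type rather than the regulator framework's:

#include <stdlib.h>

struct node { struct node *next; };

static void free_list(struct node *head)
{
    struct node *n = head;
    while (n != NULL) {
        struct node *next = n->next;  /* read next BEFORE freeing n */
        free(n);
        n = next;                     /* never dereference freed memory */
    }
}

int main(void)
{
    struct node *head = NULL;
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        if (n == NULL) { free_list(head); return 1; }
        n->next = head;
        head = n;
    }
    free_list(head);
    return 0;
}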
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c) { int alpha = (int8_t) vp8_rac_get_uint(c, 8); int beta = (int8_t) vp8_rac_get_uint(c, 8); int ret; if (!s->keyframe && (alpha || beta)) { int width = s->mb_width * 16; int height = s->mb_height * 16; AVFrame *src, *dst; if (!s->framep[VP56_FRAME_PREVIOUS] || !s->framep[VP56_FRAME_GOLDEN]) { av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n"); return AVERROR_INVALIDDATA; } dst = src = s->framep[VP56_FRAME_PREVIOUS]->tf.f; /* preserve the golden frame, write a new previous frame */ if (s->framep[VP56_FRAME_GOLDEN] == s->framep[VP56_FRAME_PREVIOUS]) { s->framep[VP56_FRAME_PREVIOUS] = vp8_find_free_buffer(s); if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0) return ret; dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f; copy_chroma(dst, src, width, height); } fade(dst->data[0], dst->linesize[0], src->data[0], src->linesize[0], width, height, alpha, beta); } return 0; }
0
[ "CWE-119", "CWE-787" ]
FFmpeg
6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef
68,950,931,675,687,430,000,000,000,000,000,000,000
38
avcodec/webp: Always set pix_fmt Fixes: out of array access Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632 Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760 Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg Reviewed-by: "Ronald S. Bultje" <rsbultje@gmail.com> Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
static int get_malloc_block_total_size(void *ptr) { mbed_heap_overhead_t *c = (mbed_heap_overhead_t *)((char *)ptr - offsetof(mbed_heap_overhead, next)); // Skip the padding area if (c->size < 0) { c = (mbed_heap_overhead_t *)((char *)c + c->size); } // Mask LSB as it is used for usage flags return (c->size & ~0x1); }
0
[ "CWE-190" ]
mbed-os
4450464fc87fb104a45190a2334b82b3bb2b13fb
256,021,059,601,632,560,000,000,000,000,000,000,000
11
Add integer overflow check to the malloc wrappers Add a check that the combined size of the buffer to allocate and alloc_info_t does not exceed the maximum integer value representable by size_t.
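The hardening described here is a pre-addition overflow check: reject any request where payload size plus bookkeeping header would wrap size_t. A minimal sketch of such a checked wrapper (alloc_header_t is a hypothetical stand-in for mbed's alloc_info_t):

#include <stdint.h>
#include <stdlib.h>

typedef struct { size_t size; } alloc_header_t;  /* stand-in for alloc_info_t */

static void *checked_malloc(size_t size)
{
    /* Reject sizes where size + sizeof(header) would overflow size_t. */
    if (size > SIZE_MAX - sizeof(alloc_header_t))
        return NULL;
    alloc_header_t *h = malloc(size + sizeof(alloc_header_t));
    if (h == NULL)
        return NULL;
    h->size = size;
    return h + 1;   /* caller sees the memory just past the header */
}

int main(void)
{
    void *p = checked_malloc(100);
    if (p != NULL)
        free((alloc_header_t *)p - 1);  /* free from the header, not the payload */
    return checked_malloc(SIZE_MAX) == NULL ? 0 : 1;  /* huge size is rejected */
}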
SpaceToBatchNDContext(TfLiteContext* context, TfLiteNode* node) { input = GetInput(context, node, 0); block_shape = GetInput(context, node, 1); paddings = GetInput(context, node, 2); output = GetOutput(context, node, 0); }
0
[ "CWE-369" ]
tensorflow
6d36ba65577006affb272335b7c1abd829010708
220,714,602,661,280,300,000,000,000,000,000,000,000
6
Prevent division by 0 PiperOrigin-RevId: 370984990 Change-Id: Ib324955bbeb1cbd97c82fd5d61a00a2697c9a2de
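The corresponding guard is simply to refuse a zero block dimension before the kernel divides input extents by it. A minimal C sketch of that validation (names are illustrative, not the TFLite API):

#include <stdio.h>

/* Returns 0 on success, -1 if any block dimension would divide by zero. */
static int check_block_shape(const int *block_shape, int n)
{
    for (int i = 0; i < n; i++)
        if (block_shape[i] == 0)
            return -1;   /* would trigger a division by zero later */
    return 0;
}

int main(void)
{
    int block[2] = {2, 0};
    printf("%s\n", check_block_shape(block, 2) ? "rejected" : "ok");
    return 0;
}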
const SubFramework* Binary::sub_framework() const { return command<SubFramework>(); }
0
[ "CWE-703" ]
LIEF
7acf0bc4224081d4f425fcc8b2e361b95291d878
283,508,667,057,539,600,000,000,000,000,000,000,000
3
Resolve #764
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid, unsigned int group, int report, gfp_t flags) { int err = 0; if (group) { int exclude_pid = 0; if (report) { atomic_inc(&skb->users); exclude_pid = pid; } /* errors reported via destination sk->sk_err, but propagate * delivery errors if NETLINK_BROADCAST_ERROR flag is set */ err = nlmsg_multicast(sk, skb, exclude_pid, group, flags); } if (report) { int err2; err2 = nlmsg_unicast(sk, skb, pid); if (!err || err == -ESRCH) err = err2; } return err; }
0
[]
linux-2.6
16e5726269611b71c930054ffe9b858c1cea88eb
322,124,535,911,354,570,000,000,000,000,000,000,000
28
af_unix: dont send SCM_CREDENTIALS by default Since commit 7361c36c5224 (af_unix: Allow credentials to work across user and pid namespaces) af_unix performance dropped a lot. This is because we now take a reference on pid and cred in each write(), and release them in read(), usually done from another process, eventually from another cpu. This triggers false sharing. # Events: 154K cycles # # Overhead Command Shared Object Symbol # ........ ....... .................. ......................... # 10.40% hackbench [kernel.kallsyms] [k] put_pid 8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg 7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg 6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock 4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb 4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns 4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred 2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm 2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count 1.75% hackbench [kernel.kallsyms] [k] fget_light 1.51% hackbench [kernel.kallsyms] [k] __mutex_lock_interruptible_slowpath 1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb This patch includes SCM_CREDENTIALS information in a af_unix message/skb only if requested by the sender, [man 7 unix for details how to include ancillary data using sendmsg() system call] Note: This might break buggy applications that expected SCM_CREDENTIAL from an unaware write() system call, and receiver not using SO_PASSCRED socket option. If SOCK_PASSCRED is set on source or destination socket, we still include credentials for mere write() syscalls. Performance boost in hackbench : more than 50% gain on a 16 thread machine (2 quad-core cpus, 2 threads per core) hackbench 20 thread 2000 4.228 sec instead of 9.102 sec Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Acked-by: Tim Chen <tim.c.chen@linux.intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static void autoname_imp_trampoline(RzCore *core, RzAnalysisFunction *fcn) { if (rz_list_length(fcn->bbs) == 1 && ((RzAnalysisBlock *)rz_list_first(fcn->bbs))->ninstr == 1) { RzList *xrefs = rz_analysis_function_get_xrefs_from(fcn); if (xrefs && rz_list_length(xrefs) == 1) { RzAnalysisXRef *xref = rz_list_first(xrefs); if (xref->type != RZ_ANALYSIS_REF_TYPE_CALL) { /* Some fcns don't return */ RzFlagItem *flg = rz_flag_get_i(core->flags, xref->to); if (flg && rz_str_startswith(flg->name, "sym.imp.")) { RZ_FREE(fcn->name); fcn->name = rz_str_newf("sub.%s", flg->name + 8); } } } rz_list_free(xrefs); } }
0
[ "CWE-703" ]
rizin
6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939
160,723,993,652,495,910,000,000,000,000,000,000,000
16
Initialize retctx, ctx before freeing the inner elements In rz_core_analysis_type_match the retctx structure was initialized on the stack only after a "goto out_function", where a field of that structure was freed. When the goto path is taken, the field is not properly initialized, and it can cause a crash of Rizin or have other effects. Fixes: CVE-2021-4022
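The general rule the fix applies: every variable that a cleanup label releases must be initialized before the first goto that can reach that label. A minimal C sketch of the pattern (hypothetical names, not the Rizin code):

#include <stdlib.h>

struct ctx { char *buf; };

static int do_work(int fail_early)
{
    struct ctx c = { NULL };          /* initialized BEFORE any goto */
    int ret = -1;

    if (fail_early)
        goto out;                     /* cleanup sees a well-defined c.buf */

    c.buf = malloc(128);
    if (c.buf == NULL)
        goto out;

    ret = 0;
out:
    free(c.buf);                      /* free(NULL) is a no-op */
    return ret;
}

int main(void) { return do_work(1) == -1 ? 0 : 1; }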
copy64 (uint32x * M, uschar *in) { int i; for (i = 0; i < 16; i++) M[i] = (in[i * 4 + 3] << 24) | (in[i * 4 + 2] << 16) | (in[i * 4 + 1] << 8) | (in[i * 4 + 0] << 0); }
0
[ "CWE-125" ]
exim
57aa14b216432be381b6295c312065b2fd034f86
212,882,461,621,303,370,000,000,000,000,000,000,000
8
Fix SPA authenticator, checking client-supplied data before using it. Bug 2571
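copy64 above reads exactly 64 bytes from 'in', so the caller must prove the client actually supplied that many. A minimal sketch of a length-checked variant (buffer limits are illustrative, not Exim's):

#include <stdint.h>
#include <stddef.h>

/* Unpack 64 little-endian bytes into 16 words, but only after the
 * caller has proven the input really holds 64 bytes. */
static int copy64_checked(uint32_t M[16], const unsigned char *in, size_t inlen)
{
    if (in == NULL || inlen < 64)
        return -1;                 /* reject short client-supplied data */
    for (int i = 0; i < 16; i++)
        M[i] = (uint32_t)in[i * 4] |
               ((uint32_t)in[i * 4 + 1] << 8) |
               ((uint32_t)in[i * 4 + 2] << 16) |
               ((uint32_t)in[i * 4 + 3] << 24);
    return 0;
}

int main(void)
{
    unsigned char b[64] = {0};
    uint32_t M[16];
    return copy64_checked(M, b, sizeof b);
}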
shell_gtk_embed_get_preferred_height (ClutterActor *actor, float for_width, float *min_height_p, float *natural_height_p) { ShellGtkEmbed *embed = SHELL_GTK_EMBED (actor); ShellGtkEmbedPrivate *priv = shell_gtk_embed_get_instance_private (embed); if (priv->window && gtk_widget_get_visible (GTK_WIDGET (priv->window))) { GtkRequisition min_req, natural_req; gtk_widget_get_preferred_size (GTK_WIDGET (priv->window), &min_req, &natural_req); *min_height_p = min_req.height; *natural_height_p = natural_req.height; } else *min_height_p = *natural_height_p = 0; }
0
[]
gnome-shell
90c55e1977fde252b79bcfd9d0ef41144fb21fe2
301,832,194,646,989,780,000,000,000,000,000,000,000
20
gtk-embed: ensure we only listen for window-created events once If a tray icon gets a mapped and unmapped and the mapped again in quick succession, we can end up with multiple handlers listening for window creation events. This commit tries to guard against that by only listening for window-created events when we don't know the actor associated with the icon. https://bugzilla.gnome.org/show_bug.cgi?id=787361
dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); struct class_dev_iter iter; struct device *dev; class_dev_iter_init(&iter, &block_class, NULL, &disk_type); while ((dev = class_dev_iter_next(&iter))) { struct gendisk *disk = dev_to_disk(dev); struct hd_struct *part; if (strcmp(dev_name(dev), name)) continue; if (partno < disk->minors) { /* We need to return the right devno, even * if the partition doesn't exist yet. */ devt = MKDEV(MAJOR(dev->devt), MINOR(dev->devt) + partno); break; } part = disk_get_part(disk, partno); if (part) { devt = part_devt(part); disk_put_part(part); break; } disk_put_part(part); } class_dev_iter_exit(&iter); return devt; }
0
[ "CWE-416" ]
linux-stable
77da160530dd1dc94f6ae15a981f24e5f0021e84
200,024,962,496,274,640,000,000,000,000,000,000,000
33
block: fix use-after-free in seq file I got a KASAN report of use-after-free: ================================================================== BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508 Read of size 8 by task trinity-c1/315 ============================================================================= BUG kmalloc-32 (Not tainted): kasan: bad access detected ----------------------------------------------------------------------------- Disabling lock debugging due to kernel taint INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315 ___slab_alloc+0x4f1/0x520 __slab_alloc.isra.58+0x56/0x80 kmem_cache_alloc_trace+0x260/0x2a0 disk_seqf_start+0x66/0x110 traverse+0x176/0x860 seq_read+0x7e3/0x11a0 proc_reg_read+0xbc/0x180 do_loop_readv_writev+0x134/0x210 do_readv_writev+0x565/0x660 vfs_readv+0x67/0xa0 do_preadv+0x126/0x170 SyS_preadv+0xc/0x10 do_syscall_64+0x1a1/0x460 return_from_SYSCALL_64+0x0/0x6a INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315 __slab_free+0x17a/0x2c0 kfree+0x20a/0x220 disk_seqf_stop+0x42/0x50 traverse+0x3b5/0x860 seq_read+0x7e3/0x11a0 proc_reg_read+0xbc/0x180 do_loop_readv_writev+0x134/0x210 do_readv_writev+0x565/0x660 vfs_readv+0x67/0xa0 do_preadv+0x126/0x170 SyS_preadv+0xc/0x10 do_syscall_64+0x1a1/0x460 return_from_SYSCALL_64+0x0/0x6a CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480 ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480 ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970 Call Trace: [<ffffffff81d6ce81>] dump_stack+0x65/0x84 [<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0 [<ffffffff814704ff>] object_err+0x2f/0x40 [<ffffffff814754d1>] kasan_report_error+0x221/0x520 [<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40 [<ffffffff83888161>] klist_iter_exit+0x61/0x70 [<ffffffff82404389>] class_dev_iter_exit+0x9/0x10 [<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50 [<ffffffff8151f812>] seq_read+0x4b2/0x11a0 [<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180 [<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210 [<ffffffff814b4c45>] do_readv_writev+0x565/0x660 [<ffffffff814b8a17>] vfs_readv+0x67/0xa0 [<ffffffff814b8de6>] do_preadv+0x126/0x170 [<ffffffff814b92ec>] SyS_preadv+0xc/0x10 This problem can occur in the following situation: open() - pread() - .seq_start() - iter = kmalloc() // succeeds - seqf->private = iter - .seq_stop() - kfree(seqf->private) - pread() - .seq_start() - iter = kmalloc() // fails - .seq_stop() - class_dev_iter_exit(seqf->private) // boom! old pointer As the comment in disk_seqf_stop() says, stop is called even if start failed, so we need to reinitialise the private pointer to NULL when seq iteration stops. An alternative would be to set the private pointer to NULL when the kmalloc() in disk_seqf_start() fails. Cc: stable@vger.kernel.org Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com> Acked-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@fb.com>
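The pattern of the fix: an iterator's stop routine must null its cached private pointer after freeing it, because stop runs even when a later start fails. A minimal userspace analog (the seq struct is a stand-in, not the kernel seq_file API):

#include <stdlib.h>

struct seq { void *private; };

static void *iter_start(struct seq *s, int fail)
{
    s->private = fail ? NULL : malloc(32);
    return s->private;
}

static void iter_stop(struct seq *s)
{
    free(s->private);
    s->private = NULL;   /* crucial: stop() may run again after a failed start() */
}

int main(void)
{
    struct seq s = { NULL };
    iter_start(&s, 0);
    iter_stop(&s);       /* frees and clears */
    iter_start(&s, 1);   /* allocation fails, private stays NULL */
    iter_stop(&s);       /* without the reset above, this would double-free */
    return 0;
}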
int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime) { int savelru = server.maxmemory_policy & MAXMEMORY_FLAG_LRU; int savelfu = server.maxmemory_policy & MAXMEMORY_FLAG_LFU; /* Save the expire time */ if (expiretime != -1) { if (rdbSaveType(rdb,RDB_OPCODE_EXPIRETIME_MS) == -1) return -1; if (rdbSaveMillisecondTime(rdb,expiretime) == -1) return -1; } /* Save the LRU info. */ if (savelru) { uint64_t idletime = estimateObjectIdleTime(val); idletime /= 1000; /* Using seconds is enough and requires less space.*/ if (rdbSaveType(rdb,RDB_OPCODE_IDLE) == -1) return -1; if (rdbSaveLen(rdb,idletime) == -1) return -1; } /* Save the LFU info. */ if (savelfu) { uint8_t buf[1]; buf[0] = LFUDecrAndReturn(val); /* We can encode this in exactly two bytes: the opcode and an 8 * bit counter, since the frequency is logarithmic with a 0-255 range. * Note that we do not store the halving time because to reset it * a single time when loading does not affect the frequency much. */ if (rdbSaveType(rdb,RDB_OPCODE_FREQ) == -1) return -1; if (rdbWriteRaw(rdb,buf,1) == -1) return -1; } /* Save type, key, value */ if (rdbSaveObjectType(rdb,val) == -1) return -1; if (rdbSaveStringObject(rdb,key) == -1) return -1; if (rdbSaveObject(rdb,val,key) == -1) return -1; /* Delay return if required (for testing) */ if (server.rdb_key_save_delay) usleep(server.rdb_key_save_delay); return 1; }
0
[ "CWE-190" ]
redis
a30d367a71b7017581cf1ca104242a3c644dec0f
283,956,936,436,676,320,000,000,000,000,000,000,000
41
Fix Integer overflow issue with intsets (CVE-2021-32687) The vulnerability involves changing the default set-max-intset-entries configuration parameter to a very large value and constructing specially crafted commands to manipulate sets
void htc_sta_drain(struct htc_target *target, u8 idx) { target->hif->sta_drain(target->hif_dev, idx); }
0
[ "CWE-400", "CWE-401" ]
linux
853acf7caf10b828102d92d05b5c101666a6142b
325,081,162,889,324,300,000,000,000,000,000,000,000
4
ath9k_htc: release allocated buffer if timed out In htc_config_pipe_credits, htc_setup_complete, and htc_connect_service, if a timeout happens, the allocated buffer needs to be released. Otherwise there will be a memory leak. Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
void opj_j2k_read_int16_to_int32 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem) { OPJ_BYTE * l_src_data = (OPJ_BYTE *) p_src_data; OPJ_INT32 * l_dest_data = (OPJ_INT32 *) p_dest_data; OPJ_UINT32 i; OPJ_UINT32 l_temp; for (i=0;i<p_nb_elem;++i) { opj_read_bytes(l_src_data,&l_temp,2); l_src_data+=sizeof(OPJ_INT16); *(l_dest_data++) = (OPJ_INT32) l_temp; } }
0
[]
openjpeg
0fa5a17c98c4b8f9ee2286f4f0a50cf52a5fccb0
107,410,554,193,336,700,000,000,000,000,000,000,000
15
[trunk] Correct potential double free on malloc failure in opj_j2k_copy_default_tcp_and_create_tcp (fixes issue 492)
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) { int busy = 0; int err; nodemask_t tmp; err = migrate_prep(); if (err) return err; down_read(&mm->mmap_sem); err = migrate_vmas(mm, from_nodes, to_nodes, flags); if (err) goto out; /* * Find a 'source' bit set in 'tmp' whose corresponding 'dest' * bit in 'to' is not also set in 'tmp'. Clear the found 'source' * bit in 'tmp', and return that <source, dest> pair for migration. * The pair of nodemasks 'to' and 'from' define the map. * * If no pair of bits is found that way, fallback to picking some * pair of 'source' and 'dest' bits that are not the same. If the * 'source' and 'dest' bits are the same, this represents a node * that will be migrating to itself, so no pages need move. * * If no bits are left in 'tmp', or if all remaining bits left * in 'tmp' correspond to the same bit in 'to', return false * (nothing left to migrate). * * This lets us pick a pair of nodes to migrate between, such that * if possible the dest node is not already occupied by some other * source node, minimizing the risk of overloading the memory on a * node that would happen if we migrated incoming memory to a node * before migrating outgoing memory source that same node. * * A single scan of tmp is sufficient. As we go, we remember the * most recent <s, d> pair that moved (s != d). If we find a pair * that not only moved, but what's better, moved to an empty slot * (d is not set in tmp), then we break out then, with that pair. * Otherwise when we finish scanning from_tmp, we at least have the * most recent <s, d> pair that moved. If we get all the way through * the scan of tmp without finding any node that moved, much less * moved to an empty node, then there is nothing left worth migrating. */ tmp = *from_nodes; while (!nodes_empty(tmp)) { int s,d; int source = -1; int dest = 0; for_each_node_mask(s, tmp) { d = node_remap(s, *from_nodes, *to_nodes); if (s == d) continue; source = s; /* Node moved. Memorize */ dest = d; /* dest not in remaining from nodes? */ if (!node_isset(dest, tmp)) break; } if (source == -1) break; node_clear(source, tmp); err = migrate_to_node(mm, source, dest, flags); if (err > 0) busy += err; if (err < 0) break; } out: up_read(&mm->mmap_sem); if (err < 0) return err; return busy; }
0
[ "CWE-264" ]
linux-2.6
1a5a9906d4e8d1976b701f889d8f35d54b928f25
109,543,330,188,792,730,000,000,000,000,000,000,000
83
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem held in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: <stable@vger.kernel.org> [2.6.38+] Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
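The snapshot idiom the fix introduces — read the entry once into a local, then place a compiler barrier so every later check uses that same value — can be shown standalone. A minimal sketch assuming a GCC/Clang inline-asm barrier, with a stand-in pmd type and made-up flag bits:

#include <stdio.h>

typedef unsigned long pmd_t;                 /* stand-in for the real type */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int pmd_is_huge(pmd_t v) { return (v & 0x80) != 0; }  /* illustrative bit */
static int pmd_is_bad(pmd_t v)  { return (v & 0x01) != 0; }  /* illustrative bit */

static int pmd_usable(const volatile pmd_t *pmd)
{
    pmd_t val = *pmd;    /* single read into a local copy */
    barrier();           /* stop the compiler from re-reading *pmd below */
    if (pmd_is_huge(val) || pmd_is_bad(val))
        return 0;
    return 1;            /* every later check used the same snapshot */
}

int main(void)
{
    pmd_t entry = 0;
    printf("%d\n", pmd_usable(&entry));
    return 0;
}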
static ssize_t lbs_highsnr_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { return lbs_threshold_read(TLV_TYPE_SNR_HIGH, CMD_SUBSCRIBE_SNR_HIGH, file, userbuf, count, ppos); }
0
[ "CWE-703", "CWE-189" ]
linux
a497e47d4aec37aaf8f13509f3ef3d1f6a717d88
164,366,150,994,953,160,000,000,000,000,000,000,000
6
libertas: potential oops in debugfs If we do a zero size allocation then it will oops. Also we can't be sure the user passes us a NUL terminated string so I've added a terminator. This code can only be triggered by root. Reported-by: Nico Golde <nico@ngolde.de> Reported-by: Fabian Yamaguchi <fabs@goesec.de> Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Acked-by: Dan Williams <dcbw@redhat.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
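The two hardening steps named here are rejecting zero (or oversized) lengths before allocating and NUL-terminating anything copied from the user before parsing it. A minimal userspace sketch (the 4096 limit is illustrative):

#include <stdlib.h>
#include <string.h>

static char *copy_user_string(const char *ubuf, size_t count)
{
    if (count == 0 || count > 4096)  /* a zero-size alloc would be abusable */
        return NULL;
    char *buf = malloc(count + 1);   /* +1 leaves room for the terminator */
    if (buf == NULL)
        return NULL;
    memcpy(buf, ubuf, count);
    buf[count] = '\0';               /* never assume the caller terminated it */
    return buf;
}

int main(void)
{
    char *s = copy_user_string("123 456", 7);
    free(s);
    return 0;
}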
virSecuritySELinuxSetSysinfoLabel(virSecurityManager *mgr, virSysinfoDef *def, virSecuritySELinuxData *data) { size_t i; for (i = 0; i < def->nfw_cfgs; i++) { virSysinfoFWCfgDef *f = &def->fw_cfgs[i]; if (f->file && virSecuritySELinuxSetFilecon(mgr, f->file, data->content_context, true) < 0) return -1; } return 0; }
0
[ "CWE-732" ]
libvirt
15073504dbb624d3f6c911e85557019d3620fdb2
71,359,510,963,710,550,000,000,000,000,000,000,000
17
security: fix SELinux label generation logic A process can access a file if the set of MCS categories for the file is equal-to *or* a subset-of, the set of MCS categories for the process. If there are two VMs: a) svirt_t:s0:c117 b) svirt_t:s0:c117,c720 Then VM (b) is able to access files labelled for VM (a). IOW, we must discard case where the categories are equal because that is a subset of many other valid category pairs. Fixes: https://gitlab.com/libvirt/libvirt/-/issues/153 CVE-2021-3631 Reviewed-by: Peter Krempa <pkrempa@redhat.com> Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
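The fix's invariant is that a generated MCS pair must consist of two distinct categories, since an equal pair collapses to a label that is a subset of many others. A minimal sketch of such a selection loop (the category range and rand() source are illustrative; real code would use a proper RNG):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
    srand((unsigned)time(NULL));
    int c1 = rand() % 1024;
    int c2;
    do {
        c2 = rand() % 1024;          /* re-draw until the categories differ */
    } while (c2 == c1);              /* an equal pair would be a subset label */
    printf("svirt_t:s0:c%d,c%d\n", c1 < c2 ? c1 : c2, c1 < c2 ? c2 : c1);
    return 0;
}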
static void smtp_to_smtps(struct connectdata *conn) { /* Change the connection handler */ conn->handler = &Curl_handler_smtps; /* Set the connection's upgraded to TLS flag */ conn->tls_upgraded = TRUE; }
0
[ "CWE-200", "CWE-119", "CWE-787" ]
curl
ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628
189,178,256,426,818,600,000,000,000,000,000,000,000
8
smtp: use the upload buffer size for scratch buffer malloc ... not the read buffer size, as that can be set smaller and thus cause a buffer overflow! CVE-2018-0500 Reported-by: Peter Wu Bug: https://curl.haxx.se/docs/adv_2018-70a2.html
static void set_indicator_in_key_fields(KEY *key_info) { KEY_PART_INFO *key_part; uint key_parts= key_info->user_defined_key_parts, i; for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++) key_part->field->flags|= GET_FIXED_FIELDS_FLAG; }
0
[ "CWE-416" ]
server
c02ebf3510850ba78a106be9974c94c3b97d8585
332,297,276,704,973,150,000,000,000,000,000,000,000
7
MDEV-24176 Preparations 1. moved fix_vcol_exprs() call to open_table() mysql_alter_table() doesn't do lock_tables(), so it cannot benefit from fix_vcol_exprs() being called from there. Tests affected: main.default_session 2. Vanilla cleanups and comments.
int vb2_ioctl_create_bufs(struct file *file, void *priv, struct v4l2_create_buffers *p) { struct video_device *vdev = video_devdata(file); int res = vb2_verify_memory_type(vdev->queue, p->memory, p->format.type); p->index = vdev->queue->num_buffers; /* * If count == 0, then just check if memory and type are valid. * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0. */ if (p->count == 0) return res != -EBUSY ? res : 0; if (res) return res; if (vb2_queue_is_busy(vdev, file)) return -EBUSY; res = vb2_create_bufs(vdev->queue, p); if (res == 0) vdev->queue->owner = file->private_data; return res; }
0
[ "CWE-119", "CWE-787" ]
linux
2c1f6951a8a82e6de0d82b1158b5e493fc6c54ab
38,736,212,793,785,110,000,000,000,000,000,000,000
24
[media] videobuf2-v4l2: Verify planes array in buffer dequeueing When a buffer is being dequeued using VIDIOC_DQBUF IOCTL, the exact buffer which will be dequeued is not known until the buffer has been removed from the queue. The number of planes is specific to a buffer, not to the queue. This does lead to the situation where multi-plane buffers may be requested and queued with n planes, but VIDIOC_DQBUF IOCTL may be passed an argument struct with fewer planes. __fill_v4l2_buffer() however uses the number of planes from the dequeued videobuf2 buffer, overwriting kernel memory (the m.planes array allocated in video_usercopy() in v4l2-ioctl.c) if the user provided fewer planes than the dequeued buffer had. Oops! Fixes: b0e0e1f83de3 ("[media] media: videobuf2: Prepare to divide videobuf2") Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com> Acked-by: Hans Verkuil <hans.verkuil@cisco.com> Cc: stable@vger.kernel.org # for v4.4 and later Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
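The invariant enforced by the fix: the user-supplied plane array must be at least as large as the dequeued buffer's plane count, checked before anything is copied out. A minimal sketch with stand-in types (not the V4L2 structures):

#include <string.h>

struct plane { unsigned length; };

/* Fill the caller's plane array only if it is big enough for this buffer. */
static int fill_planes(struct plane *dst, unsigned dst_count,
                       const struct plane *src, unsigned src_count)
{
    if (dst_count < src_count)
        return -1;   /* caller's array is too small: refuse, don't overflow */
    memcpy(dst, src, src_count * sizeof(*src));
    return 0;
}

int main(void)
{
    struct plane buf[3] = { {1}, {2}, {3} };
    struct plane user[2];
    return fill_planes(user, 2, buf, 3) == -1 ? 0 : 1;
}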
static void __ept_release(struct kref *kref) { struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint, refcount); /* * At this point no one holds a reference to ept anymore, * so we can directly free it */ kfree(ept); }
0
[ "CWE-415" ]
linux
1680939e9ecf7764fba8689cfb3429c2fe2bb23c
82,018,438,313,957,210,000,000,000,000,000,000,000
10
rpmsg: virtio: Fix possible double free in rpmsg_virtio_add_ctrl_dev() vch will be freed in virtio_rpmsg_release_device() when rpmsg_ctrldev_register_device() fails. There is no need to call kfree() again. Fixes: c486682ae1e2 ("rpmsg: virtio: Register the rpmsg_char device") Signed-off-by: Hangyu Hua <hbh25y@gmail.com> Tested-by: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com> Link: https://lore.kernel.org/r/20220426060536.15594-3-hbh25y@gmail.com Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
static void tcp_update_pacing_rate(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); u64 rate; /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */ rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); /* current rate is (cwnd * mss) / srtt * In Slow Start [1], set sk_pacing_rate to 200 % the current rate. * In Congestion Avoidance phase, set it to 120 % the current rate. * * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh) * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching * end of slow start and should slow down. */ if (tp->snd_cwnd < tp->snd_ssthresh / 2) rate *= sysctl_tcp_pacing_ss_ratio; else rate *= sysctl_tcp_pacing_ca_ratio; rate *= max(tp->snd_cwnd, tp->packets_out); if (likely(tp->srtt_us)) do_div(rate, tp->srtt_us); /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate * without any lock. We want to make sure compiler wont store * intermediate values in this location. */ ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, sk->sk_max_pacing_rate); }
0
[ "CWE-703", "CWE-189" ]
linux
8b8a321ff72c785ed5e8b4cf6eda20b35d427390
149,832,201,615,945,360,000,000,000,000,000,000,000
33
tcp: fix zero cwnd in tcp_cwnd_reduction Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally") introduced a bug that cwnd may become 0 when both inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead to a div-by-zero if the connection starts another cwnd reduction phase by setting tp->prior_cwnd to the current cwnd (0) in tcp_init_cwnd_reduction(). To prevent this we skip PRR operation when nothing is acked or sacked. Then cwnd must be positive in all cases as long as ssthresh is positive: 1) The proportional reduction mode inflight > ssthresh > 0 2) The reduction bound mode a) inflight == ssthresh > 0 b) inflight < ssthresh sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh Therefore in all cases inflight and sndcnt can not both be 0. We check invalid tp->prior_cwnd to avoid potential div0 bugs. In reality this bug is triggered only with a sequence of less common events. For example, the connection is terminating an ECN-triggered cwnd reduction with an inflight 0, then it receives reordered/old ACKs or DSACKs from prior transmission (which acks nothing). Or the connection is in fast recovery stage that marks everything lost, but fails to retransmit due to local issues, then receives data packets from other end which acks nothing. Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally") Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name> Signed-off-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
doZipFinish(ptcpsess_t *pSess) { int zRet; /* zlib return state */ DEFiRet; unsigned outavail; struct syslogTime stTime; uchar zipBuf[32*1024]; // TODO: use "global" one from pSess if(!pSess->bzInitDone) goto done; pSess->zstrm.avail_in = 0; /* run inflate() on buffer until everything has been compressed */ do { DBGPRINTF("doZipFinish: in inflate() loop, avail_in %d, total_in %ld\n", pSess->zstrm.avail_in, pSess->zstrm.total_in); pSess->zstrm.avail_out = sizeof(zipBuf); pSess->zstrm.next_out = zipBuf; zRet = inflate(&pSess->zstrm, Z_FINISH); /* no bad return value */ DBGPRINTF("after inflate, ret %d, avail_out %d\n", zRet, pSess->zstrm.avail_out); outavail = sizeof(zipBuf) - pSess->zstrm.avail_out; if(outavail != 0) { pSess->pLstn->rcvdDecompressed += outavail; CHKiRet(DataRcvdUncompressed(pSess, (char*)zipBuf, outavail, &stTime, 0)); // TODO: query time! } } while (pSess->zstrm.avail_out == 0); finalize_it: zRet = inflateEnd(&pSess->zstrm); if(zRet != Z_OK) { DBGPRINTF("imptcp: error %d returned from zlib/inflateEnd()\n", zRet); } pSess->bzInitDone = 0; done: RETiRet; }
0
[ "CWE-787" ]
rsyslog
89955b0bcb1ff105e1374aad7e0e993faa6a038f
14,727,604,906,510,768,000,000,000,000,000,000,000
37
net bugfix: potential buffer overrun
static int x509parse_key_pkcs8_unencrypted_der( rsa_context *rsa, const unsigned char *key, size_t keylen ) { int ret; size_t len; unsigned char *p, *end; x509_buf pk_alg_oid; p = (unsigned char *) key; end = p + keylen; /* * This function parses the PrivatKeyInfo object (PKCS#8) * * PrivateKeyInfo ::= SEQUENCE { * version Version, * algorithm AlgorithmIdentifier, * PrivateKey BIT STRING * } * * AlgorithmIdentifier ::= SEQUENCE { * algorithm OBJECT IDENTIFIER, * parameters ANY DEFINED BY algorithm OPTIONAL * } * * The PrivateKey BIT STRING is a PKCS#1 RSAPrivateKey */ if( ( ret = asn1_get_tag( &p, end, &len, ASN1_CONSTRUCTED | ASN1_SEQUENCE ) ) != 0 ) { return( POLARSSL_ERR_X509_KEY_INVALID_FORMAT + ret ); } end = p + len; if( ( ret = asn1_get_int( &p, end, &rsa->ver ) ) != 0 ) { return( POLARSSL_ERR_X509_KEY_INVALID_FORMAT + ret ); } if( rsa->ver != 0 ) { return( POLARSSL_ERR_X509_KEY_INVALID_VERSION + ret ); } if( ( ret = x509_get_alg( &p, end, &pk_alg_oid ) ) != 0 ) { return( POLARSSL_ERR_X509_KEY_INVALID_FORMAT + ret ); } /* * only RSA keys handled at this time */ if( pk_alg_oid.len != 9 || memcmp( pk_alg_oid.p, OID_PKCS1_RSA, 9 ) != 0 ) { return( POLARSSL_ERR_X509_UNKNOWN_PK_ALG ); } /* * Get the OCTET STRING and parse the PKCS#1 format inside */ if( ( ret = asn1_get_tag( &p, end, &len, ASN1_OCTET_STRING ) ) != 0 ) return( POLARSSL_ERR_X509_KEY_INVALID_FORMAT + ret ); if( ( end - p ) < 1 ) { return( POLARSSL_ERR_X509_KEY_INVALID_FORMAT + POLARSSL_ERR_ASN1_OUT_OF_DATA ); } end = p + len; if( ( ret = x509parse_key_pkcs1_der( rsa, p, end - p ) ) != 0 ) return( ret ); return( 0 ); }
0
[ "CWE-310" ]
polarssl
43f9799ce61c6392a014d0a2ea136b4b3a9ee194
261,867,338,478,095,680,000,000,000,000,000,000,000
80
RSA blinding on CRT operations to counter timing attacks
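RSA blinding hides the private-key operation's timing behind a random factor: compute s = (c * r^e)^d mod n and then strip r, since (c * r^e)^d = c^d * r (mod n). A toy demonstration with the classic 16-bit textbook key (n = 3233, e = 17, d = 2753); real implementations use a bignum library, a CSPRNG, and a fresh r per operation:

#include <stdio.h>
#include <stdint.h>

static uint64_t modpow(uint64_t b, uint64_t e, uint64_t n)
{
    uint64_t r = 1;
    b %= n;
    while (e > 0) {
        if (e & 1)
            r = r * b % n;   /* values stay below n^2 ~ 10.4M, no overflow */
        b = b * b % n;
        e >>= 1;
    }
    return r;
}

/* Modular inverse via the extended Euclidean algorithm; assumes gcd(a,n)=1. */
static int64_t modinv(int64_t a, int64_t n)
{
    int64_t t = 0, newt = 1, r = n, newr = a % n;
    while (newr != 0) {
        int64_t q = r / newr, tmp;
        tmp = t - q * newt; t = newt; newt = tmp;
        tmp = r - q * newr; r = newr; newr = tmp;
    }
    return t < 0 ? t + n : t;
}

int main(void)
{
    const uint64_t n = 3233, e = 17, d = 2753;   /* toy key: n = 61 * 53 */
    uint64_t c = 855;       /* ciphertext (encryption of 123 under this key) */
    uint64_t rnd = 777;     /* blinding factor; must be coprime to n */

    uint64_t blinded = c * modpow(rnd, e, n) % n;            /* c' = c * r^e */
    uint64_t s = modpow(blinded, d, n);                      /* private op   */
    uint64_t m = s * (uint64_t)modinv((int64_t)rnd, (int64_t)n) % n; /* /r   */

    /* Both lines print 123: blinding does not change the result. */
    printf("blinded path: %llu\n", (unsigned long long)m);
    printf("direct path:  %llu\n", (unsigned long long)modpow(c, d, n));
    return 0;
}

Blinding complements, rather than replaces, constant-time bignum arithmetic; it mainly decorrelates the timing of the CRT exponentiation from the attacker-chosen input.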
static char *sun4u_fw_dev_path(FWPathProvider *p, BusState *bus, DeviceState *dev) { PCIDevice *pci; IDEBus *ide_bus; IDEState *ide_s; int bus_id; if (!strcmp(object_get_typename(OBJECT(dev)), "pbm-bridge")) { pci = PCI_DEVICE(dev); if (PCI_FUNC(pci->devfn)) { return g_strdup_printf("pci@%x,%x", PCI_SLOT(pci->devfn), PCI_FUNC(pci->devfn)); } else { return g_strdup_printf("pci@%x", PCI_SLOT(pci->devfn)); } } if (!strcmp(object_get_typename(OBJECT(dev)), "ide-drive")) { ide_bus = IDE_BUS(qdev_get_parent_bus(dev)); ide_s = idebus_active_if(ide_bus); bus_id = ide_bus->bus_id; if (ide_s->drive_kind == IDE_CD) { return g_strdup_printf("ide@%x/cdrom", bus_id); } return g_strdup_printf("ide@%x/disk", bus_id); } if (!strcmp(object_get_typename(OBJECT(dev)), "ide-hd")) { return g_strdup("disk"); } if (!strcmp(object_get_typename(OBJECT(dev)), "ide-cd")) { return g_strdup("cdrom"); } if (!strcmp(object_get_typename(OBJECT(dev)), "virtio-blk-device")) { return g_strdup("disk"); } return NULL; }
0
[ "CWE-476" ]
qemu
ad280559c68360c9f1cd7be063857853759e6a73
304,438,319,139,161,170,000,000,000,000,000,000,000
45
sun4u: add power_mem_read routine Define skeleton 'power_mem_read' routine. Avoid NULL dereference. Reported-by: Fakhri Zulkifli <mohdfakhrizulkifli@gmail.com> Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Item_func_sp::Item_func_sp(THD *thd, Name_resolution_context *context_arg, sp_name *name_arg, List<Item> &list): Item_func(thd, list), context(context_arg), m_name(name_arg), m_sp(NULL), sp_result_field(NULL) { maybe_null= 1; m_name->init_qname(thd); dummy_table= (TABLE*) thd->calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE)); dummy_table->s= (TABLE_SHARE*) (dummy_table+1); }
0
[ "CWE-120" ]
server
eca207c46293bc72dd8d0d5622153fab4d3fccf1
99,536,955,070,535,200,000,000,000,000,000,000,000
10
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size. Precision should be kept below DECIMAL_MAX_SCALE for computations. It can be bigger in Item_decimal. I'd fix this too, but it changes the existing behaviour, so it's problematic to fix.
TEST_P(RedirectIntegrationTest, UpstreamRedirectPreservesURIFragmentInLocation) { // Use base class initialize. HttpProtocolIntegrationTest::initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); Http::TestResponseHeaderMapImpl redirect_response{ {":status", "302"}, {"content-length", "0"}, {"location", "http://authority2/new/url?p1=v1&p2=v2#fragment"}}; auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response, 0); EXPECT_TRUE(response->complete()); EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ("http://authority2/new/url?p1=v1&p2=v2#fragment", response->headers().getLocationValue()); }
0
[ "CWE-703" ]
envoy
18871dbfb168d3512a10c78dd267ff7c03f564c6
328,269,257,837,297,300,000,000,000,000,000,000,000
15
[1.18] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <ovanders@redhat.com>
static sqlite3_index_info *allocateIndexInfo( WhereInfo *pWInfo, /* The WHERE clause */ WhereClause *pWC, /* The WHERE clause being analyzed */ Bitmask mUnusable, /* Ignore terms with these prereqs */ SrcItem *pSrc, /* The FROM clause term that is the vtab */ u16 *pmNoOmit /* Mask of terms not to omit */ ){ int i, j; int nTerm; Parse *pParse = pWInfo->pParse; struct sqlite3_index_constraint *pIdxCons; struct sqlite3_index_orderby *pIdxOrderBy; struct sqlite3_index_constraint_usage *pUsage; struct HiddenIndexInfo *pHidden; WhereTerm *pTerm; int nOrderBy; sqlite3_index_info *pIdxInfo; u16 mNoOmit = 0; const Table *pTab; int eDistinct = 0; ExprList *pOrderBy = pWInfo->pOrderBy; assert( pSrc!=0 ); pTab = pSrc->pTab; assert( pTab!=0 ); assert( IsVirtual(pTab) ); /* Find all WHERE clause constraints referring to this virtual table. ** Mark each term with the TERM_OK flag. Set nTerm to the number of ** terms found. */ for(i=nTerm=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){ pTerm->wtFlags &= ~TERM_OK; if( pTerm->leftCursor != pSrc->iCursor ) continue; if( pTerm->prereqRight & mUnusable ) continue; assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); testcase( pTerm->eOperator & WO_IN ); testcase( pTerm->eOperator & WO_ISNULL ); testcase( pTerm->eOperator & WO_IS ); testcase( pTerm->eOperator & WO_ALL ); if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; if( pTerm->wtFlags & TERM_VNULL ) continue; assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); assert( pTerm->u.x.leftColumn>=XN_ROWID ); assert( pTerm->u.x.leftColumn<pTab->nCol ); /* tag-20191211-002: WHERE-clause constraints are not useful to the ** right-hand table of a LEFT JOIN nor to the either table of a ** RIGHT JOIN. See tag-20191211-001 for the ** equivalent restriction for ordinary tables. */ if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 ){ testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LEFT ); testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_RIGHT ); testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LTORJ ); testcase( ExprHasProperty(pTerm->pExpr, EP_OuterON) ); testcase( ExprHasProperty(pTerm->pExpr, EP_InnerON) ); if( !ExprHasProperty(pTerm->pExpr, EP_OuterON|EP_InnerON) || pTerm->pExpr->w.iJoin != pSrc->iCursor ){ continue; } } nTerm++; pTerm->wtFlags |= TERM_OK; } /* If the ORDER BY clause contains only columns in the current ** virtual table then allocate space for the aOrderBy part of ** the sqlite3_index_info structure. */ nOrderBy = 0; if( pOrderBy ){ int n = pOrderBy->nExpr; for(i=0; i<n; i++){ Expr *pExpr = pOrderBy->a[i].pExpr; Expr *pE2; /* Skip over constant terms in the ORDER BY clause */ if( sqlite3ExprIsConstant(pExpr) ){ continue; } /* Virtual tables are unable to deal with NULLS FIRST */ if( pOrderBy->a[i].fg.sortFlags & KEYINFO_ORDER_BIGNULL ) break; /* First case - a direct column references without a COLLATE operator */ if( pExpr->op==TK_COLUMN && pExpr->iTable==pSrc->iCursor ){ assert( pExpr->iColumn>=XN_ROWID && pExpr->iColumn<pTab->nCol ); continue; } /* 2nd case - a column reference with a COLLATE operator. Only match ** of the COLLATE operator matches the collation of the column. */ if( pExpr->op==TK_COLLATE && (pE2 = pExpr->pLeft)->op==TK_COLUMN && pE2->iTable==pSrc->iCursor ){ const char *zColl; /* The collating sequence name */ assert( !ExprHasProperty(pExpr, EP_IntValue) ); assert( pExpr->u.zToken!=0 ); assert( pE2->iColumn>=XN_ROWID && pE2->iColumn<pTab->nCol ); pExpr->iColumn = pE2->iColumn; if( pE2->iColumn<0 ) continue; /* Collseq does not matter for rowid */ zColl = sqlite3ColumnColl(&pTab->aCol[pE2->iColumn]); if( zColl==0 ) zColl = sqlite3StrBINARY; if( sqlite3_stricmp(pExpr->u.zToken, zColl)==0 ) continue; } /* No matches cause a break out of the loop */ break; } if( i==n ){ nOrderBy = n; if( (pWInfo->wctrlFlags & WHERE_DISTINCTBY) ){ eDistinct = 2 + ((pWInfo->wctrlFlags & WHERE_SORTBYGROUP)!=0); }else if( pWInfo->wctrlFlags & WHERE_GROUPBY ){ eDistinct = 1; } } } /* Allocate the sqlite3_index_info structure */ pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo) + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm + sizeof(*pIdxOrderBy)*nOrderBy + sizeof(*pHidden) + sizeof(sqlite3_value*)*nTerm ); if( pIdxInfo==0 ){ sqlite3ErrorMsg(pParse, "out of memory"); return 0; } pHidden = (struct HiddenIndexInfo*)&pIdxInfo[1]; pIdxCons = (struct sqlite3_index_constraint*)&pHidden->aRhs[nTerm]; pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm]; pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy]; pIdxInfo->aConstraint = pIdxCons; pIdxInfo->aOrderBy = pIdxOrderBy; pIdxInfo->aConstraintUsage = pUsage; pHidden->pWC = pWC; pHidden->pParse = pParse; pHidden->eDistinct = eDistinct; pHidden->mIn = 0; for(i=j=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){ u16 op; if( (pTerm->wtFlags & TERM_OK)==0 ) continue; pIdxCons[j].iColumn = pTerm->u.x.leftColumn; pIdxCons[j].iTermOffset = i; op = pTerm->eOperator & WO_ALL; if( op==WO_IN ){ if( (pTerm->wtFlags & TERM_SLICE)==0 ){ pHidden->mIn |= SMASKBIT32(j); } op = WO_EQ; } if( op==WO_AUX ){ pIdxCons[j].op = pTerm->eMatchOp; }else if( op & (WO_ISNULL|WO_IS) ){ if( op==WO_ISNULL ){ pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_ISNULL; }else{ pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_IS; } }else{ pIdxCons[j].op = (u8)op; /* The direct assignment in the previous line is possible only because ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The ** following asserts verify this fact. */ assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); assert( pTerm->eOperator&(WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_AUX) ); if( op & (WO_LT|WO_LE|WO_GT|WO_GE) && sqlite3ExprIsVector(pTerm->pExpr->pRight) ){ testcase( j!=i ); if( j<16 ) mNoOmit |= (1 << j); if( op==WO_LT ) pIdxCons[j].op = WO_LE; if( op==WO_GT ) pIdxCons[j].op = WO_GE; } } j++; } assert( j==nTerm ); pIdxInfo->nConstraint = j; for(i=j=0; i<nOrderBy; i++){ Expr *pExpr = pOrderBy->a[i].pExpr; if( sqlite3ExprIsConstant(pExpr) ) continue; assert( pExpr->op==TK_COLUMN || (pExpr->op==TK_COLLATE && pExpr->pLeft->op==TK_COLUMN && pExpr->iColumn==pExpr->pLeft->iColumn) ); pIdxOrderBy[j].iColumn = pExpr->iColumn; pIdxOrderBy[j].desc = pOrderBy->a[i].fg.sortFlags & KEYINFO_ORDER_DESC; j++; } pIdxInfo->nOrderBy = j; *pmNoOmit = mNoOmit; return pIdxInfo; }
0
[ "CWE-129" ]
sqlite
effc07ec9c6e08d3bd17665f8800054770f8c643
118,036,693,794,038,910,000,000,000,000,000,000,000
204
Fix the whereKeyStats() routine (part of STAT4 processing only) so that it is able to cope with row-value comparisons against the primary key index of a WITHOUT ROWID table. [forum:/forumpost/3607259d3c|Forum post 3607259d3c]. FossilOrigin-Name: 2a6f761864a462de5c2d5bc666b82fb0b7e124a03443cd1482620dde344b34bb
static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_auth_request *req) { return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req); }
0
[ "CWE-287" ]
linux
3e493173b7841259a08c5c8e5cbe90adb349da7e
63,233,719,577,003,790,000,000,000,000,000,000,000
5
mac80211: Do not send Layer 2 Update frame before authorization The Layer 2 Update frame is used to update bridges when a station roams to another AP even if that STA does not transmit any frames after the reassociation. This behavior was described in IEEE Std 802.11F-2003 as something that would happen based on MLME-ASSOCIATE.indication, i.e., before completing 4-way handshake. However, this IEEE trial-use recommended practice document was published before RSN (IEEE Std 802.11i-2004) and as such, did not consider RSN use cases. Furthermore, IEEE Std 802.11F-2003 was withdrawn in 2006 and as such, has not been maintained and should not be used anymore. Sending out the Layer 2 Update frame immediately after association is fine for open networks (and also when using SAE, FT protocol, or FILS authentication when the station is actually authenticated by the time association completes). However, it is not appropriate for cases where RSN is used with PSK or EAP authentication since the station is actually fully authenticated only once the 4-way handshake completes after authentication and attackers might be able to use the unauthenticated triggering of Layer 2 Update frame transmission to disrupt bridge behavior. Fix this by postponing transmission of the Layer 2 Update frame from station entry addition to the point when the station entry is marked authorized. Similarly, send out the VLAN binding update only if the STA entry has already been authorized. Signed-off-by: Jouni Malinen <jouni@codeaurora.org> Reviewed-by: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: David S. Miller <davem@davemloft.net>
xsltNewStylesheet(void) { xsltStylesheetPtr ret = NULL; ret = (xsltStylesheetPtr) xmlMalloc(sizeof(xsltStylesheet)); if (ret == NULL) { xsltTransformError(NULL, NULL, NULL, "xsltNewStylesheet : malloc failed\n"); goto internal_err; } memset(ret, 0, sizeof(xsltStylesheet)); ret->omitXmlDeclaration = -1; ret->standalone = -1; ret->decimalFormat = xsltNewDecimalFormat(NULL, NULL); ret->indent = -1; ret->errors = 0; ret->warnings = 0; ret->exclPrefixNr = 0; ret->exclPrefixMax = 0; ret->exclPrefixTab = NULL; ret->extInfos = NULL; ret->extrasNr = 0; ret->internalized = 1; ret->literal_result = 0; ret->forwards_compatible = 0; ret->dict = xmlDictCreate(); #ifdef WITH_XSLT_DEBUG xsltGenericDebug(xsltGenericDebugContext, "creating dictionary for stylesheet\n"); #endif xsltInit(); return(ret); internal_err: if (ret != NULL) xsltFreeStylesheet(ret); return(NULL); }
0
[]
libxslt
e03553605b45c88f0b4b2980adfbbb8f6fca2fd6
322,111,322,932,583,300,000,000,000,000,000,000,000
40
Fix security framework bypass xsltCheckRead and xsltCheckWrite return -1 in case of error but callers don't check for this condition and allow access. With a specially crafted URL, xsltCheckRead could be tricked into returning an error because of a supposedly invalid URL that would still be loaded successfully later on. Fixes #12. Thanks to Felix Wilhelm for the report.
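The call-site rule that was violated: a security check that can return an error must be treated as a denial, never as permission. A tiny fail-closed sketch (the hook and its return convention are illustrative, not the libxslt API):

#include <stdio.h>

/* Pretend security hook: 1 = allow, 0 = deny, -1 = internal error. */
static int check_read(const char *uri) { return uri[0] ? 1 : -1; }

static int load_resource(const char *uri)
{
    int ok = check_read(uri);
    if (ok != 1)          /* fail closed: errors (<0) must deny, not allow */
        return -1;
    printf("loading %s\n", uri);
    return 0;
}

int main(void)
{
    load_resource("file.xml");
    return load_resource("") == -1 ? 0 : 1;   /* error path must be denied */
}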
enum ndr_err_code ndr_push_dns_string_list(struct ndr_push *ndr, struct ndr_token_list *string_list, int ndr_flags, const char *s, bool is_nbt) { const char *start = s; bool use_compression; size_t max_length; if (is_nbt) { use_compression = true; /* * Max length is longer in NBT/Wins, because Windows counts * the semi-decompressed size of the netbios name (16 bytes) * rather than the wire size of 32, which is what you'd expect * if it followed RFC1002 (it uses the short form in * [MS-WINSRA]). In other words the maximum size of the * "scope" is 237, not 221. * * We make the size limit slightly larger than 255 + 16, * because the 237 scope limit is already enforced in the * winsserver code with a specific return value; bailing out * here would muck with that. */ max_length = 274; } else { use_compression = !(ndr->flags & LIBNDR_FLAG_NO_COMPRESSION); max_length = 255; } if (!(ndr_flags & NDR_SCALARS)) { return NDR_ERR_SUCCESS; } while (s && *s) { enum ndr_err_code ndr_err; char *compname; size_t complen; uint32_t offset; if (use_compression) { /* see if we have pushed the remaining string already, * if so we use a label pointer to this string */ ndr_err = ndr_token_retrieve_cmp_fn(string_list, s, &offset, (comparison_fn_t)strcmp, false); if (NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { uint8_t b[2]; if (offset > 0x3FFF) { return ndr_push_error(ndr, NDR_ERR_STRING, "offset for dns string " \ "label pointer " \ "%u[%08X] > 0x00003FFF", offset, offset); } b[0] = 0xC0 | (offset>>8); b[1] = (offset & 0xFF); return ndr_push_bytes(ndr, b, 2); } } complen = strcspn(s, "."); /* the length must fit into 6 bits (i.e. <= 63) */ if (complen > 0x3F) { return ndr_push_error(ndr, NDR_ERR_STRING, "component length %u[%08X] > " \ "0x0000003F", (unsigned)complen, (unsigned)complen); } if (complen == 0 && s[complen] == '.') { return ndr_push_error(ndr, NDR_ERR_STRING, "component length is 0 " "(consecutive dots)"); } if (is_nbt && s[complen] == '.' && s[complen + 1] == '\0') { /* nbt names are sometimes usernames, and we need to * keep a trailing dot to ensure it is byte-identical, * (not just semantically identical given DNS * semantics). */ complen++; } compname = talloc_asprintf(ndr, "%c%*.*s", (unsigned char)complen, (unsigned char)complen, (unsigned char)complen, s); NDR_ERR_HAVE_NO_MEMORY(compname); /* remember the current component + the rest of the string * so it can be reused later */ if (use_compression) { NDR_CHECK(ndr_token_store(ndr, string_list, s, ndr->offset)); } /* push just this component into the blob */ NDR_CHECK(ndr_push_bytes(ndr, (const uint8_t *)compname, complen+1)); talloc_free(compname); s += complen; if (*s == '.') { s++; } if (s - start > max_length) { return ndr_push_error(ndr, NDR_ERR_STRING, "name > %zu character long", max_length); } } /* if we reach the end of the string and have pushed the last component * without using a label pointer, we need to terminate the string */ return ndr_push_bytes(ndr, (const uint8_t *)"", 1); }
0
[ "CWE-400" ]
samba
cc3a67760cf9faaad3af73b1eed9e2ef85276633
24,399,994,098,302,414,000,000,000,000,000,000,000
126
CVE-2020-10745: ndr/dns-utils: prepare for NBT compatibility NBT has a funny thing where it sometimes needs to send a trailing dot as part of the last component, because the string representation is a user name. In DNS, "example.com", and "example.com." are the same, both having three components ("example", "com", ""); in NBT, we want to treat them differently, with the second form having the three components ("example", "com.", ""). This retains the logic of e6e2ec0001fe3c010445e26cc0efddbc1f73416b. Also DNS compression cannot be turned off for NBT. BUG: https://bugzilla.samba.org/show_bug.cgi?id=14378 Signed-off-by: Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
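Two of the wire-format invariants enforced above can be shown in isolation: a DNS label must fit in 6 bits of length (at most 63 bytes) and must not be empty, with the whole name capped at 255 bytes (NBT's larger 274-byte cap is the special case discussed in the message). A standalone validation sketch:

#include <stdio.h>
#include <string.h>

static int dns_name_valid(const char *s)
{
    size_t total = strlen(s);
    if (total > 255)
        return 0;                    /* whole name too long */
    while (*s) {
        size_t complen = strcspn(s, ".");
        if (complen > 63)
            return 0;                /* label length must fit in 6 bits */
        if (complen == 0)
            return 0;                /* consecutive dots -> empty label */
        s += complen;
        if (*s == '.')
            s++;
    }
    return 1;
}

int main(void)
{
    printf("%d %d\n", dns_name_valid("example.com"),
                      dns_name_valid("bad..name"));   /* prints "1 0" */
    return 0;
}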
static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps) { struct futex_q *match = futex_top_waiter(hb, key); /* * If there is a waiter on that futex, validate it and * attach to the pi_state when the validation succeeds. */ if (match) return attach_to_pi_state(uval, match->pi_state, ps); /* * We are the first waiter - try to look up the owner based on * @uval and attach to it. */ return attach_to_pi_owner(uval, key, ps); }
0
[ "CWE-416" ]
linux
65d8fc777f6dcfee12785c057a6b57f679641c90
141,163,270,191,176,640,000,000,000,000,000,000,000
18
futex: Remove requirement for lock_page() in get_futex_key() When dealing with key handling for shared futexes, we can drastically reduce the usage/need of the page lock. 1) For anonymous pages, the associated futex object is the mm_struct which does not require the page lock. 2) For inode based, keys, we can check under RCU read lock if the page mapping is still valid and take reference to the inode. This just leaves one rare race that requires the page lock in the slow path when examining the swapcache. Additionally realtime users currently have a problem with the page lock being contended for unbounded periods of time during futex operations. Task A get_futex_key() lock_page() ---> preempted Now any other task trying to lock that page will have to wait until task A gets scheduled back in, which is an unbound time. With this patch, we pretty much have a lockless futex_get_key(). Experiments show that this patch can boost/speedup the hashing of shared futexes with the perf futex benchmarks (which is good for measuring such change) by up to 45% when there are high (> 100) thread counts on a 60 core Westmere. Lower counts are pretty much in the noise range or less than 10%, but mid range can be seen at over 30% overall throughput (hash ops/sec). This makes anon-mem shared futexes much closer to its private counterpart. Signed-off-by: Mel Gorman <mgorman@suse.de> [ Ported on top of thp refcount rework, changelog, comments, fixes. ] Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Cc: Chris Mason <clm@fb.com> Cc: Darren Hart <dvhart@linux.intel.com> Cc: Hugh Dickins <hughd@google.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Cc: dave@stgolabs.net Link: http://lkml.kernel.org/r/1455045314-8305-3-git-send-email-dave@stgolabs.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
static int bpf_enable_stats(union bpf_attr *attr) { if (CHECK_ATTR(BPF_ENABLE_STATS)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; switch (attr->enable_stats.type) { case BPF_STATS_RUN_TIME: return bpf_enable_runtime_stats(); default: break; } return -EINVAL; }
0
[ "CWE-307" ]
linux
350a5c4dd2452ea999cc5e1d4a8dbf12de2f97ef
259,898,901,841,837,300,000,000,000,000,000,000,000
17
bpf: Don't allow vmlinux BTF to be used in map_create and prog_load. syzbot got an FD for the vmlinux BTF and passed it into map_create, which caused a crash in btf_type_id_size() when it tried to access resolved_ids. The vmlinux BTF doesn't have 'resolved_ids' and 'resolved_sizes' initialized, to save memory. To avoid such issues, disallow using the vmlinux BTF in the prog_load and map_create commands. Fixes: 5329722057d4 ("bpf: Assign ID to vmlinux BTF and return extra info for BTF in GET_OBJ_INFO") Reported-by: syzbot+8bab8ed346746e7540e8@syzkaller.appspotmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Yonghong Song <yhs@fb.com> Link: https://lore.kernel.org/bpf/20210307225248.79031-1-alexei.starovoitov@gmail.com
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) { struct bio *b; b = bio_alloc_bioset(gfp_mask, 0, bs); if (!b) return NULL; __bio_clone_fast(b, bio); if (bio_integrity(bio)) { int ret; ret = bio_integrity_clone(b, bio, gfp_mask); if (ret < 0) { bio_put(b); return NULL; } } return b; }
0
[ "CWE-772", "CWE-787" ]
linux
95d78c28b5a85bacbc29b8dba7c04babb9b0d467
188,235,148,987,863,300,000,000,000,000,000,000,000
23
fix unbalanced page refcounting in bio_map_user_iov bio_map_user_iov and bio_unmap_user do unbalanced page refcounting if the IO vector has small consecutive buffers belonging to the same page. bio_add_pc_page merges them into one, but the extra page reference is never dropped. Cc: stable@vger.kernel.org Signed-off-by: Vitaly Mayatskikh <v.mayatskih@gmail.com> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
static ssize_t rbd_pool_ns_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: ""); }
0
[ "CWE-863" ]
linux
f44d04e696feaf13d192d942c4f14ad2e117065a
163,874,904,864,670,180,000,000,000,000,000,000,000
7
rbd: require global CAP_SYS_ADMIN for mapping and unmapping It turns out that currently we rely only on sysfs attribute permissions: $ ll /sys/bus/rbd/{add*,remove*} --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove --w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major This means that images can be mapped and unmapped (i.e. block devices can be created and deleted) by a UID 0 process even after it drops all privileges or by any process with CAP_DAC_OVERRIDE in its user namespace as long as UID 0 is mapped into that user namespace. Be consistent with other virtual block devices (loop, nbd, dm, md, etc) and require CAP_SYS_ADMIN in the initial user namespace for mapping and unmapping, and also for dumping the configuration string and refreshing the image header. Cc: stable@vger.kernel.org Signed-off-by: Ilya Dryomov <idryomov@gmail.com> Reviewed-by: Jeff Layton <jlayton@kernel.org>
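The fix pattern the message describes is a runtime check against the initial user namespace, independent of the sysfs file mode. In kernel code that is a one-line guard at the top of the handler; the sketch below is illustrative only, not the actual rbd patch:

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative guard: reject callers that lack CAP_SYS_ADMIN in the
 * *initial* user namespace, so a namespaced "root" cannot reach the
 * handler even though the sysfs attribute mode would allow the write. */
static ssize_t guarded_store(const char *buf, size_t count)
{
    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;
    /* ... perform the privileged map/unmap operation ... */
    return count;
}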
ext4_mb_new_group_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg; struct ext4_prealloc_space *pa; struct ext4_group_info *grp; /* preallocate only when found space is larger then requested */ BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); BUG_ON(ac->ac_status != AC_STATUS_FOUND); BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); BUG_ON(ac->ac_pa == NULL); pa = ac->ac_pa; /* preallocation can change ac_b_ex, thus we store actually * allocated blocks for history */ ac->ac_f_ex = ac->ac_b_ex; pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); pa->pa_lstart = pa->pa_pstart; pa->pa_len = ac->ac_b_ex.fe_len; pa->pa_free = pa->pa_len; spin_lock_init(&pa->pa_lock); INIT_LIST_HEAD(&pa->pa_inode_list); INIT_LIST_HEAD(&pa->pa_group_list); pa->pa_deleted = 0; pa->pa_type = MB_GROUP_PA; mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_group_pa(ac, pa); ext4_mb_use_group_pa(ac, pa); atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); lg = ac->ac_lg; BUG_ON(lg == NULL); pa->pa_obj_lock = &lg->lg_prealloc_lock; pa->pa_inode = NULL; list_add(&pa->pa_group_list, &grp->bb_prealloc_list); /* * We will later add the new pa to the right bucket * after updating the pa_free in ext4_mb_release_context */ }
0
[ "CWE-703" ]
linux
ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1
246,234,617,249,052,160,000,000,000,000,000,000,000
50
ext4: check journal inode extents more carefully Currently, system zones just track ranges of blocks that are "important" fs metadata (bitmaps, group descriptors, journal blocks, etc.). This however complicates how the extent tree (or indirect blocks) can be checked for inodes that actually track such metadata - currently the journal inode, but arguably we should be treating quota files or the resize inode similarly. We cannot run __ext4_ext_check() on such metadata inodes when loading their extents, as that would immediately trigger the validity checks, and so we just hack around that and special-case the journal inode. This however leads to a situation where a journal inode with an extent tree of depth at least one can have an invalid extent tree that goes unnoticed until ext4_cache_extents() crashes. To overcome this limitation, track the inode number each system zone belongs to (0 is used for zones not belonging to any inode). We can then verify that the inode number matches the expected one when verifying the extent tree and thus avoid the false errors. With this there's no need to special-case the journal inode during extent tree checking anymore, so remove it. Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode") Reported-by: Wolfgang Frisch <wolfgang.frisch@suse.com> Reviewed-by: Lukas Czerner <lczerner@redhat.com> Signed-off-by: Jan Kara <jack@suse.cz> Link: https://lore.kernel.org/r/20200728130437.7804-4-jack@suse.cz Signed-off-by: Theodore Ts'o <tytso@mit.edu>
qemuProcessHandleAcpiOstInfo(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainObjPtr vm, const char *alias, const char *slotType, const char *slot, unsigned int source, unsigned int status, void *opaque) { virQEMUDriverPtr driver = opaque; virObjectEventPtr event = NULL; virObjectLock(vm); VIR_DEBUG("ACPI OST info for device %s domain %p %s. " "slotType='%s' slot='%s' source=%u status=%u", NULLSTR(alias), vm, vm->def->name, slotType, slot, source, status); if (!alias) goto cleanup; if (STREQ(slotType, "DIMM")) { if ((source == 0x003 || source == 0x103) && (status == 0x01 || (status >= 0x80 && status <= 0x83))) { qemuDomainSignalDeviceRemoval(vm, alias, QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_GUEST_REJECTED); event = virDomainEventDeviceRemovalFailedNewFromObj(vm, alias); } } cleanup: virObjectUnlock(vm); virObjectEventStateQueue(driver->domainEventState, event); return 0; }
0
[ "CWE-416" ]
libvirt
1ac703a7d0789e46833f4013a3876c2e3af18ec7
16,095,016,604,055,110,000,000,000,000,000,000,000
37
qemu: Add missing lock in qemuProcessHandleMonitorEOF qemuMonitorUnregister will be called from multiple threads (e.g. threads in the rpc worker pool and the vm event thread). In some cases it isn't protected by the monitor lock, which may lead to g_source_unref being called more than once and eventually to a use-after-free problem. Add the missing lock in qemuProcessHandleMonitorEOF (which is the only place I found where the monitor lock was missing). Suggested-by: Michal Privoznik <mprivozn@redhat.com> Signed-off-by: Peng Liang <liangpeng10@huawei.com> Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
gst_matroska_demux_reset (GstElement * element) { GstMatroskaDemux *demux = GST_MATROSKA_DEMUX (element); GST_DEBUG_OBJECT (demux, "Resetting state"); gst_matroska_read_common_reset (GST_ELEMENT (demux), &demux->common); demux->num_a_streams = 0; demux->num_t_streams = 0; demux->num_v_streams = 0; demux->have_nonintraonly_v_streams = FALSE; demux->have_group_id = FALSE; demux->group_id = G_MAXUINT; demux->clock = NULL; demux->tracks_parsed = FALSE; if (demux->clusters) { g_array_free (demux->clusters, TRUE); demux->clusters = NULL; } g_list_foreach (demux->seek_parsed, (GFunc) gst_matroska_read_common_free_parsed_el, NULL); g_list_free (demux->seek_parsed); demux->seek_parsed = NULL; demux->last_stop_end = GST_CLOCK_TIME_NONE; demux->seek_block = 0; demux->stream_start_time = GST_CLOCK_TIME_NONE; demux->to_time = GST_CLOCK_TIME_NONE; demux->cluster_time = GST_CLOCK_TIME_NONE; demux->cluster_offset = 0; demux->cluster_prevsize = 0; demux->seen_cluster_prevsize = FALSE; demux->next_cluster_offset = 0; demux->stream_last_time = GST_CLOCK_TIME_NONE; demux->last_cluster_offset = 0; demux->index_offset = 0; demux->seekable = FALSE; demux->need_segment = FALSE; demux->segment_seqnum = 0; demux->requested_seek_time = GST_CLOCK_TIME_NONE; demux->seek_offset = -1; demux->audio_lead_in_ts = 0; demux->building_index = FALSE; if (demux->seek_event) { gst_event_unref (demux->seek_event); demux->seek_event = NULL; } demux->seek_index = NULL; demux->seek_entry = 0; if (demux->new_segment) { gst_event_unref (demux->new_segment); demux->new_segment = NULL; } demux->invalid_duration = FALSE; demux->cached_length = G_MAXUINT64; if (demux->deferred_seek_event) gst_event_unref (demux->deferred_seek_event); demux->deferred_seek_event = NULL; demux->deferred_seek_pad = NULL; gst_flow_combiner_clear (demux->flowcombiner); }
0
[]
gst-plugins-good
9181191511f9c0be6a89c98b311f49d66bd46dc3
201,311,898,078,402,730,000,000,000,000,000,000,000
72
matroskademux: Fix extraction of multichannel WavPack The old code had a couple of issues that all lead to potential memory safety bugs. - Use a constant for the Wavpack4Header size instead of using sizeof. It's written out into the data and not from the struct and who knows what special alignment/padding requirements some C compilers have. - gst_buffer_set_size() does not realloc the buffer when setting a bigger size than allocated, it only allows growing up to the maximum allocated size. Instead use a GstAdapter to collect all the blocks and take out everything at once in the end. - Check that enough data is actually available in the input and otherwise handle it as an error in all cases instead of silently ignoring it. Among other things this fixes out of bounds writes because the code assumed gst_buffer_set_size() can grow the buffer and simply wrote after the end of the buffer. Thanks to Natalie Silvanovich for reporting. Fixes https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/issues/859 Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/merge_requests/903>
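The safe pattern the message recommends — collect variable-sized blocks in a GstAdapter and only take a buffer once enough bytes are available — looks roughly like the hedged sketch below; the helper name and frame_size handling are illustrative, not the actual demuxer code:

#include <gst/base/gstadapter.h>

/* Sketch only: push each incoming block into the adapter, then take
 * the assembled frame in one step instead of trying to grow a
 * GstBuffer past its allocation with gst_buffer_set_size(). */
static GstBuffer *collect_frame(GstAdapter *adapter, GstBuffer *block,
                                gsize frame_size)
{
    gst_adapter_push(adapter, block);           /* adapter takes ownership */
    if (gst_adapter_available(adapter) < frame_size)
        return NULL;                            /* need more data */
    return gst_adapter_take_buffer(adapter, frame_size);
}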
static int mp_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct sb_uart_state *state = tty->driver_data; struct sb_uart_port *port = state->port; int ret = -EIO; MP_STATE_LOCK(state); if (!(tty->flags & (1 << TTY_IO_ERROR))) { mp_update_mctrl(port, set, clear); ret = 0; } MP_STATE_UNLOCK(state); return ret; }
0
[ "CWE-200" ]
linux
a8b33654b1e3b0c74d4a1fed041c9aae50b3c427
286,306,717,046,627,860,000,000,000,000,000,000,000
16
Staging: sb105x: info leak in mp_get_count() The icount.reserved[] array isn't initialized so it leaks stack information to userspace. Reported-by: Nico Golde <nico@ngolde.de> Reported-by: Fabian Yamaguchi <fabs@goesec.de> Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Cc: stable@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
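The leak class described here is fixed by zero-initializing the whole structure before filling only the fields you mean to expose, so uninitialized members (and padding) never reach userspace. A generic kernel-style sketch with made-up names:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_icount {
    int rx, tx;
    int reserved[9];            /* would leak stack bytes if left as-is */
};

static int demo_get_count(void __user *arg)
{
    struct demo_icount ic;

    memset(&ic, 0, sizeof(ic)); /* clear reserved[] and padding first */
    ic.rx = 1;
    ic.tx = 2;
    return copy_to_user(arg, &ic, sizeof(ic)) ? -EFAULT : 0;
}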
static void print_primaries(WriterContext *w, enum AVColorPrimaries color_primaries) { const char *val = av_color_primaries_name(color_primaries); if (!val || color_primaries == AVCOL_PRI_UNSPECIFIED) { print_str_opt("color_primaries", "unknown"); } else { print_str("color_primaries", val); } }
0
[ "CWE-476" ]
FFmpeg
837cb4325b712ff1aab531bf41668933f61d75d2
2,816,641,362,508,964,000,000,000,000,000,000,000
9
ffprobe: Fix null pointer dereference with color primaries Found-by: AD-lab of venustech Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
int tg_nop(struct task_group *tg, void *data) { return 0; }
0
[ "CWE-119" ]
linux
29d6455178a09e1dc340380c582b13356227e8df
115,056,988,974,000,880,000,000,000,000,000,000,000
4
sched: panic on corrupted stack end Until now, hitting this BUG_ON caused a recursive oops (because oops handling involves do_exit(), which calls into the scheduler, which in turn raises an oops), which caused stuff below the stack to be overwritten until a panic happened (e.g. via an oops in interrupt context, caused by the overwritten CPU index in the thread_info). Just panic directly. Signed-off-by: Jann Horn <jannh@google.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	loff_t *ppos)
{
	struct inode * inode = file->f_dentry->d_inode;
	char *page;
	ssize_t retval=0;
	int eof=0;
	ssize_t n, count;
	char *start;
	struct proc_dir_entry * dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char*) __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->get_info) {
			/* Handle old net routines */
			n = dp->get_info(page, &start, *ppos, count);
			if (n < count)
				eof = 1;
		} else if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}
0
[]
linux-2.6
8b90db0df7187a01fb7177f1f812123138f562cf
77,848,453,600,422,560,000,000,000,000,000,000,000
149
Insanity avoidance in /proc The old /proc interfaces were never updated to use loff_t, and are just generally broken. Now, we should be using the seq_file interface for all of the proc files, but converting the legacy functions is more work than most people care for and has little upside. But at least we can make the non-LFS rules explicit, rather than just insanely wrapping the offset or something. Signed-off-by: Linus Torvalds <torvalds@osdl.org>
int git_pkt_buffer_wants( const git_remote_head * const *refs, size_t count, transport_smart_caps *caps, git_buf *buf) { size_t i = 0; const git_remote_head *head; if (caps->common) { for (; i < count; ++i) { head = refs[i]; if (!head->local) break; } if (buffer_want_with_caps(refs[i], caps, buf) < 0) return -1; i++; } for (; i < count; ++i) { char oid[GIT_OID_HEXSZ]; head = refs[i]; if (head->local) continue; git_oid_fmt(oid, &head->oid); git_buf_put(buf, pkt_want_prefix, strlen(pkt_want_prefix)); git_buf_put(buf, oid, GIT_OID_HEXSZ); git_buf_putc(buf, '\n'); if (git_buf_oom(buf)) return -1; } return git_pkt_buffer_flush(buf); }
0
[ "CWE-119", "CWE-787" ]
libgit2
66e3774d279672ee51c3b54545a79d20d1ada834
337,357,655,590,456,850,000,000,000,000,000,000,000
39
smart_pkt: verify packet length exceeds PKT_LEN_SIZE Each packet line in the Git protocol is prefixed by a four-byte length of how much data will follow, which we parse in `git_pkt_parse_line`. The transmitted length is either zero, in the case of a flush packet, or must be at least four, as it also includes the encoded length itself. Not checking this may result in a buffer overflow, as we directly pass the length to functions which accept a `size_t` length as a parameter. Fix the issue by verifying that non-flush packets have at least a length of `PKT_LEN_SIZE`.
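Concretely, each pkt-line starts with four ASCII hex digits giving the total length including the prefix itself, so a valid non-flush length can never be 1-3. A hedged standalone sketch of that check (helper names are made up, not libgit2's):

#define PKT_LEN_SIZE 4

static int hexval(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

/* Returns the packet length, 0 for a flush packet, or -1 on malformed
 * input (bad hex, or a length smaller than the prefix itself). */
static int parse_pkt_len(const char *line)
{
    int i, v, len = 0;

    for (i = 0; i < PKT_LEN_SIZE; i++) {
        if ((v = hexval(line[i])) < 0)
            return -1;
        len = (len << 4) | v;
    }
    if (len == 0)
        return 0;              /* flush-pkt */
    if (len < PKT_LEN_SIZE)
        return -1;             /* the overflow case being fixed */
    return len;
}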
DISOpticalFlowImpl::DISOpticalFlowImpl() { finest_scale = 2; patch_size = 8; patch_stride = 4; grad_descent_iter = 16; variational_refinement_iter = 5; variational_refinement_alpha = 20.f; variational_refinement_gamma = 10.f; variational_refinement_delta = 5.f; border_size = 16; use_mean_normalization = true; use_spatial_propagation = true; coarsest_scale = 10; /* Use separate variational refinement instances for different scales to avoid repeated memory allocation: */ int max_possible_scales = 10; ws = hs = w = h = 0; for (int i = 0; i < max_possible_scales; i++) variational_refinement_processors.push_back(VariationalRefinement::create()); }
0
[ "CWE-125", "CWE-369" ]
opencv
d1615ba11a93062b1429fce9f0f638d1572d3418
292,067,150,545,361,300,000,000,000,000,000,000,000
22
video:fixed DISOpticalFlow segfault from small img
GF_Err pdin_box_dump(GF_Box *a, FILE * trace) { u32 i; GF_ProgressiveDownloadBox *p = (GF_ProgressiveDownloadBox *)a; gf_isom_box_dump_start(a, "ProgressiveDownloadBox", trace); gf_fprintf(trace, ">\n"); if (p->size) { for (i=0; i<p->count; i++) { gf_fprintf(trace, "<DownloadInfo rate=\"%d\" estimatedTime=\"%d\" />\n", p->rates[i], p->times[i]); } } else { gf_fprintf(trace, "<DownloadInfo rate=\"\" estimatedTime=\"\" />\n"); } gf_isom_box_dump_done("ProgressiveDownloadBox", a, trace); return GF_OK; }
0
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
124,917,698,168,124,190,000,000,000,000,000,000,000
17
fixed #2138
need_nsec_chain(dns_db_t *db, dns_dbversion_t *ver, const dns_rdata_nsec3param_t *param, bool *answer) { dns_dbnode_t *node = NULL; dns_rdata_t rdata = DNS_RDATA_INIT; dns_rdata_nsec3param_t myparam; dns_rdataset_t rdataset; isc_result_t result; *answer = false; result = dns_db_getoriginnode(db, &node); RUNTIME_CHECK(result == ISC_R_SUCCESS); dns_rdataset_init(&rdataset); result = dns_db_findrdataset(db, node, ver, dns_rdatatype_nsec, 0, 0, &rdataset, NULL); if (result == ISC_R_SUCCESS) { dns_rdataset_disassociate(&rdataset); dns_db_detachnode(db, &node); return (result); } if (result != ISC_R_NOTFOUND) { dns_db_detachnode(db, &node); return (result); } result = dns_db_findrdataset(db, node, ver, dns_rdatatype_nsec3param, 0, 0, &rdataset, NULL); if (result == ISC_R_NOTFOUND) { *answer = true; dns_db_detachnode(db, &node); return (ISC_R_SUCCESS); } if (result != ISC_R_SUCCESS) { dns_db_detachnode(db, &node); return (result); } for (result = dns_rdataset_first(&rdataset); result == ISC_R_SUCCESS; result = dns_rdataset_next(&rdataset)) { dns_rdataset_current(&rdataset, &rdata); CHECK(dns_rdata_tostruct(&rdata, &myparam, NULL)); dns_rdata_reset(&rdata); /* * Ignore any NSEC3PARAM removals. */ if (NSEC3REMOVE(myparam.flags)) continue; /* * Ignore the chain that we are in the process of deleting. */ if (myparam.hash == param->hash && myparam.iterations == param->iterations && myparam.salt_length == param->salt_length && !memcmp(myparam.salt, param->salt, myparam.salt_length)) continue; /* * Found an active NSEC3 chain. */ break; } if (result == ISC_R_NOMORE) { *answer = true; result = ISC_R_SUCCESS; } failure: if (dns_rdataset_isassociated(&rdataset)) dns_rdataset_disassociate(&rdataset); dns_db_detachnode(db, &node); return (result); }
0
[ "CWE-327" ]
bind9
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
118,275,139,742,095,900,000,000,000,000,000,000,000
76
Update keyfetch_done compute_tag check If compute_tag fails in keyfetch_done (for example because the algorithm is not supported), don't crash; instead, ignore the key.
com_use(String *buffer __attribute__((unused)), char *line) { char *tmp, buff[FN_REFLEN + 1]; int select_db; bzero(buff, sizeof(buff)); strmake_buf(buff, line); tmp= get_arg(buff, 0); if (!tmp || !*tmp) { put_info("USE must be followed by a database name", INFO_ERROR); return 0; } /* We need to recheck the current database, because it may change under our feet, for example if DROP DATABASE or RENAME DATABASE (latter one not yet available by the time the comment was written) */ get_current_db(); if (!current_db || cmp_database(charset_info, current_db,tmp)) { if (one_database) { skip_updates= 1; select_db= 0; // don't do mysql_select_db() } else select_db= 2; // do mysql_select_db() and build_completion_hash() } else { /* USE to the current db specified. We do need to send mysql_select_db() to make server update database level privileges, which might change since last USE (see bug#10979). For performance purposes, we'll skip rebuilding of completion hash. */ skip_updates= 0; select_db= 1; // do only mysql_select_db(), without completion } if (select_db) { /* reconnect once if connection is down or if connection was found to be down during query */ if (!connected && reconnect()) return opt_reconnect ? -1 : 1; // Fatal error if (mysql_select_db(&mysql,tmp)) { if (mysql_errno(&mysql) != CR_SERVER_GONE_ERROR) return put_error(&mysql); if (reconnect()) return opt_reconnect ? -1 : 1; // Fatal error if (mysql_select_db(&mysql,tmp)) return put_error(&mysql); } my_free(current_db); current_db=my_strdup(tmp,MYF(MY_WME)); #ifdef HAVE_READLINE if (select_db > 1) build_completion_hash(opt_rehash, 1); #endif } put_info("Database changed",INFO_INFO); return 0; }
1
[]
server
383007c75d6ef5043fa5781956a6a02b24e2b79e
251,334,105,837,671,270,000,000,000,000,000,000,000
72
mysql cli: fix USE command quoting * use proper sql quoting rules for USE, while preserving as much of historical behavior as possible * short commands (\u) behave as before
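The quoting rule being applied is the standard MySQL one: wrap the identifier in backticks and double any backtick embedded in it. A hypothetical helper showing just that transformation (not the actual client code; the caller must supply a destination of at least 2*strlen(src)+3 bytes):

/* Quote a MySQL identifier: `name`, with embedded ` doubled to ``. */
static void quote_identifier(char *dst, const char *src)
{
    char *d = dst;

    *d++ = '`';
    for (; *src; src++) {
        if (*src == '`')
            *d++ = '`';        /* double the embedded backtick */
        *d++ = *src;
    }
    *d++ = '`';
    *d = '\0';
}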
int CLASS ljpeg_start (struct jhead *jh, int info_only) { int c, tag; ushort len; uchar data[0x10000]; const uchar *dp; memset (jh, 0, sizeof *jh); jh->restart = INT_MAX; fread (data, 2, 1, ifp); if (data[1] != 0xd8) return 0; do { fread (data, 2, 2, ifp); tag = data[0] << 8 | data[1]; len = (data[2] << 8 | data[3]) - 2; if (tag <= 0xff00) return 0; fread (data, 1, len, ifp); switch (tag) { case 0xffc3: jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3; case 0xffc0: jh->bits = data[0]; jh->high = data[1] << 8 | data[2]; jh->wide = data[3] << 8 | data[4]; jh->clrs = data[5] + jh->sraw; if (len == 9 && !dng_version) getc(ifp); break; case 0xffc4: if (info_only) break; for (dp = data; dp < data+len && (c = *dp++) < 4; ) jh->free[c] = jh->huff[c] = make_decoder_ref (&dp); break; case 0xffda: jh->psv = data[1+data[0]*2]; jh->bits -= data[3+data[0]*2] & 15; break; case 0xffdd: jh->restart = data[0] << 8 | data[1]; } } while (tag != 0xffda); if (info_only) return 1; if (jh->clrs > 6 || !jh->huff[0]) return 0; FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c]; if (jh->sraw) { FORC(4) jh->huff[2+c] = jh->huff[1]; FORC(jh->sraw) jh->huff[1+c] = jh->huff[0]; } jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4); merror (jh->row, "ljpeg_start()"); return zero_after_ff = 1; }
0
[ "CWE-189" ]
LibRaw
4606c28f494a750892c5c1ac7903e62dd1c6fdb5
195,744,538,282,138,660,000,000,000,000,000,000,000
51
0.16.1: fix for dcraw ljpeg_start() vulnerability
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context, const TfLiteNode* node, int index) { const bool use_tensor = index < node->inputs->size && node->inputs->data[index] != kTfLiteOptionalTensor; if (use_tensor) { return GetMutableInput(context, node, index); } return nullptr; }
1
[ "CWE-125", "CWE-787" ]
tensorflow
00302787b788c5ff04cb6f62aed5a74d936e86c0
268,531,922,670,221,250,000,000,000,000,000,000,000
9
[tflite] Make `GetOptionalInputTensor` the same as `GetInput`. With the previous change, there is no more need for two separate APIs. We would deprecate `GetOptionalInputTensor` in the future. PiperOrigin-RevId: 332513386 Change-Id: Id7110271c25ebd6126ad8c82a493e37e0e0756b3
static int opfisttp(RAsm *a, ut8 *data, const Opcode *op) { int l = 0; switch (op->operands_count) { case 1: if ( op->operands[0].type & OT_MEMORY ) { if ( op->operands[0].type & OT_WORD ) { data[l++] = 0xdf; data[l++] = 0x08 | op->operands[0].regs[0]; } else if ( op->operands[0].type & OT_DWORD ) { data[l++] = 0xdb; data[l++] = 0x08 | op->operands[0].regs[0]; } else if ( op->operands[0].type & OT_QWORD ) { data[l++] = 0xdd; data[l++] = 0x08 | op->operands[0].regs[0]; } else { return -1; } } else { return -1; } break; default: return -1; } return l; }
0
[ "CWE-119", "CWE-125", "CWE-787" ]
radare2
9b46d38dd3c4de6048a488b655c7319f845af185
92,685,606,558,464,250,000,000,000,000,000,000,000
26
Fix #12372 and #12373 - Crash in x86 assembler (#12380) 0 ,0,[bP-bL-bP-bL-bL-r-bL-bP-bL-bL- mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx-- leA ,0,[bP-bL-bL-bP-bL-bP-bL-60@bL- leA ,0,[bP-bL-r-bP-bL-bP-bL-60@bL- mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
void my_net_local_init(NET *net) { net->max_packet= (uint) net_buffer_length; my_net_set_read_timeout(net, CLIENT_NET_READ_TIMEOUT); my_net_set_write_timeout(net, CLIENT_NET_WRITE_TIMEOUT); net->retry_count= 1; net->max_packet_size= max(net_buffer_length, max_allowed_packet); }
0
[]
mysql-server
3d8134d2c9b74bc8883ffe2ef59c168361223837
297,861,009,903,168,140,000,000,000,000,000,000,000
8
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE() Description: If mysql_stmt_close() encountered an error, it recorded the error in the prepared statement and then freed the memory assigned to the prepared statement. If mysql_stmt_error() is then used to get the error information, it results in a use-after-free. In all cases where mysql_stmt_close() can fail, the error would have been set by cli_advanced_command in the MYSQL structure. Solution: Don't copy the error from MYSQL using set_stmt_errmsg. There is no automated way to test the fix since it is in mysql_stmt_close(), which does not expect any reply from the server. Reviewed-By: Georgi Kodinov <georgi.kodinov@oracle.com> Reviewed-By: Ramil Kalimullin <ramil.kalimullin@oracle.com>
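The bug shape is: record state in an object, free the object, then let the caller read that state. The fix is simply not to touch the object once it can be freed; a minimal illustration with made-up names, not the actual client code:

#include <stdlib.h>

struct stmt {
    int last_errno;
};

/* Buggy shape: writing the error into *s and then freeing it makes any
 * later read of s->last_errno a use-after-free. Report the error through
 * the return value (or a longer-lived object) instead. */
static int stmt_close(struct stmt *s, int err)
{
    /* s->last_errno = err;   <-- what the old code effectively did */
    free(s);
    return err;                /* error travels with the call, not *s */
}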
double Field_enum::val_real(void) { return (double) Field_enum::val_int(); }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
207,272,654,954,712,500,000,000,000,000,000,000,000
4
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at the parsing stage. Since the expr item is allocated on expr_arena, all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter a prematurely freed item. When a table is reopened from the cache, vcol_info contains a stale expression. We refresh the expression via TABLE::vcol_fix_exprs(), but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above, the expr update must be done on expr_arena, as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because there was no second refix. Now refix is done in more cases, so it does reproduce. Tests affected: vcol.binlog 2. Also, the name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and must not fail the expr update. sql_mode values such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc. must not affect the vcol expression update. If the table was created successfully, any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <serg@mariadb.org>
static void kvm_register_steal_time(void) { int cpu = smp_processor_id(); struct kvm_steal_time *st = &per_cpu(steal_time, cpu); if (!has_steal_clock) return; memset(st, 0, sizeof(*st)); wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); pr_info("kvm-stealtime: cpu %d, msr %llx\n", cpu, (unsigned long long) slow_virt_to_phys(st)); }
0
[]
kvm
29fa6825463c97e5157284db80107d1bfac5d77b
166,772,444,996,711,000,000,000,000,000,000,000,000
14
x86, kvm: Clear paravirt_enabled on KVM guests for espfix32's benefit paravirt_enabled has the following effects: - Disables the F00F bug workaround warning. There is no F00F bug workaround any more because Linux's standard IDT handling already works around the F00F bug, but the warning still exists. This is only cosmetic, and, in any event, there is no such thing as KVM on a CPU with the F00F bug. - Disables 32-bit APM BIOS detection. On a KVM paravirt system, there should be no APM BIOS anyway. - Disables tboot. I think that the tboot code should check the CPUID hypervisor bit directly if it matters. - paravirt_enabled disables espfix32. espfix32 should *not* be disabled under KVM paravirt. The last point is the purpose of this patch. It fixes a leak of the high 16 bits of the kernel stack address on 32-bit KVM paravirt guests. Fixes CVE-2014-8134. Cc: stable@vger.kernel.org Suggested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Signed-off-by: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
xmlBufCreate(void) { xmlBufPtr ret; ret = (xmlBufPtr) xmlMalloc(sizeof(xmlBuf)); if (ret == NULL) { xmlBufMemoryError(NULL, "creating buffer"); return(NULL); } ret->compat_use = 0; ret->use = 0; ret->error = 0; ret->buffer = NULL; ret->size = xmlDefaultBufferSize; ret->compat_size = xmlDefaultBufferSize; ret->alloc = xmlBufferAllocScheme; ret->content = (xmlChar *) xmlMallocAtomic(ret->size * sizeof(xmlChar)); if (ret->content == NULL) { xmlBufMemoryError(ret, "creating buffer"); xmlFree(ret); return(NULL); } ret->content[0] = 0; ret->contentIO = NULL; return(ret); }
0
[ "CWE-399" ]
libxml2
213f1fe0d76d30eaed6e5853057defc43e6df2c9
118,356,951,086,759,700,000,000,000,000,000,000,000
25
CVE-2015-1819 Enforce the reader to run in constant memory One of the operations on the reader could resolve entities, leading to the classic expansion issue. Make sure the buffer used for xmlreader operations is bounded. Introduce a new allocation type for the buffers used for this purpose.
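"Constant memory" here means the working buffer may grow only up to a hard cap, so entity expansion cannot balloon it. A generic bounded-growth sketch (names and cap are illustrative, not libxml2's allocation scheme):

#include <stdlib.h>

#define BUF_MAX (1u << 20)      /* illustrative 1 MiB ceiling */

struct buf { unsigned char *data; size_t size, used; };

/* Grow b->data to hold at least need bytes, refusing to pass BUF_MAX. */
static int buf_grow_bounded(struct buf *b, size_t need)
{
    unsigned char *p;
    size_t sz = b->size ? b->size : 256;

    if (need <= b->size)
        return 0;               /* already big enough */
    if (need > BUF_MAX)
        return -1;              /* bounded: fail instead of expanding */
    while (sz < need)
        sz *= 2;
    if (sz > BUF_MAX)
        sz = BUF_MAX;
    if ((p = realloc(b->data, sz)) == NULL)
        return -1;
    b->data = p;
    b->size = sz;
    return 0;
}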
HuffmanDecoder * operator()() const { unsigned int codeLengths[32]; std::fill(codeLengths + 0, codeLengths + 32, 5); member_ptr<HuffmanDecoder> pDecoder(new HuffmanDecoder); pDecoder->Initialize(codeLengths, 32); return pDecoder.release(); }
0
[ "CWE-190", "CWE-125" ]
cryptopp
07dbcc3d9644b18e05c1776db2a57fe04d780965
27,835,650,915,753,803,000,000,000,000,000,000,000
8
Add Inflator::BadDistanceErr exception (Issue 414) The improved validation and exception handling clear the Address Sanitizer and Undefined Behavior Sanitizer findings.
const boost::optional<std::string>& TopologyDescription::getSetName() const { return _setName; }
0
[ "CWE-755" ]
mongo
75f7184eafa78006a698cda4c4adfb57f1290047
99,665,677,411,901,960,000,000,000,000,000,000,000
3
SERVER-50170 fix max staleness read preference parameter for server selection
cached_NPN_GetStringIdentifiers(const NPUTF8 **names, int32_t nameCount, NPIdentifier *identifiers) { /* XXX: could be optimized further */ invoke_NPN_GetStringIdentifiers(names, nameCount, identifiers); #if USE_NPIDENTIFIER_CACHE if (use_npidentifier_cache()) { for (int i = 0; i < nameCount; i++) { NPIdentifier ident = identifiers[i]; if (npidentifier_cache_lookup(ident) == NULL) { npidentifier_cache_reserve(1); npidentifier_cache_add_string(ident, names[i]); } } } #endif }
0
[ "CWE-264" ]
nspluginwrapper
7e4ab8e1189846041f955e6c83f72bc1624e7a98
80,448,270,142,590,870,000,000,000,000,000,000,000
17
Support all the new variables added
nfnl_cthelper_update(const struct nlattr * const tb[], struct nf_conntrack_helper *helper) { int ret; if (tb[NFCTH_PRIV_DATA_LEN]) return -EBUSY; if (tb[NFCTH_POLICY]) { ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]); if (ret < 0) return ret; } if (tb[NFCTH_QUEUE_NUM]) helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM])); if (tb[NFCTH_STATUS]) { int status = ntohl(nla_get_be32(tb[NFCTH_STATUS])); switch(status) { case NFCT_HELPER_STATUS_ENABLED: helper->flags |= NF_CT_HELPER_F_CONFIGURED; break; case NFCT_HELPER_STATUS_DISABLED: helper->flags &= ~NF_CT_HELPER_F_CONFIGURED; break; } } return 0; }
0
[ "CWE-862" ]
linux
4b380c42f7d00a395feede754f0bc2292eebe6e5
164,898,436,274,996,720,000,000,000,000,000,000,000
30
netfilter: nfnetlink_cthelper: Add missing permission checks The capability check in nfnetlink_rcv() verifies that the caller has CAP_NET_ADMIN in the namespace that "owns" the netlink socket. However, nfnl_cthelper_list is shared by all net namespaces on the system. An unprivileged user can create user and net namespaces in which he holds CAP_NET_ADMIN to bypass the netlink_net_capable() check: $ nfct helper list nfct v1.4.4: netlink error: Operation not permitted $ vpnns -- nfct helper list { .name = ftp, .queuenum = 0, .l3protonum = 2, .l4protonum = 6, .priv_data_len = 24, .status = enabled, }; Add capable() checks in nfnetlink_cthelper, as this is cleaner than trying to generalize the solution. Signed-off-by: Kevin Cernekee <cernekee@chromium.org> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
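The distinction the patch relies on: netlink_net_capable() checks CAP_NET_ADMIN in the namespace that owns the socket, while capable() checks it in the initial namespace; for global state like nfnl_cthelper_list only the latter is a real barrier. A kernel-context sketch of the added guard (illustrative, not the exact patch):

#include <linux/capability.h>
#include <linux/errno.h>

/* Illustrative: a per-netns check passes inside an unprivileged user
 * namespace, so handlers touching global state must use capable(),
 * which is only satisfied in the initial user namespace. */
static int cthelper_op_guard(void)
{
    if (!capable(CAP_NET_ADMIN))
        return -EPERM;
    return 0;
}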
static GtkBuilder *make_builder() { GError *error = NULL; GtkBuilder *builder = gtk_builder_new(); if (!g_glade_file) { /* load additional widgets from glade */ gtk_builder_add_objects_from_string(builder, WIZARD_GLADE_CONTENTS, sizeof(WIZARD_GLADE_CONTENTS) - 1, (gchar**)misc_widgets, &error); if (error != NULL) error_msg_and_die("Error loading glade data: %s", error->message); /* Load pages from internal string */ gtk_builder_add_objects_from_string(builder, WIZARD_GLADE_CONTENTS, sizeof(WIZARD_GLADE_CONTENTS) - 1, (gchar**)page_names, &error); if (error != NULL) error_msg_and_die("Error loading glade data: %s", error->message); } else { /* -g FILE: load UI from it */ /* load additional widgets from glade */ gtk_builder_add_objects_from_file(builder, g_glade_file, (gchar**)misc_widgets, &error); if (error != NULL) error_msg_and_die("Can't load %s: %s", g_glade_file, error->message); gtk_builder_add_objects_from_file(builder, g_glade_file, (gchar**)page_names, &error); if (error != NULL) error_msg_and_die("Can't load %s: %s", g_glade_file, error->message); } return builder; }
0
[ "CWE-200" ]
libreport
257578a23d1537a2d235aaa2b1488ee4f818e360
28,766,785,502,210,860,000,000,000,000,000,000,000
35
wizard: fix saving users' changes after reviewing dump dir files If the user reviewed the dump dir's files while reporting the crash, the changes were thrown away and the original data was passed to the bugzilla bug report. report-gtk saves the first text view buffer and then reloads data from the reported problem directory, which causes the changes made to those text views to be thrown away. The function save_text_if_changed(), besides saving the text, also reloads the files from the dump dir and updates the GUI state from the dump dir. This commit moves the reloading and GUI-updating functions out of that function. Related to rhbz#1270235 Signed-off-by: Matej Habrnal <mhabrnal@redhat.com>
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, struct kvec *iov, int n_vec) { int rc = 0; struct smb2_write_req *req = NULL; struct smb2_write_rsp *rsp = NULL; int resp_buftype; *nbytes = 0; if (n_vec < 1) return rc; rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req); if (rc) return rc; if (io_parms->tcon->ses->server == NULL) return -ECONNABORTED; req->hdr.ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; req->Length = cpu_to_le32(io_parms->length); req->Offset = cpu_to_le64(io_parms->offset); /* 4 for rfc1002 length field */ req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer) - 4); req->RemainingBytes = 0; iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and 1 for Buffer */ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; /* length of entire message including data to be written */ inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */); rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1, &resp_buftype, 0); rsp = (struct smb2_write_rsp *)iov[0].iov_base; if (rc) { cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); cifs_dbg(VFS, "Send error in write = %d\n", rc); } else *nbytes = le32_to_cpu(rsp->DataLength); free_rsp_buf(resp_buftype, rsp); return rc; }
0
[ "CWE-399" ]
linux
18f39e7be0121317550d03e267e3ebd4dbfbb3ce
50,489,718,958,481,430,000,000,000,000,000,000,000
53
[CIFS] Possible null ptr deref in SMB2_tcon As Raphael Geissert pointed out, tcon_error_exit can dereference tcon and there is one path in which tcon can be null. Signed-off-by: Steve French <smfrench@gmail.com> CC: Stable <stable@vger.kernel.org> # v3.7+ Reported-by: Raphael Geissert <geissert@debian.org>
void LibRaw::process_Sony_0x0116(uchar *buf, ushort len, unsigned long long id) { int i = 0; if (((id == SonyID_DSLR_A900) || (id == SonyID_DSLR_A900_APSC) || (id == SonyID_DSLR_A850) || (id == SonyID_DSLR_A850_APSC)) && (len >= 2)) i = 1; else if ((id >= SonyID_DSLR_A550) && (len >= 3)) i = 2; else return; imCommon.BatteryTemperature = (float)(buf[i] - 32) / 1.8f; }
0
[ "CWE-125" ]
LibRaw
c243f4539233053466c1309bde606815351bee81
11,669,846,482,309,768,000,000,000,000,000,000,000
17
additional checks in parseSonySRF parseSonySR2: buffer size check
ffi_closure_free (void *ptr) { void *codeseg, *dataseg; size_t rounded_size; dataseg = ADD_TO_POINTER(ptr, -overhead); memcpy(&rounded_size, dataseg, sizeof(rounded_size)); memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *)); munmap(dataseg, rounded_size); munmap(codeseg, rounded_size); }
0
[ "CWE-787" ]
libffi
44a6c28545186d78642487927952844156fc7ab5
89,380,002,484,716,500,000,000,000,000,000,000,000
11
aarch64: Flush code mapping in addition to data mapping (#471) This needs a new function, ffi_data_to_code_pointer, to translate from data pointers to code pointers. Fixes issue #470.
static int ff_layout_async_handle_error_v3(struct rpc_task *task, struct pnfs_layout_segment *lseg, u32 idx) { struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); switch (task->tk_status) { /* File access problems. Don't mark the device as unavailable */ case -EACCES: case -ESTALE: case -EISDIR: case -EBADHANDLE: case -ELOOP: case -ENOSPC: break; case -EJUKEBOX: nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); goto out_retry; default: dprintk("%s DS connection error %d\n", __func__, task->tk_status); nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid); } /* FIXME: Need to prevent infinite looping here. */ return -NFS4ERR_RESET_TO_PNFS; out_retry: task->tk_status = 0; rpc_restart_call_prepare(task); rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); return -EAGAIN; }
0
[ "CWE-787" ]
linux
ed34695e15aba74f45247f1ee2cf7e09d449f925
50,022,064,211,193,575,000,000,000,000,000,000,000
32
pNFS/flexfiles: fix incorrect size check in decode_nfs_fh() We (adam zabrocki, alexander matrosov, alexander tereshkin, maksym bazalii) observed that the check: if (fh->size > sizeof(struct nfs_fh)) should not use the size of the nfs_fh struct, which includes an extra two bytes from the size field: struct nfs_fh { unsigned short size; unsigned char data[NFS_MAXFHSIZE]; } but should determine the size from data[NFS_MAXFHSIZE], so the memcpy will not write 2 bytes beyond the destination. The proposed fix is to compare against NFS_MAXFHSIZE directly, as is done elsewhere in the fs code base. Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver") Signed-off-by: Nikola Livic <nlivic@gmail.com> Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
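The off-by-two is easy to reproduce in isolation: sizeof the struct counts the unsigned short size field as well as the data array, so a 130-byte handle passes the old check and the memcpy writes 2 bytes past data[]. A hedged standalone sketch of the corrected bound (struct and helper names are illustrative):

#include <string.h>

#define NFS_MAXFHSIZE 128

struct nfs_fh_demo {
    unsigned short size;
    unsigned char  data[NFS_MAXFHSIZE];
};

/* Compare against the array bound, not sizeof(*dst), which is 130. */
static int copy_fh(struct nfs_fh_demo *dst,
                   const unsigned char *src, unsigned short len)
{
    if (len > NFS_MAXFHSIZE)
        return -1;
    dst->size = len;
    memcpy(dst->data, src, len);
    return 0;
}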
T _cubic_atX_pc(const float fx, const int y, const int z, const int c) const { return cimg::type<T>::cut(_cubic_atX_p(fx,y,z,c)); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
116,817,289,603,728,600,000,000,000,000,000,000,000
3
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
static NOINLINE void ReleaseFreeInCache(nedpool *p, threadcache *tc, int mymspace) THROWSPEC { unsigned int age=THREADCACHEMAXFREESPACE/8192; /*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/ while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE) { RemoveCacheEntries(p, tc, age); /*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/ age>>=1; } /*RELEASE_LOCK(&p->m[mymspace]->mutex);*/ }
0
[ "CWE-119", "CWE-787" ]
git
34fa79a6cde56d6d428ab0d3160cb094ebad3305
274,167,393,305,576,900,000,000,000,000,000,000,000
12
prefer memcpy to strcpy When we already know the length of a string (e.g., because we just malloc'd to fit it), it's nicer to use memcpy than strcpy, as it makes it more obvious that we are not going to overflow the buffer (because the size we pass matches the size in the allocation). This also eliminates calls to strcpy, which make auditing the code base harder. Signed-off-by: Jeff King <peff@peff.net> Signed-off-by: Junio C Hamano <gitster@pobox.com>
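The pattern in question, when the length is already known: allocate exactly, then memcpy that exact number of bytes, making the no-overflow argument visible at the call site. A small sketch (the helper name is illustrative):

#include <stdlib.h>
#include <string.h>

/* Equivalent to strdup(), but the copy length visibly matches the
 * allocation, which is the auditability point of the commit. */
static char *dup_string(const char *src)
{
    size_t len = strlen(src);
    char *dst = malloc(len + 1);

    if (dst)
        memcpy(dst, src, len + 1);   /* includes the NUL */
    return dst;
}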