Dataset schema (column / dtype / observed min–max):

  func        string    lengths 0 – 484k
  target      int64     0 – 1
  cwe         sequence of CWE ids
  project     string    lengths 2 – 29
  commit_id   string    lengths 40 – 40
  hash        float64   1,215,700,430,453,689,100,000,000 – 340,281,914,521,452,260,000,000,000,000
  size        int64     1 – 24k
  message     string    lengths 0 – 13.3k
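Each row below consists of a func snippet, a labeled metadata line (target, cwe, project, commit_id, hash, size), and the fixing commit's message. As a minimal sketch of how rows with this schema could be consumed — assuming, hypothetically, that they are exported as JSON Lines in a file named vulns.jsonl with exactly these column names (the file name and format are not part of the source) — one might iterate them like this:

```python
# Minimal loading sketch. Assumptions (not from the source): rows are
# stored as JSON Lines in "vulns.jsonl", one object per row, with the
# columns func, target, cwe, project, commit_id, hash, size, message.
import json

def load_records(path="vulns.jsonl"):
    """Yield one dict per row, lightly validated against the schema above."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if not line.strip():
                continue
            row = json.loads(line)
            assert row["target"] in (0, 1)       # int64, range 0 - 1
            assert isinstance(row["cwe"], list)  # sequence of CWE ids
            assert len(row["commit_id"]) == 40   # full git SHA-1
            yield row

if __name__ == "__main__":
    rows = list(load_records())
    vulnerable = [r for r in rows if r["target"] == 1]
    print(f"{len(vulnerable)}/{len(rows)} rows are labeled vulnerable")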
GF_Box *alis_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataEntryAliasBox, GF_QT_BOX_TYPE_ALIS); return (GF_Box *)tmp; }
target: 0 | cwe: [ "CWE-476" ] | project: gpac | commit_id: 6170024568f4dda310e98ef7508477b425c58d09 | hash: 202,939,168,640,326,600,000,000,000,000,000,000,000 | size: 5
fixed potential crash - cf #1263
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg, uint64_t page_size) { uint64_t reg_size = reg->size; uint64_t host_user_addr = reg->host_user_addr; uint64_t guest_phys_addr = reg->guest_phys_addr; uint64_t host_phys_addr; uint64_t size; host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr); size = page_size - (guest_phys_addr & (page_size - 1)); size = RTE_MIN(size, reg_size); if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0) return -1; host_user_addr += size; guest_phys_addr += size; reg_size -= size; while (reg_size > 0) { size = RTE_MIN(reg_size, page_size); host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t) host_user_addr); if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0) return -1; host_user_addr += size; guest_phys_addr += size; reg_size -= size; } /* sort guest page array if over binary search threshold */ if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) { qsort((void *)dev->guest_pages, dev->nr_guest_pages, sizeof(struct guest_page), guest_page_addrcmp); } return 0; }
target: 0 | cwe: [ "CWE-190" ] | project: dpdk | commit_id: 3ae4beb079ce242240c34376a066bbccd0c0b23e | hash: 326,346,801,272,621,300,000,000,000,000,000,000,000 | size: 41
vhost: check log mmap offset and size overflow vhost_user_set_log_base() is a message handler that is called to handle the VHOST_USER_SET_LOG_BASE message. Its payload contains a 64 bit size and offset. Both are added up and used as a size when calling mmap(). There is no integer overflow check. If an integer overflow occurs a smaller memory map would be created than requested. Since the returned mapping is mapped as writable and used for logging, a memory corruption could occur. CVE-2020-10722 Fixes: fbc4d248b198 ("vhost: fix offset while mmaping log base address") Cc: stable@dpdk.org Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com> Reviewed-by: Xiaolong Ye <xiaolong.ye@intel.com> Reviewed-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, int cmd, int data) { struct request *rq; int err; rq = blk_get_request(q, WRITE, __GFP_WAIT); rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->data = NULL; rq->data_len = 0; rq->extra_len = 0; rq->timeout = BLK_DEFAULT_SG_TIMEOUT; rq->cmd[0] = cmd; rq->cmd[4] = data; rq->cmd_len = 6; err = blk_execute_rq(q, bd_disk, rq, 0); blk_put_request(rq); return err; }
target: 0 | cwe: [ "CWE-399" ] | project: linux-2.6 | commit_id: f2f1fa78a155524b849edf359e42a3001ea652c0 | hash: 167,156,212,871,333,580,000,000,000,000,000,000,000 | size: 20
Enforce a minimum SG_IO timeout There's no point in having too short SG_IO timeouts, since if the command does end up timing out, we'll end up through the reset sequence that is several seconds long in order to abort the command that timed out. As a result, shorter timeouts than a few seconds simply do not make sense, as the recovery would be longer than the timeout itself. Add a BLK_MIN_SG_TIMEOUT to match the existign BLK_DEFAULT_SG_TIMEOUT. Suggested-by: Alan Cox <alan@lxorguk.ukuu.org.uk> Acked-by: Tejun Heo <tj@kernel.org> Acked-by: Jens Axboe <jens.axboe@oracle.com> Cc: Jeff Garzik <jeff@garzik.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
virtual void updateFont(GfxState *state) { }
target: 0 | cwe: [] | project: poppler | commit_id: abf167af8b15e5f3b510275ce619e6fdb42edd40 | hash: 185,794,397,075,400,600,000,000,000,000,000,000,000 | size: 1
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
static double mp_list_depth(_cimg_math_parser& mp) { const unsigned int ind = (unsigned int)cimg::mod((int)_mp_arg(2),mp.imglist.width()); return (double)mp.imglist[ind]._depth; }
target: 0 | cwe: [ "CWE-770" ] | project: cimg | commit_id: 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | hash: 330,787,561,465,616,100,000,000,000,000,000,000,000 | size: 4
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
TEST_F(RouterTest, ConnectPauseNoResume) { // Explicitly configure an HTTP upstream, to test factory creation. cm_.thread_local_cluster_.cluster_.info_->upstream_config_ = absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>(); envoy::extensions::upstreams::http::http::v3::HttpConnectionPoolProto http_config; cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value() .mutable_typed_config() ->PackFrom(http_config); NiceMock<Http::MockRequestEncoder> encoder; Http::ResponseDecoder* response_decoder = nullptr; EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) .WillOnce(Invoke( [&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { response_decoder = &decoder; callbacks.onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_, upstream_stream_info_, Http::Protocol::Http10); return nullptr; })); expectResponseTimerCreate(); EXPECT_CALL(encoder, encodeHeaders(_, false)); Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); headers.setMethod("CONNECT"); router_.decodeHeaders(headers, false); // Make sure any early data does not go upstream. EXPECT_CALL(encoder, encodeData(_, _)).Times(0); Buffer::OwnedImpl data; router_.decodeData(data, true); // Now send the response headers, and ensure the deferred payload is not proxied. EXPECT_CALL(encoder, encodeData(_, _)).Times(0); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "400"}}); response_decoder->decodeHeaders(std::move(response_headers), true); }
target: 0 | cwe: [ "CWE-703" ] | project: envoy | commit_id: 18871dbfb168d3512a10c78dd267ff7c03f564c6 | hash: 73,160,237,050,713,120,000,000,000,000,000,000,000 | size: 39
[1.18] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <ovanders@redhat.com>
static void setError(UErrorCode& ec, UErrorCode err) { if (U_SUCCESS(ec)) { ec = err; } }
target: 0 | cwe: [ "CWE-190" ] | project: icu | commit_id: 53d8c8f3d181d87a6aa925b449b51c4a2c922a51 | hash: 8,034,529,901,722,793,000,000,000,000,000,000,000 | size: 5
ICU-20246 Fixing another integer overflow in number parsing.
char* dd_load_text_ext(const struct dump_dir *dd, const char *name, unsigned flags) { // if (!dd->locked) // error_msg_and_die("dump_dir is not opened"); /* bug */ if (!str_is_correct_filename(name)) { error_msg("Cannot load text. '%s' is not a valid file name", name); if (!(flags & DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE)) xfunc_die(); } /* Compat with old abrt dumps. Remove in abrt-2.1 */ if (strcmp(name, "release") == 0) name = FILENAME_OS_RELEASE; char *full_path = concat_path_file(dd->dd_dirname, name); char *ret = load_text_file(full_path, flags); free(full_path); return ret; }
target: 1 | cwe: [ "CWE-20" ] | project: libreport | commit_id: 1951e7282043dfe1268d492aea056b554baedb75 | hash: 58,806,084,395,265,575,000,000,000,000,000,000,000 | size: 22
lib: fix races in dump directory handling code Florian Weimer <fweimer@redhat.com>: dd_opendir() should keep a file handle (opened with O_DIRECTORY) and use openat() and similar functions to access files in it. ... The file system manipulation functions should guard against hard links (check that link count is <= 1, just as in the user coredump code in abrt-hook-ccpp), possibly after opening the file with O_PATH first to avoid side effects on open/close. Related: #1214745 Signed-off-by: Jakub Filak <jfilak@redhat.com>
static Bool adts_dmx_sync_frame_bs(GF_BitStream *bs, ADTSHeader *hdr) { u32 val; u64 pos; while (gf_bs_available(bs)>7) { val = gf_bs_read_u8(bs); if (val!=0xFF) continue; val = gf_bs_read_int(bs, 4); if (val != 0x0F) { gf_bs_read_int(bs, 4); continue; } hdr->is_mp2 = (Bool)gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 2); hdr->no_crc = (Bool)gf_bs_read_int(bs, 1); pos = gf_bs_get_position(bs) - 2; hdr->profile = 1 + gf_bs_read_int(bs, 2); hdr->sr_idx = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 1); hdr->nb_ch = gf_bs_read_int(bs, 3); //value 1->6 match channel number, value 7 is 7.1 if (hdr->nb_ch==7) hdr->nb_ch = 8; gf_bs_read_int(bs, 4); hdr->frame_size = gf_bs_read_int(bs, 13); gf_bs_read_int(bs, 11); gf_bs_read_int(bs, 2); hdr->hdr_size = 7; if (!hdr->no_crc) { gf_bs_read_u16(bs); hdr->hdr_size = 9; } if (!GF_M4ASampleRates[hdr->sr_idx] || (hdr->frame_size < hdr->hdr_size)) { gf_bs_seek(bs, pos+1); continue; } hdr->frame_size -= hdr->hdr_size; if (gf_bs_available(bs) == hdr->frame_size) { return GF_TRUE; } if (gf_bs_available(bs) < hdr->frame_size) { break; } gf_bs_skip_bytes(bs, hdr->frame_size); val = gf_bs_read_u8(bs); if (val!=0xFF) { gf_bs_seek(bs, pos+1); continue; } val = gf_bs_read_int(bs, 4); if (val!=0x0F) { gf_bs_read_int(bs, 4); gf_bs_seek(bs, pos+1); continue; } gf_bs_seek(bs, pos+hdr->hdr_size); return GF_TRUE; } return GF_FALSE; }
target: 0 | cwe: [ "CWE-476", "CWE-787" ] | project: gpac | commit_id: 22774aa9e62f586319c8f107f5bae950fed900bc | hash: 12,396,286,037,079,902,000,000,000,000,000,000,000 | size: 65
fixed potential crash in adts reframer with broken streams - cf #1723
static int break_ksm(struct vm_area_struct *vma, unsigned long addr) { struct page *page; int ret = 0; do { cond_resched(); page = follow_page(vma, addr, FOLL_GET); if (IS_ERR_OR_NULL(page)) break; if (PageKsm(page)) ret = handle_mm_fault(vma->vm_mm, vma, addr, FAULT_FLAG_WRITE); else ret = VM_FAULT_WRITE; put_page(page); } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); /* * We must loop because handle_mm_fault() may back out if there's * any difficulty e.g. if pte accessed bit gets updated concurrently. * * VM_FAULT_WRITE is what we have been hoping for: it indicates that * COW has been broken, even if the vma does not permit VM_WRITE; * but note that a concurrent fault might break PageKsm for us. * * VM_FAULT_SIGBUS could occur if we race with truncation of the * backing file, which also invalidates anonymous pages: that's * okay, that truncation will have unmapped the PageKsm for us. * * VM_FAULT_OOM: at the time of writing (late July 2009), setting * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the * current task has TIF_MEMDIE set, and will be OOM killed on return * to user; and ksmd, having no mm, would never be chosen for that. * * But if the mm is in a limited mem_cgroup, then the fault may fail * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and * even ksmd can fail in this way - though it's usually breaking ksm * just to undo a merge it made a moment before, so unlikely to oom. * * That's a pity: we might therefore have more kernel pages allocated * than we're counting as nodes in the stable tree; but ksm_do_scan * will retry to break_cow on each pass, so should recover the page * in due course. The important thing is to not let VM_MERGEABLE * be cleared while any such pages might remain in the area. */ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; }
target: 0 | cwe: [ "CWE-362", "CWE-125" ] | project: linux | commit_id: 2b472611a32a72f4a118c069c2d62a1a3f087afd | hash: 1,631,897,480,858,169,000,000,000,000,000,000,000 | size: 47
ksm: fix NULL pointer dereference in scan_get_next_rmap_item() Andrea Righi reported a case where an exiting task can race against ksmd::scan_get_next_rmap_item (http://lkml.org/lkml/2011/6/1/742) easily triggering a NULL pointer dereference in ksmd. ksm_scan.mm_slot == &ksm_mm_head with only one registered mm CPU 1 (__ksm_exit) CPU 2 (scan_get_next_rmap_item) list_empty() is false lock slot == &ksm_mm_head list_del(slot->mm_list) (list now empty) unlock lock slot = list_entry(slot->mm_list.next) (list is empty, so slot is still ksm_mm_head) unlock slot->mm == NULL ... Oops Close this race by revalidating that the new slot is not simply the list head again. Andrea's test case: #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/mman.h> #define BUFSIZE getpagesize() int main(int argc, char **argv) { void *ptr; if (posix_memalign(&ptr, getpagesize(), BUFSIZE) < 0) { perror("posix_memalign"); exit(1); } if (madvise(ptr, BUFSIZE, MADV_MERGEABLE) < 0) { perror("madvise"); exit(1); } *(char *)NULL = 0; return 0; } Reported-by: Andrea Righi <andrea@betterlinux.com> Tested-by: Andrea Righi <andrea@betterlinux.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Signed-off-by: Hugh Dickins <hughd@google.com> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
htp_status_t htp_tx_req_set_uri(htp_tx_t *tx, const char *uri, size_t uri_len, enum htp_alloc_strategy_t alloc) { if ((tx == NULL) || (uri == NULL)) return HTP_ERROR; tx->request_uri = copy_or_wrap_mem(uri, uri_len, alloc); if (tx->request_uri == NULL) return HTP_ERROR; return HTP_OK; }
target: 0 | cwe: [] | project: libhtp | commit_id: c7c03843cd6b1cbf44eb435d160ba53aec948828 | hash: 56,147,721,297,511,540,000,000,000,000,000,000,000 | size: 8
Harden decompress code against memory stress Under severe memory pressure the decompress code can fail to setup properly. Add checks before dereferencing pointers.
int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws) { struct device *dev; dev = wakeup_source_device_create(parent, ws); if (IS_ERR(dev)) return PTR_ERR(dev); ws->dev = dev; return 0; }
target: 0 | cwe: [ "CWE-787" ] | project: linux | commit_id: aa838896d87af561a33ecefea1caa4c15a68bc47 | hash: 22,760,754,580,729,610,000,000,000,000,000,000,000 | size: 11
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <joe@perches.com> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
int ppp_register_channel(struct ppp_channel *chan) { return ppp_register_net_channel(current->nsproxy->net_ns, chan); }
target: 0 | cwe: [] | project: linux | commit_id: 4ab42d78e37a294ac7bc56901d563c642e03c4ae | hash: 41,109,785,916,717,706,000,000,000,000,000,000,000 | size: 4
ppp, slip: Validate VJ compression slot parameters completely Currently slhc_init() treats out-of-range values of rslots and tslots as equivalent to 0, except that if tslots is too large it will dereference a null pointer (CVE-2015-7799). Add a range-check at the top of the function and make it return an ERR_PTR() on error instead of NULL. Change the callers accordingly. Compile-tested only. Reported-by: 郭永刚 <guoyonggang@360.cn> References: http://article.gmane.org/gmane.comp.security.oss.general/17908 Signed-off-by: Ben Hutchings <ben@decadent.org.uk> Signed-off-by: David S. Miller <davem@davemloft.net>
bool FunctionContextImpl::check_allocations_empty() { if (_allocations.empty() && _external_bytes_tracked == 0) { return true; } // TODO: fix this //if (_debug) _context->set_error("Leaked allocations."); return false; }
target: 0 | cwe: [ "CWE-200" ] | project: incubator-doris | commit_id: 246ac4e37aa4da6836b7850cb990f02d1c3725a3 | hash: 2,128,458,912,902,215,200,000,000,000,000,000,000 | size: 9
[fix] fix a bug of encryption function with iv may return wrong result (#8277)
String *Field_longstr::uncompress(String *val_buffer, String *val_ptr, const uchar *from, uint from_length) { if (from_length) { uchar method= (*from & 0xF0) >> 4; /* Uncompressed data */ if (!method) { val_ptr->set((const char*) from + 1, from_length - 1, field_charset); return val_ptr; } if (compression_methods[method].uncompress) { if (!compression_methods[method].uncompress(val_buffer, from, from_length, field_length)) { val_buffer->set_charset(field_charset); status_var_increment(get_thd()->status_var.column_decompressions); return val_buffer; } } } /* It would be better to return 0 in case of errors, but to take the safer route, let's return a zero string and let the general handler catch the error. */ val_ptr->set("", 0, field_charset); return val_ptr; }
target: 0 | cwe: [ "CWE-416", "CWE-703" ] | project: server | commit_id: 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | hash: 239,367,792,564,101,940,000,000,000,000,000,000,000 | size: 34
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <serg@mariadb.org>
ecma_date_min_from_time (ecma_number_t time) /**< time value */ { JERRY_ASSERT (!ecma_number_is_nan (time)); int32_t time_in_day = ecma_date_time_in_day_from_time (time); return ((int32_t) (time_in_day / ECMA_DATE_MS_PER_MINUTE)) % ECMA_DATE_MINUTES_PER_HOUR; } /* ecma_date_min_from_time */
target: 0 | cwe: [ "CWE-416" ] | project: jerryscript | commit_id: 3bcd48f72d4af01d1304b754ef19fe1a02c96049 | hash: 308,558,106,687,174,400,000,000,000,000,000,000,000 | size: 8
Improve parse_identifier (#4691) Ascii string length is no longer computed during string allocation. JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz batizjob@gmail.com
static int xennet_fill_frags(struct netfront_queue *queue, struct sk_buff *skb, struct sk_buff_head *list) { RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct xen_netif_rx_response rx; skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; RING_COPY_RESPONSE(&queue->rx, ++cons, &rx); if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to < skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { xennet_set_rx_rsp_cons(queue, ++cons + skb_queue_len(list)); kfree_skb(nskb); return -ENOENT; } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(nfrag), rx.offset, rx.status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } xennet_set_rx_rsp_cons(queue, cons); return 0; }
target: 0 | cwe: [] | project: linux | commit_id: f63c2c2032c2e3caad9add3b82cc6e91c376fd26 | hash: 133,519,759,218,387,460,000,000,000,000,000,000,000 | size: 38
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses() The commit referenced below moved the invocation past the "next" label, without any explanation. In fact this allows misbehaving backends undue control over the domain the frontend runs in, as earlier detected errors require the skb to not be freed (it may be retained for later processing via xennet_move_rx_slot(), or it may simply be unsafe to have it freed). This is CVE-2022-33743 / XSA-405. Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront") Signed-off-by: Jan Beulich <jbeulich@suse.com> Reviewed-by: Juergen Gross <jgross@suse.com> Signed-off-by: Juergen Gross <jgross@suse.com>
static void function_rename(RzFlag *flags, RzAnalysisFunction *fcn) { const char *locname = "loc."; const size_t locsize = strlen(locname); char *fcnname = fcn->name; if (strncmp(fcn->name, locname, locsize) == 0) { const char *fcnpfx, *restofname; RzFlagItem *f; fcn->type = RZ_ANALYSIS_FCN_TYPE_FCN; fcnpfx = rz_analysis_fcntype_tostring(fcn->type); restofname = fcn->name + locsize; fcn->name = rz_str_newf("%s.%s", fcnpfx, restofname); f = rz_flag_get_i(flags, fcn->addr); rz_flag_rename(flags, f, fcn->name); free(fcnname); } }
target: 0 | cwe: [ "CWE-703" ] | project: rizin | commit_id: 6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939 | hash: 273,545,558,069,413,220,000,000,000,000,000,000,000 | size: 20
Initialize retctx,ctx before freeing the inner elements In rz_core_analysis_type_match retctx structure was initialized on the stack only after a "goto out_function", where a field of that structure was freed. When the goto path is taken, the field is not properly initialized and it cause cause a crash of Rizin or have other effects. Fixes: CVE-2021-4022
static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; }
target: 0 | cwe: [ "CWE-200" ] | project: linux-2.6 | commit_id: 78b79876761b86653df89c48a7010b5cbd41a84a | hash: 207,594,734,305,831,870,000,000,000,000,000,000,000 | size: 19
netfilter: ip_tables: fix infoleak to userspace Structures ipt_replace, compat_ipt_replace, and xt_get_revision are copied from userspace. Fields of these structs that are zero-terminated strings are not checked. When they are used as argument to a format string containing "%s" in request_module(), some sensitive information is leaked to userspace via argument of spawned modprobe process. The first and the third bugs were introduced before the git epoch; the second was introduced in 2722971c (v2.6.17-rc1). To trigger the bug one should have CAP_NET_ADMIN. Signed-off-by: Vasiliy Kulikov <segoon@openwall.com> Signed-off-by: Patrick McHardy <kaber@trash.net>
FuzzStream(ConnectionManagerImpl& conn_manager, FuzzConfig& config, const HeaderMap& request_headers, test::common::http::HeaderStatus decode_header_status, bool end_stream) : conn_manager_(conn_manager), config_(config) { config_.newStream(); request_state_ = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers; response_state_ = StreamState::PendingHeaders; decoder_filter_ = config.decoder_filter_; encoder_filter_ = config.encoder_filter_; EXPECT_CALL(*config_.codec_, dispatch(_)) .WillOnce(InvokeWithoutArgs([this, &request_headers, end_stream] { decoder_ = &conn_manager_.newStream(encoder_); auto headers = std::make_unique<TestRequestHeaderMapImpl>(request_headers); if (headers->Method() == nullptr) { headers->setReferenceKey(Headers::get().Method, "GET"); } if (headers->Host() != nullptr && !HeaderUtility::authorityIsValid(headers->getHostValue())) { // Sanitize host header so we don't fail at ASSERTs that verify header sanity checks // which should have been performed by the codec. headers->setHost(Fuzz::replaceInvalidHostCharacters(headers->getHostValue())); } // If sendLocalReply is called: ON_CALL(encoder_, encodeHeaders(_, true)) .WillByDefault(Invoke([this](const ResponseHeaderMap&, bool end_stream) -> void { response_state_ = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers; })); decoder_->decodeHeaders(std::move(headers), end_stream); return Http::okStatus(); })); ON_CALL(*decoder_filter_, decodeHeaders(_, _)) .WillByDefault( InvokeWithoutArgs([this, decode_header_status]() -> Http::FilterHeadersStatus { header_status_ = fromHeaderStatus(decode_header_status); return *header_status_; })); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); }
target: 0 | cwe: [ "CWE-22" ] | project: envoy | commit_id: 5333b928d8bcffa26ab19bf018369a835f697585 | hash: 64,521,952,910,200,740,000,000,000,000,000,000,000 | size: 40
Implement handling of escaped slash characters in URL path Fixes: CVE-2021-29492 Signed-off-by: Yan Avlasov <yavlasov@google.com>
static void cgroup_pidlist_stop(struct seq_file *s, void *v) { struct kernfs_open_file *of = s->private; struct cgroup_file_ctx *ctx = of->priv; struct cgroup_pidlist *l = ctx->procs1.pidlist; if (l) mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, CGROUP_PIDLIST_DESTROY_DELAY); mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex); }
target: 0 | cwe: [ "CWE-287", "CWE-269" ] | project: linux | commit_id: 24f6008564183aa120d07c03d9289519c2fe02af | hash: 267,447,119,341,283,860,000,000,000,000,000,000,000 | size: 11
cgroup-v1: Require capabilities to set release_agent The cgroup release_agent is called with call_usermodehelper. The function call_usermodehelper starts the release_agent with a full set fo capabilities. Therefore require capabilities when setting the release_agaent. Reported-by: Tabitha Sable <tabitha.c.sable@gmail.com> Tested-by: Tabitha Sable <tabitha.c.sable@gmail.com> Fixes: 81a6a5cdd2c5 ("Task Control Groups: automatic userspace notification of idle cgroups") Cc: stable@vger.kernel.org # v2.6.24+ Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com> Signed-off-by: Tejun Heo <tj@kernel.org>
static int imap_ac_add(struct Account *a, struct Mailbox *m) { struct ImapAccountData *adata = a->adata; if (!adata) { struct ConnAccount cac = { { 0 } }; char mailbox[PATH_MAX]; if (imap_parse_path(mailbox_path(m), &cac, mailbox, sizeof(mailbox)) < 0) return -1; adata = imap_adata_new(a); adata->conn = mutt_conn_new(&cac); if (!adata->conn) { imap_adata_free((void **) &adata); return -1; } mutt_account_hook(m->realpath); if (imap_login(adata) < 0) { imap_adata_free((void **) &adata); return -1; } a->adata = adata; a->adata_free = imap_adata_free; } if (!m->mdata) { struct Url *url = url_parse(mailbox_path(m)); struct ImapMboxData *mdata = imap_mdata_new(adata, url->path); /* fixup path and realpath, mainly to replace / by /INBOX */ char buf[1024]; imap_qualify_path(buf, sizeof(buf), &adata->conn->account, mdata->name); mutt_buffer_strcpy(&m->pathbuf, buf); mutt_str_replace(&m->realpath, mailbox_path(m)); m->mdata = mdata; m->mdata_free = imap_mdata_free; url_free(&url); } return 0; }
target: 0 | cwe: [ "CWE-522", "CWE-287", "CWE-755" ] | project: neomutt | commit_id: 9c36717a3e2af1f2c1b7242035455ec8112b4b06 | hash: 105,289,729,712,083,920,000,000,000,000,000,000,000 | size: 49
imap: close connection on all failures Thanks to Gabriel Salles-Loustau for spotting the problem. Co-authored-by: Kevin McCarthy <kevin@8t8.us>
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, int page_end) { int nr = page_end - page_start; lockdep_assert_held(&pcpu_lock); bitmap_set(chunk->populated, page_start, nr); chunk->nr_populated += nr; pcpu_nr_empty_pop_pages += nr; }
target: 0 | cwe: [] | project: linux | commit_id: 4f996e234dad488e5d9ba0858bc1bae12eff82c3 | hash: 56,926,679,844,767,440,000,000,000,000,000,000,000 | size: 11
percpu: fix synchronization between chunk->map_extend_work and chunk destruction Atomic allocations can trigger async map extensions which is serviced by chunk->map_extend_work. pcpu_balance_work which is responsible for destroying idle chunks wasn't synchronizing properly against chunk->map_extend_work and may end up freeing the chunk while the work item is still in flight. This patch fixes the bug by rolling async map extension operations into pcpu_balance_work. Signed-off-by: Tejun Heo <tj@kernel.org> Reported-and-tested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com> Reported-by: Vlastimil Babka <vbabka@suse.cz> Reported-by: Sasha Levin <sasha.levin@oracle.com> Cc: stable@vger.kernel.org # v3.18+ Fixes: 9c824b6a172c ("percpu: make sure chunk->map array has available space")
std::string AsyncSSLSocket::getSSLClientSupportedVersions() const { if (!parseClientHello_) { return ""; } return folly::join(":", clientHelloInfo_->clientHelloSupportedVersions_); }
target: 0 | cwe: [ "CWE-125" ] | project: folly | commit_id: c321eb588909646c15aefde035fd3133ba32cdee | hash: 180,978,535,250,031,300,000,000,000,000,000,000,000 | size: 6
Handle close_notify as standard writeErr in AsyncSSLSocket. Summary: Fixes CVE-2019-11934 Reviewed By: mingtaoy Differential Revision: D18020613 fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ipt_replace *repl) { struct xt_percpu_counter_alloc_state alloc_state = { 0 }; struct ipt_entry *iter; unsigned int *offsets; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } offsets = xt_alloc_entry_offsets(newinfo->number); if (!offsets) return -ENOMEM; i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) goto out_free; if (i < repl->num_entries) offsets[i] = (void *)iter - entry0; ++i; if (strcmp(ipt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } ret = -EINVAL; if (i != repl->num_entries) goto out_free; ret = xt_check_table_hooks(newinfo, repl->valid_hooks); if (ret) goto out_free; if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) { ret = -ELOOP; goto out_free; } kvfree(offsets); /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size, &alloc_state); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return ret; } return ret; out_free: kvfree(offsets); return ret; }
target: 0 | cwe: [ "CWE-787" ] | project: linux | commit_id: b29c457a6511435960115c0f548c4360d5f4801d | hash: 9,446,357,104,697,793,000,000,000,000,000,000,000 | size: 77
netfilter: x_tables: fix compat match/target pad out-of-bound write xt_compat_match/target_from_user doesn't check that zeroing the area to start of next rule won't write past end of allocated ruleset blob. Remove this code and zero the entire blob beforehand. Reported-by: syzbot+cfc0247ac173f597aaaa@syzkaller.appspotmail.com Reported-by: Andy Nguyen <theflow@google.com> Fixes: 9fa492cdc160c ("[NETFILTER]: x_tables: simplify compat API") Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; }
target: 0 | cwe: [ "CWE-284", "CWE-264" ] | project: linux | commit_id: bfdc0b497faa82a0ba2f9dddcf109231dd519fcc | hash: 98,773,626,931,543,110,000,000,000,000,000,000,000 | size: 5
sysctl: restrict write access to dmesg_restrict When dmesg_restrict is set to 1 CAP_SYS_ADMIN is needed to read the kernel ring buffer. But a root user without CAP_SYS_ADMIN is able to reset dmesg_restrict to 0. This is an issue when e.g. LXC (Linux Containers) are used and complete user space is running without CAP_SYS_ADMIN. A unprivileged and jailed root user can bypass the dmesg_restrict protection. With this patch writing to dmesg_restrict is only allowed when root has CAP_SYS_ADMIN. Signed-off-by: Richard Weinberger <richard@nod.at> Acked-by: Dan Rosenberg <drosenberg@vsecurity.com> Acked-by: Serge E. Hallyn <serge@hallyn.com> Cc: Eric Paris <eparis@redhat.com> Cc: Kees Cook <kees.cook@canonical.com> Cc: James Morris <jmorris@namei.org> Cc: Eugene Teo <eugeneteo@kernel.org> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
irqreturn_t xen_debug_interrupt(int irq, void *dev_id) { struct shared_info *sh = HYPERVISOR_shared_info; int cpu = smp_processor_id(); xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); int i; unsigned long flags; static DEFINE_SPINLOCK(debug_lock); struct vcpu_info *v; spin_lock_irqsave(&debug_lock, flags); printk("\nvcpu %d\n ", cpu); for_each_online_cpu(i) { int pending; v = per_cpu(xen_vcpu, i); pending = (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask; printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i, pending, v->evtchn_upcall_pending, (int)(sizeof(v->evtchn_pending_sel)*2), v->evtchn_pending_sel); } v = per_cpu(xen_vcpu, cpu); printk("\npending:\n "); for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) printk("%0*"PRI_xen_ulong"%s", (int)sizeof(sh->evtchn_pending[0])*2, sh->evtchn_pending[i], i % 8 == 0 ? "\n " : " "); printk("\nglobal mask:\n "); for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(sh->evtchn_mask[0])*2), sh->evtchn_mask[i], i % 8 == 0 ? "\n " : " "); printk("\nglobally unmasked:\n "); for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(sh->evtchn_mask[0])*2), sh->evtchn_pending[i] & ~sh->evtchn_mask[i], i % 8 == 0 ? "\n " : " "); printk("\nlocal cpu%d mask:\n ", cpu); for (i = (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--) printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2), cpu_evtchn[i], i % 8 == 0 ? "\n " : " "); printk("\nlocally unmasked:\n "); for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) { xen_ulong_t pending = sh->evtchn_pending[i] & ~sh->evtchn_mask[i] & cpu_evtchn[i]; printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(sh->evtchn_mask[0])*2), pending, i % 8 == 0 ? "\n " : " "); } printk("\npending list:\n"); for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) { if (sync_test_bit(i, BM(sh->evtchn_pending))) { int word_idx = i / BITS_PER_EVTCHN_WORD; printk(" %d: event %d -> irq %d%s%s%s\n", cpu_from_evtchn(i), i, get_evtchn_to_irq(i), sync_test_bit(word_idx, BM(&v->evtchn_pending_sel)) ? "" : " l2-clear", !sync_test_bit(i, BM(sh->evtchn_mask)) ? "" : " globally-masked", sync_test_bit(i, BM(cpu_evtchn)) ? "" : " locally-masked"); } } spin_unlock_irqrestore(&debug_lock, flags); return IRQ_HANDLED; }
target: 0 | cwe: [ "CWE-400", "CWE-703" ] | project: linux | commit_id: e99502f76271d6bc4e374fe368c50c67a1fd3070 | hash: 85,988,948,974,009,600,000,000,000,000,000,000,000 | size: 83
xen/events: defer eoi in case of excessive number of events In case rogue guests are sending events at high frequency it might happen that xen_evtchn_do_upcall() won't stop processing events in dom0. As this is done in irq handling a crash might be the result. In order to avoid that, delay further inter-domain events after some time in xen_evtchn_do_upcall() by forcing eoi processing into a worker on the same cpu, thus inhibiting new events coming in. The time after which eoi processing is to be delayed is configurable via a new module parameter "event_loop_timeout" which specifies the maximum event loop time in jiffies (default: 2, the value was chosen after some tests showing that a value of 2 was the lowest with an only slight drop of dom0 network throughput while multiple guests performed an event storm). How long eoi processing will be delayed can be specified via another parameter "event_eoi_delay" (again in jiffies, default 10, again the value was chosen after testing with different delay values). This is part of XSA-332. Cc: stable@vger.kernel.org Reported-by: Julien Grall <julien@xen.org> Signed-off-by: Juergen Gross <jgross@suse.com> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org> Reviewed-by: Wei Liu <wl@xen.org>
static void do_int(struct kernel_vm86_regs *regs, int i, unsigned char __user *ssp, unsigned short sp) { unsigned long __user *intr_ptr; unsigned long segoffs; if (regs->pt.cs == BIOSSEG) goto cannot_handle; if (is_revectored(i, &KVM86->int_revectored)) goto cannot_handle; if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) goto cannot_handle; pushw(ssp, sp, get_vflags(regs), cannot_handle); pushw(ssp, sp, regs->pt.cs, cannot_handle); pushw(ssp, sp, IP(regs), cannot_handle); regs->pt.cs = segoffs >> 16; SP(regs) -= 6; IP(regs) = segoffs & 0xffff; clear_TF(regs); clear_IF(regs); clear_AC(regs); return; cannot_handle: return_to_32bit(regs, VM86_INTx + (i << 8)); }
target: 0 | cwe: [ "CWE-264" ] | project: linux-2.6 | commit_id: 1a5a9906d4e8d1976b701f889d8f35d54b928f25 | hash: 52,642,091,771,813,720,000,000,000,000,000,000,000 | size: 31
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 
143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. 
====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: <stable@vger.kernel.org> [2.6.38+] Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
evutil_format_sockaddr_port_(const struct sockaddr *sa, char *out, size_t outlen) { char b[128]; const char *res=NULL; int port; if (sa->sa_family == AF_INET) { const struct sockaddr_in *sin = (const struct sockaddr_in*)sa; res = evutil_inet_ntop(AF_INET, &sin->sin_addr,b,sizeof(b)); port = ntohs(sin->sin_port); if (res) { evutil_snprintf(out, outlen, "%s:%d", b, port); return out; } } else if (sa->sa_family == AF_INET6) { const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6*)sa; res = evutil_inet_ntop(AF_INET6, &sin6->sin6_addr,b,sizeof(b)); port = ntohs(sin6->sin6_port); if (res) { evutil_snprintf(out, outlen, "[%s]:%d", b, port); return out; } } evutil_snprintf(out, outlen, "<addr with socktype %d>", (int)sa->sa_family); return out; }
target: 0 | cwe: [ "CWE-119", "CWE-787" ] | project: libevent | commit_id: 329acc18a0768c21ba22522f01a5c7f46cacc4d5 | hash: 267,648,879,629,467,160,000,000,000,000,000,000,000 | size: 27
evutil_parse_sockaddr_port(): fix buffer overflow @asn-the-goblin-slayer: "Length between '[' and ']' is cast to signed 32 bit integer on line 1815. Is the length is more than 2<<31 (INT_MAX), len will hold a negative value. Consequently, it will pass the check at line 1816. Segfault happens at line 1819. Generate a resolv.conf with generate-resolv.conf, then compile and run poc.c. See entry-functions.txt for functions in tor that might be vulnerable. Please credit 'Guido Vranken' for this discovery through the Tor bug bounty program." Reproducer for gdb (https://gist.github.com/azat/be2b0d5e9417ba0dfe2c): start p (1ULL<<31)+1ULL # $1 = 2147483649 p malloc(sizeof(struct sockaddr)) # $2 = (void *) 0x646010 p malloc(sizeof(int)) # $3 = (void *) 0x646030 p malloc($1) # $4 = (void *) 0x7fff76a2a010 p memset($4, 1, $1) # $5 = 1990369296 p (char *)$4 # $6 = 0x7fff76a2a010 '\001' <repeats 200 times>... set $6[0]='[' set $6[$1]=']' p evutil_parse_sockaddr_port($4, $2, $3) # $7 = -1 Before: $ gdb bin/http-connect < gdb (gdb) $1 = 2147483649 (gdb) (gdb) $2 = (void *) 0x646010 (gdb) (gdb) $3 = (void *) 0x646030 (gdb) (gdb) $4 = (void *) 0x7fff76a2a010 (gdb) (gdb) $5 = 1990369296 (gdb) (gdb) $6 = 0x7fff76a2a010 '\001' <repeats 200 times>... (gdb) (gdb) (gdb) (gdb) Program received signal SIGSEGV, Segmentation fault. __memcpy_sse2_unaligned () at memcpy-sse2-unaligned.S:36 After: $ gdb bin/http-connect < gdb (gdb) $1 = 2147483649 (gdb) (gdb) $2 = (void *) 0x646010 (gdb) (gdb) $3 = (void *) 0x646030 (gdb) (gdb) $4 = (void *) 0x7fff76a2a010 (gdb) (gdb) $5 = 1990369296 (gdb) (gdb) $6 = 0x7fff76a2a010 '\001' <repeats 200 times>... (gdb) (gdb) (gdb) (gdb) $7 = -1 (gdb) (gdb) quit Fixes: #318
static void evtchn_2l_clear_pending(evtchn_port_t port) { struct shared_info *s = HYPERVISOR_shared_info; sync_clear_bit(port, BM(&s->evtchn_pending[0])); }
target: 0 | cwe: [ "CWE-400", "CWE-703" ] | project: linux | commit_id: e99502f76271d6bc4e374fe368c50c67a1fd3070 | hash: 159,572,197,545,793,900,000,000,000,000,000,000,000 | size: 5
xen/events: defer eoi in case of excessive number of events In case rogue guests are sending events at high frequency it might happen that xen_evtchn_do_upcall() won't stop processing events in dom0. As this is done in irq handling a crash might be the result. In order to avoid that, delay further inter-domain events after some time in xen_evtchn_do_upcall() by forcing eoi processing into a worker on the same cpu, thus inhibiting new events coming in. The time after which eoi processing is to be delayed is configurable via a new module parameter "event_loop_timeout" which specifies the maximum event loop time in jiffies (default: 2, the value was chosen after some tests showing that a value of 2 was the lowest with an only slight drop of dom0 network throughput while multiple guests performed an event storm). How long eoi processing will be delayed can be specified via another parameter "event_eoi_delay" (again in jiffies, default 10, again the value was chosen after testing with different delay values). This is part of XSA-332. Cc: stable@vger.kernel.org Reported-by: Julien Grall <julien@xen.org> Signed-off-by: Juergen Gross <jgross@suse.com> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org> Reviewed-by: Wei Liu <wl@xen.org>
ves_icall_get_property_info (MonoReflectionProperty *property, MonoPropertyInfo *info, PInfo req_info) { MonoDomain *domain = mono_object_domain (property); MONO_ARCH_SAVE_REGS; if ((req_info & PInfo_ReflectedType) != 0) MONO_STRUCT_SETREF (info, parent, mono_type_get_object (domain, &property->klass->byval_arg)); else if ((req_info & PInfo_DeclaringType) != 0) MONO_STRUCT_SETREF (info, parent, mono_type_get_object (domain, &property->property->parent->byval_arg)); if ((req_info & PInfo_Name) != 0) MONO_STRUCT_SETREF (info, name, mono_string_new (domain, property->property->name)); if ((req_info & PInfo_Attributes) != 0) info->attrs = property->property->attrs; if ((req_info & PInfo_GetMethod) != 0) MONO_STRUCT_SETREF (info, get, property->property->get ? mono_method_get_object (domain, property->property->get, property->klass): NULL); if ((req_info & PInfo_SetMethod) != 0) MONO_STRUCT_SETREF (info, set, property->property->set ? mono_method_get_object (domain, property->property->set, property->klass): NULL); /* * There may be other methods defined for properties, though, it seems they are not exposed * in the reflection API */ }
target: 0 | cwe: [ "CWE-264" ] | project: mono | commit_id: 035c8587c0d8d307e45f1b7171a0d337bb451f1e | hash: 325,482,180,519,141,460,000,000,000,000,000,000,000 | size: 29
Allow only primitive types/enums in RuntimeHelpers.InitializeArray ().
absl::string_view pathAndQueryParams() { return path_and_query_params_; }
target: 0 | cwe: [] | project: envoy | commit_id: 3b5acb2f43548862dadb243de7cf3994986a8e04 | hash: 250,749,299,539,440,500,000,000,000,000,000,000,000 | size: 1
http, url: Bring back chromium_url and http_parser_parse_url (#198) * Revert GURL as HTTP URL parser utility This reverts: 1. commit c9c4709c844b90b9bb2935d784a428d667c9df7d 2. commit d828958b591a6d79f4b5fa608ece9962b7afbe32 3. commit 2d69e30c51f2418faf267aaa6c1126fce9948c62 Signed-off-by: Dhi Aurrahman <dio@tetrate.io>
onig_compile(regex_t* reg, const UChar* pattern, const UChar* pattern_end, OnigErrorInfo* einfo) { #define COMPILE_INIT_SIZE 20 int r, init_size; Node* root; ScanEnv scan_env; #ifdef USE_CALL UnsetAddrList uslist; #endif root = 0; if (IS_NOT_NULL(einfo)) { einfo->enc = reg->enc; einfo->par = (UChar* )NULL; } #ifdef ONIG_DEBUG print_enc_string(stderr, reg->enc, pattern, pattern_end); #endif if (reg->alloc == 0) { init_size = (int )(pattern_end - pattern) * 2; if (init_size <= 0) init_size = COMPILE_INIT_SIZE; r = BB_INIT(reg, init_size); if (r != 0) goto end; } else reg->used = 0; reg->num_mem = 0; reg->num_repeat = 0; reg->num_null_check = 0; reg->repeat_range_alloc = 0; reg->repeat_range = (OnigRepeatRange* )NULL; r = onig_parse_tree(&root, pattern, pattern_end, reg, &scan_env); if (r != 0) goto err; /* mixed use named group and no-named group */ if (scan_env.num_named > 0 && IS_SYNTAX_BV(scan_env.syntax, ONIG_SYN_CAPTURE_ONLY_NAMED_GROUP) && ! ONIG_IS_OPTION_ON(reg->options, ONIG_OPTION_CAPTURE_GROUP)) { if (scan_env.num_named != scan_env.num_mem) r = disable_noname_group_capture(&root, reg, &scan_env); else r = numbered_ref_check(root); if (r != 0) goto err; } r = check_backrefs(root, &scan_env); if (r != 0) goto err; #ifdef USE_CALL if (scan_env.num_call > 0) { r = unset_addr_list_init(&uslist, scan_env.num_call); if (r != 0) goto err; scan_env.unset_addr_list = &uslist; r = setup_call(root, &scan_env, 0); if (r != 0) goto err_unset; r = setup_call2(root); if (r != 0) goto err_unset; r = recursive_call_check_trav(root, &scan_env, 0); if (r < 0) goto err_unset; r = infinite_recursive_call_check_trav(root, &scan_env); if (r != 0) goto err_unset; setup_called_state(root, 0); } reg->num_call = scan_env.num_call; #endif r = setup_tree(root, reg, 0, &scan_env); if (r != 0) goto err_unset; #ifdef ONIG_DEBUG_PARSE print_tree(stderr, root); #endif reg->capture_history = scan_env.capture_history; reg->bt_mem_start = scan_env.bt_mem_start; reg->bt_mem_start |= reg->capture_history; if (IS_FIND_CONDITION(reg->options)) MEM_STATUS_ON_ALL(reg->bt_mem_end); else { reg->bt_mem_end = scan_env.bt_mem_end; reg->bt_mem_end |= reg->capture_history; } reg->bt_mem_start |= reg->bt_mem_end; clear_optimize_info(reg); #ifndef ONIG_DONT_OPTIMIZE r = set_optimize_info_from_tree(root, reg, &scan_env); if (r != 0) goto err_unset; #endif if (IS_NOT_NULL(scan_env.mem_env_dynamic)) { xfree(scan_env.mem_env_dynamic); scan_env.mem_env_dynamic = (MemEnv* )NULL; } r = compile_tree(root, reg, &scan_env); if (r == 0) { if (scan_env.keep_num > 0) { r = add_opcode(reg, OP_UPDATE_VAR); if (r != 0) goto err; r = add_update_var_type(reg, UPDATE_VAR_KEEP_FROM_STACK_LAST); if (r != 0) goto err; r = add_mem_num(reg, 0 /* not used */); if (r != 0) goto err; } r = add_opcode(reg, OP_END); #ifdef USE_CALL if (scan_env.num_call > 0) { r = fix_unset_addr_list(&uslist, reg); unset_addr_list_end(&uslist); if (r != 0) goto err; } #endif if ((reg->num_repeat != 0) || (reg->bt_mem_end != 0) #ifdef USE_CALLOUT || (IS_NOT_NULL(reg->extp) && reg->extp->callout_num != 0) #endif ) reg->stack_pop_level = STACK_POP_LEVEL_ALL; else { if (reg->bt_mem_start != 0) reg->stack_pop_level = STACK_POP_LEVEL_MEM_START; else reg->stack_pop_level = STACK_POP_LEVEL_FREE; } } #ifdef USE_CALL else if (scan_env.num_call > 0) { unset_addr_list_end(&uslist); } #endif onig_node_free(root); #ifdef ONIG_DEBUG_COMPILE onig_print_names(stderr, reg); onig_print_compiled_byte_code_list(stderr, reg); #endif end: return r; err_unset: #ifdef USE_CALL if (scan_env.num_call > 0) { unset_addr_list_end(&uslist); } #endif err: if (IS_NOT_NULL(scan_env.error)) { if (IS_NOT_NULL(einfo)) { einfo->par = scan_env.error; einfo->par_end = scan_env.error_end; } } onig_node_free(root); if (IS_NOT_NULL(scan_env.mem_env_dynamic)) xfree(scan_env.mem_env_dynamic); return r; }
target: 0 | cwe: [ "CWE-125" ] | project: oniguruma | commit_id: 4d461376bd85e7994835677b2ff453a43c49cd28 | hash: 155,608,384,999,261,100,000,000,000,000,000,000,000 | size: 171
don't expand string case folds to alternatives if code length == 1 and byte length is same
load_public_key (MonoArray *pkey, MonoDynamicImage *assembly) { gsize len; guint32 token = 0; char blob_size [6]; char *b = blob_size; if (!pkey) return token; len = mono_array_length (pkey); mono_metadata_encode_value (len, b, &b); token = mono_image_add_stream_data (&assembly->blob, blob_size, b - blob_size); mono_image_add_stream_data (&assembly->blob, mono_array_addr (pkey, char, 0), len); assembly->public_key = g_malloc (len); memcpy (assembly->public_key, mono_array_addr (pkey, char, 0), len); assembly->public_key_len = len; /* Special case: check for ECMA key (16 bytes) */ if ((len == MONO_ECMA_KEY_LENGTH) && mono_is_ecma_key (mono_array_addr (pkey, char, 0), len)) { /* In this case we must reserve 128 bytes (1024 bits) for the signature */ assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH; } else if (len >= MONO_PUBLIC_KEY_HEADER_LENGTH + MONO_MINIMUM_PUBLIC_KEY_LENGTH) { /* minimum key size (in 2.0) is 384 bits */ assembly->strong_name_size = len - MONO_PUBLIC_KEY_HEADER_LENGTH; } else { /* FIXME - verifier */ g_warning ("Invalid public key length: %d bits (total: %d)", (int)MONO_PUBLIC_KEY_BIT_SIZE (len), (int)len); assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH; /* to be safe */ } assembly->strong_name = g_malloc0 (assembly->strong_name_size); return token; }
target: 0 | cwe: [ "CWE-20" ] | project: mono | commit_id: 4905ef1130feb26c3150b28b97e4a96752e0d399 | hash: 5,521,645,104,606,552,000,000,000,000,000,000,000 | size: 34
Handle invalid instantiation of generic methods. * verify.c: Add new function to internal verifier API to check method instantiations. * reflection.c (mono_reflection_bind_generic_method_parameters): Check the instantiation before returning it. Fixes #655847
dissect_kafka_controlled_shutdown_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset, kafka_api_version_t api_version) { /* error_code */ offset = dissect_kafka_error(tvb, pinfo, tree, offset); /* [partition_remaining] */ offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version >= 3, api_version, &dissect_kafka_controlled_shutdown_response_partition_remaining, NULL); if (api_version >= 3) { offset = dissect_kafka_tagged_fields(tvb, pinfo, tree, offset, 0); } return offset; }
0
[ "CWE-401" ]
wireshark
f4374967bbf9c12746b8ec3cd54dddada9dd353e
295,598,662,920,714,950,000,000,000,000,000,000,000
16
Kafka: Limit our decompression size. Don't assume that the Internet has our best interests at heart when it gives us the size of our decompression buffer. Assign an arbitrary limit of 50 MB. This fixes #16739 in that it takes care of ** (process:17681): WARNING **: 20:03:07.440: Dissector bug, protocol Kafka, in packet 31: ../epan/proto.c:7043: failed assertion "end >= fi->start" which is different from the original error output. It looks like *that* might have been taken care of in one of the other recent Kafka bug fixes. The decompression routines return a success or failure status. Use gbooleans instead of ints for that.
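The cap described above reduces to a bounds check on the attacker-supplied size before any buffer is allocated. A minimal C sketch of the idea; the constant and helper name are illustrative, not Wireshark's actual API:

#include <stdbool.h>
#include <stdint.h>

#define MAX_DECOMPRESSED_BYTES (50u * 1024u * 1024u)  /* arbitrary 50 MB cap */

/* Refuse any claimed output size above the cap before allocating, so a
 * hostile length field in the packet cannot drive a huge allocation. */
static bool decompressed_size_ok(uint64_t claimed_size)
{
    return claimed_size > 0 && claimed_size <= MAX_DECOMPRESSED_BYTES;
}

A dissector would call this before allocating the output buffer and treat a false return as a malformed packet.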
const GF_FilterRegister *av1dmx_register(GF_FilterSession *session) { return NULL; }
0
[ "CWE-476", "CWE-787" ]
gpac
13dad7d5ef74ca2e6fe4010f5b03eb12e9bbe0ec
195,406,079,736,280,930,000,000,000,000,000,000,000
4
fixed #1719
size_t Magick::Image::rows(void) const { return(constImage()->rows); }
0
[ "CWE-416" ]
ImageMagick
8c35502217c1879cb8257c617007282eee3fe1cc
267,593,690,982,605,430,000,000,000,000,000,000,000
4
Added missing return to avoid use after free.
int r_jwe_set_payload(jwe_t * jwe, const unsigned char * payload, size_t payload_len) { int ret; if (jwe != NULL) { o_free(jwe->payload); if (payload != NULL && payload_len) { if ((jwe->payload = o_malloc(payload_len)) != NULL) { memcpy(jwe->payload, payload, payload_len); jwe->payload_len = payload_len; ret = RHN_OK; } else { y_log_message(Y_LOG_LEVEL_ERROR, "r_jwe_set_payload - Error allocating resources for payload"); ret = RHN_ERROR_MEMORY; } } else { jwe->payload = NULL; jwe->payload_len = 0; ret = RHN_OK; } } else { ret = RHN_ERROR_PARAM; } return ret; }
0
[ "CWE-787" ]
rhonabwy
b4c2923a1ba4fabf9b55a89244127e153a3e549b
125,953,680,710,333,100,000,000,000,000,000,000,000
24
Fix buffer overflow on r_jwe_aesgcm_key_unwrap
static int ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys, struct ext4_extent **ret_ex) { struct buffer_head *bh = NULL; struct ext4_extent_header *eh; struct ext4_extent_idx *ix; struct ext4_extent *ex; ext4_fsblk_t block; int depth; /* Note, NOT eh_depth; depth from top of tree */ int ee_len; if (unlikely(path == NULL)) { EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); return -EIO; } depth = path->p_depth; *phys = 0; if (depth == 0 && path->p_ext == NULL) return 0; /* usually extent in the path covers blocks smaller * then *logical, but it can be that extent is the * first one in the file */ ex = path[depth].p_ext; ee_len = ext4_ext_get_actual_len(ex); if (*logical < le32_to_cpu(ex->ee_block)) { if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { EXT4_ERROR_INODE(inode, "first_extent(path[%d].p_hdr) != ex", depth); return -EIO; } while (--depth >= 0) { ix = path[depth].p_idx; if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { EXT4_ERROR_INODE(inode, "ix != EXT_FIRST_INDEX *logical %d!", *logical); return -EIO; } } goto found_extent; } if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { EXT4_ERROR_INODE(inode, "logical %d < ee_block %d + ee_len %d!", *logical, le32_to_cpu(ex->ee_block), ee_len); return -EIO; } if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { /* next allocated block in this leaf */ ex++; goto found_extent; } /* go up and search for index to the right */ while (--depth >= 0) { ix = path[depth].p_idx; if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) goto got_index; } /* we've gone up to the root and found no index to the right */ return 0; got_index: /* we've found index to the right, let's * follow it and find the closest allocated * block to the right */ ix++; block = ext4_idx_pblock(ix); while (++depth < path->p_depth) { /* subtract from p_depth to get proper eh_depth */ bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); if (IS_ERR(bh)) return PTR_ERR(bh); eh = ext_block_hdr(bh); ix = EXT_FIRST_INDEX(eh); block = ext4_idx_pblock(ix); put_bh(bh); } bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); if (IS_ERR(bh)) return PTR_ERR(bh); eh = ext_block_hdr(bh); ex = EXT_FIRST_EXTENT(eh); found_extent: *logical = le32_to_cpu(ex->ee_block); *phys = ext4_ext_pblock(ex); *ret_ex = ex; if (bh) put_bh(bh); return 0; }
0
[ "CWE-17" ]
linux
0f2af21aae11972fa924374ddcf52e88347cf5a8
265,589,636,842,585,940,000,000,000,000,000,000,000
102
ext4: allocate entire range in zero range Currently there is a bug in the zero range code which causes zero range calls to allocate only the block-aligned portion of the range, while ignoring the rest in some cases. In some cases, namely if the end of the range is past i_size, we do attempt to preallocate the last nonaligned block. However, this might cause the kernel to BUG() on some carefully designed zero range requests on setups where page size > block size. Fix this problem by first preallocating the entire range, including the nonaligned edges, and converting the written extents to unwritten in the next step. This approach also gives us the advantage of having the range be as linearly contiguous as possible. Signed-off-by: Lukas Czerner <lczerner@redhat.com> Signed-off-by: Theodore Ts'o <tytso@mit.edu>
static int do_arpt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: #ifdef CONFIG_COMPAT if (in_compat_syscall()) ret = compat_do_replace(sock_net(sk), arg, len); else #endif ret = do_replace(sock_net(sk), arg, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), arg, len); break; default: ret = -EINVAL; } return ret; }
0
[ "CWE-787" ]
linux
b29c457a6511435960115c0f548c4360d5f4801d
257,390,121,143,559,100,000,000,000,000,000,000,000
28
netfilter: x_tables: fix compat match/target pad out-of-bound write xt_compat_match/target_from_user doesn't check that zeroing the area to start of next rule won't write past end of allocated ruleset blob. Remove this code and zero the entire blob beforehand. Reported-by: syzbot+cfc0247ac173f597aaaa@syzkaller.appspotmail.com Reported-by: Andy Nguyen <theflow@google.com> Fixes: 9fa492cdc160c ("[NETFILTER]: x_tables: simplify compat API") Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
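The remedy named here, zeroing the whole translated blob up front instead of zeroing per-entry pad regions, can be sketched in portable C; the function name is hypothetical and this is not the kernel's actual x_tables code:

#include <stdlib.h>
#include <string.h>

/* Allocate the destination ruleset blob pre-zeroed, so per-entry copies
 * later never need to write pad bytes past their own bounds. */
static void *alloc_ruleset_blob(size_t size)
{
    void *blob = malloc(size);
    if (blob != NULL)
        memset(blob, 0, size);
    return blob;
}

calloc(1, size) would achieve the same in one call; the explicit memset() mirrors the commit's "zero the entire blob beforehand" wording.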
static struct ldb_val map_objectclass_convert_remote(struct ldb_module *module, void *mem_ctx, const struct ldb_val *val) { const struct ldb_map_context *data = map_get_context(module); const char *name = (char *)val->data; const struct ldb_map_objectclass *map = map_objectclass_find_remote(data, name); struct ldb_val newval; if (map) { newval.data = (uint8_t*)talloc_strdup(mem_ctx, map->local_name); newval.length = strlen((char *)newval.data); return newval; } return ldb_val_dup(mem_ctx, val); }
0
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
126,572,458,511,426,970,000,000,000,000,000,000,000
15
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
mono_stack_walk (MonoStackWalk func, gpointer user_data) { stack_walk (func, TRUE, user_data); }
0
[]
mono
8e890a3bf80a4620e417814dc14886b1bbd17625
44,674,135,228,392,560,000,000,000,000,000,000,000
4
Search for dllimported shared libs in the base directory, not cwd. * loader.c: we don't search the current directory anymore for shared libraries referenced in DllImport attributes, as it has a slight security risk. We search in the same directory where the referencing image was loaded from, instead. Fixes bug# 641915.
qemuProcessInitPasswords(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob) { int ret = 0; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); size_t i; for (i = 0; i < vm->def->ngraphics; ++i) { virDomainGraphicsDefPtr graphics = vm->def->graphics[i]; if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) { ret = qemuDomainChangeGraphicsPasswords(driver, vm, VIR_DOMAIN_GRAPHICS_TYPE_VNC, &graphics->data.vnc.auth, cfg->vncPassword, asyncJob); } else if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) { ret = qemuDomainChangeGraphicsPasswords(driver, vm, VIR_DOMAIN_GRAPHICS_TYPE_SPICE, &graphics->data.spice.auth, cfg->spicePassword, asyncJob); } if (ret < 0) return ret; } return ret; }
0
[ "CWE-416" ]
libvirt
1ac703a7d0789e46833f4013a3876c2e3af18ec7
265,873,482,028,265,740,000,000,000,000,000,000,000
30
qemu: Add missing lock in qemuProcessHandleMonitorEOF qemuMonitorUnregister will be called in multiple threads (e.g. threads in the rpc worker pool and the vm event thread). In some cases, it isn't protected by the monitor lock, which may lead to calling g_source_unref more than once and eventually a use-after-free problem. Add the missing lock in qemuProcessHandleMonitorEOF (which is the only place I found where the monitor lock is missing). Suggested-by: Michal Privoznik <mprivozn@redhat.com> Signed-off-by: Peng Liang <liangpeng10@huawei.com> Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, int ref_change) { struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode); struct ext4_iloc iloc; s64 ref_count; u32 hash; int ret; inode_lock(ea_inode); ret = ext4_reserve_inode_write(handle, ea_inode, &iloc); if (ret) { iloc.bh = NULL; goto out; } ref_count = ext4_xattr_inode_get_ref(ea_inode); ref_count += ref_change; ext4_xattr_inode_set_ref(ea_inode, ref_count); if (ref_change > 0) { WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld", ea_inode->i_ino, ref_count); if (ref_count == 1) { WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u", ea_inode->i_ino, ea_inode->i_nlink); set_nlink(ea_inode, 1); ext4_orphan_del(handle, ea_inode); if (ea_inode_cache) { hash = ext4_xattr_inode_get_hash(ea_inode); mb_cache_entry_create(ea_inode_cache, GFP_NOFS, hash, ea_inode->i_ino, true /* reusable */); } } } else { WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld", ea_inode->i_ino, ref_count); if (ref_count == 0) { WARN_ONCE(ea_inode->i_nlink != 1, "EA inode %lu i_nlink=%u", ea_inode->i_ino, ea_inode->i_nlink); clear_nlink(ea_inode); ext4_orphan_add(handle, ea_inode); if (ea_inode_cache) { hash = ext4_xattr_inode_get_hash(ea_inode); mb_cache_entry_delete(ea_inode_cache, hash, ea_inode->i_ino); } } } ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc); iloc.bh = NULL; if (ret) ext4_warning_inode(ea_inode, "ext4_mark_iloc_dirty() failed ret=%d", ret); out: brelse(iloc.bh); inode_unlock(ea_inode); return ret; }
0
[]
linux
54dd0e0a1b255f115f8647fc6fb93273251b01b9
231,937,443,084,045,950,000,000,000,000,000,000,000
70
ext4: add extra checks to ext4_xattr_block_get() Add explicit checks in ext4_xattr_block_get() just in case the e_value_offs and e_value_size fields in the xattr block are corrupted in memory after the buffer_verified bit is set on the xattr block. Signed-off-by: Theodore Ts'o <tytso@mit.edu> Cc: stable@kernel.org
static int __net_init xt_net_init(struct net *net) { int i; for (i = 0; i < NFPROTO_NUMPROTO; i++) INIT_LIST_HEAD(&net->xt.tables[i]); return 0; }
0
[ "CWE-119" ]
nf-next
d7591f0c41ce3e67600a982bab6989ef0f07b3ce
305,813,814,546,177,820,000,000,000,000,000,000,000
8
netfilter: x_tables: introduce and use xt_copy_counters_from_user The three variants use the same copy-and-pasted code; condense this into a helper and use that. Make sure info.name is 0-terminated. Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
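The "make sure info.name is 0-terminated" part of this change is the classic fixed-buffer rule: after copying a name from an untrusted source, force the final byte to NUL. A hedged sketch with an assumed buffer size:

#include <string.h>

#define XT_NAME_LEN 32  /* assumed size of the fixed name field */

struct counters_info {
    char name[XT_NAME_LEN];
    unsigned int num_counters;
};

/* A raw copy from userspace may leave name unterminated; clamping the
 * last byte makes every later strlen()/strcmp() on it safe. */
static void sanitize_counters_name(struct counters_info *info)
{
    info->name[XT_NAME_LEN - 1] = '\0';
}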
int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk, ext4_lblk_t len) { int ret; if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) return fscrypt_zeroout_range(inode, lblk, pblk, len); ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS); if (ret > 0) ret = 0; return ret; }
0
[ "CWE-703" ]
linux
ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1
268,172,567,851,335,600,000,000,000,000,000,000,000
14
ext4: check journal inode extents more carefully Currently, system zones just track ranges of blocks that are "important" fs metadata (bitmaps, group descriptors, journal blocks, etc.). This however complicates how the extent tree (or indirect blocks) can be checked for inodes that actually track such metadata - currently the journal inode, but arguably we should be treating quota files or the resize inode similarly. We cannot run __ext4_ext_check() on such metadata inodes when loading their extents as that would immediately trigger the validity checks, so we just hack around that and special-case the journal inode. This however leads to a situation where a journal inode whose extent tree has depth at least one can have an invalid extent tree that goes unnoticed until ext4_cache_extents() crashes. To overcome this limitation, track the inode number each system zone belongs to (0 is used for zones not belonging to any inode). We can then verify that the inode number matches the expected one when verifying the extent tree and thus avoid the false errors. With this there's no need to special-case the journal inode during extent tree checking anymore, so remove it. Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode") Reported-by: Wolfgang Frisch <wolfgang.frisch@suse.com> Reviewed-by: Lukas Czerner <lczerner@redhat.com> Signed-off-by: Jan Kara <jack@suse.cz> Link: https://lore.kernel.org/r/20200728130437.7804-4-jack@suse.cz Signed-off-by: Theodore Ts'o <tytso@mit.edu>
uint32_t writeMessageEnd() { T_VIRTUAL_CALL(); return writeMessageEnd_virt(); }
0
[ "CWE-20" ]
thrift
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
99,823,541,187,222,630,000,000,000,000,000,000,000
4
THRIFT-3231 CPP: Limit recursion depth to 64 Client: cpp Patch: Ben Craig <bencraig@apache.org>
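Capping recursion depth, as the patch title describes, is a one-counter guard around the recursive decoder. A minimal C sketch; the reader struct and function are invented for illustration, not Thrift's real API:

#define MAX_RECURSION_DEPTH 64

struct reader {
    int depth;
    /* ... input state ... */
};

/* Bump the depth on entry, fail once the cap is hit, and restore the
 * counter on exit, so deeply nested input cannot exhaust the stack. */
static int read_nested_value(struct reader *r)
{
    int rc = 0;
    if (++r->depth > MAX_RECURSION_DEPTH)
        rc = -1;  /* too deep: reject instead of recursing further */
    else {
        /* ... decode fields, calling read_nested_value() for children ... */
    }
    r->depth--;
    return rc;
}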
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color; ssize_t count; /* Find the median value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; do { color=p->nodes[color].next[0]; count+=p->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); *pixel=ScaleShortToQuantum((unsigned short) color); }
0
[ "CWE-119", "CWE-787" ]
ImageMagick
025e77fcb2f45b21689931ba3bf74eac153afa48
123,582,758,319,019,700,000,000,000,000,000,000,000
24
https://github.com/ImageMagick/ImageMagick/issues/1615
EXPORTED void mailbox_set_quotaroot(struct mailbox *mailbox, const char *quotaroot) { if (mailbox->h.quotaroot) { if (quotaroot && !strcmp(mailbox->h.quotaroot, quotaroot)) return; /* no change */ xzfree(mailbox->h.quotaroot); } else { if (!quotaroot) return; /* no change */ } if (quotaroot) mailbox->h.quotaroot = xstrdup(quotaroot); /* either way, it's changed, so dirty */ mailbox->header_dirty = 1; }
0
[]
cyrus-imapd
1d6d15ee74e11a9bd745e80be69869e5fb8d64d6
48,301,563,191,643,100,000,000,000,000,000,000,000
18
mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path()
static void automount_dump(Unit *u, FILE *f, const char *prefix) { char time_string[FORMAT_TIMESPAN_MAX]; Automount *a = AUTOMOUNT(u); assert(a); fprintf(f, "%sAutomount State: %s\n" "%sResult: %s\n" "%sWhere: %s\n" "%sDirectoryMode: %04o\n" "%sTimeoutIdleUSec: %s\n", prefix, automount_state_to_string(a->state), prefix, automount_result_to_string(a->result), prefix, a->where, prefix, a->directory_mode, prefix, format_timespan(time_string, FORMAT_TIMESPAN_MAX, a->timeout_idle_usec, USEC_PER_SEC)); }
0
[ "CWE-362" ]
systemd
e7d54bf58789545a9eb0b3964233defa0b007318
277,530,872,908,242,760,000,000,000,000,000,000,000
18
automount: ack automount requests even when already mounted (#5916) If a process accesses an autofs filesystem while systemd is in the middle of starting the mount unit on top of it, it is possible for the autofs_ptype_missing_direct request from the kernel to be received after the mount unit has been fully started. The interleaving is: systemd forks and execs mount; another process accesses the autofs and blocks; mount exits; systemd receives SIGCHLD; the kernel sends the request; systemd receives the request. systemd needs to respond to this request, otherwise the kernel will continue to block access to the mount point.
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector, struct kvm_segment *kvm_desct) { kvm_desct->base = seg_desc->base0; kvm_desct->base |= seg_desc->base1 << 16; kvm_desct->base |= seg_desc->base2 << 24; kvm_desct->limit = seg_desc->limit0; kvm_desct->limit |= seg_desc->limit << 16; if (seg_desc->g) { kvm_desct->limit <<= 12; kvm_desct->limit |= 0xfff; } kvm_desct->selector = selector; kvm_desct->type = seg_desc->type; kvm_desct->present = seg_desc->p; kvm_desct->dpl = seg_desc->dpl; kvm_desct->db = seg_desc->d; kvm_desct->s = seg_desc->s; kvm_desct->l = seg_desc->l; kvm_desct->g = seg_desc->g; kvm_desct->avl = seg_desc->avl; if (!selector) kvm_desct->unusable = 1; else kvm_desct->unusable = 0; kvm_desct->padding = 0; }
0
[ "CWE-476" ]
linux-2.6
59839dfff5eabca01cc4e20b45797a60a80af8cb
337,166,164,548,148,240,000,000,000,000,000,000,000
27
KVM: x86: check for cr3 validity in ioctl_set_sregs Matt T. Yourst notes that kvm_arch_vcpu_ioctl_set_sregs lacks validity checking for the new cr3 value: "Userspace callers of KVM_SET_SREGS can pass a bogus value of cr3 to the kernel. This will trigger a NULL pointer access in gfn_to_rmap() when userspace next tries to call KVM_RUN on the affected VCPU and kvm attempts to activate the new non-existent page table root. This happens since kvm only validates that cr3 points to a valid guest physical memory page when code *inside* the guest sets cr3. However, kvm currently trusts the userspace caller (e.g. QEMU) on the host machine to always supply a valid page table root, rather than properly validating it along with the rest of the reloaded guest state." http://sourceforge.net/tracker/?func=detail&atid=893831&aid=2687641&group_id=180599 Check for a valid cr3 address in kvm_arch_vcpu_ioctl_set_sregs, triple fault in case of failure. Cc: stable@kernel.org Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
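The validation this commit adds boils down to asking whether the guest frame named by the new cr3 actually exists before activating it. A sketch under the assumption of a gfn_is_backed() predicate; the stub stands in for KVM's real memslot lookup:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Stub standing in for a guest memory map lookup. */
static bool gfn_is_backed(uint64_t gfn)
{
    return gfn < (1ull << 20);  /* pretend 4 GiB of guest memory */
}

/* Refuse a cr3 whose page-table root points at nonexistent guest
 * memory, instead of trusting the ioctl caller and faulting later. */
static bool cr3_is_valid(uint64_t cr3)
{
    return gfn_is_backed(cr3 >> PAGE_SHIFT);
}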
int ci_hdrc_host_init(struct ci13xxx *ci) { struct ci_role_driver *rdrv; if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_HC)) return -ENXIO; rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL); if (!rdrv) return -ENOMEM; rdrv->start = host_start; rdrv->stop = host_stop; rdrv->irq = host_irq; rdrv->name = "host"; ci->roles[CI_ROLE_HOST] = rdrv; ehci_init_driver(&ci_ehci_hc_driver, NULL); return 0; }
0
[ "CWE-119", "CWE-787" ]
linux
929473ea05db455ad88cdc081f2adc556b8dc48f
219,699,230,906,424,800,000,000,000,000,000,000,000
21
usb: chipidea: Allow disabling streaming not only in udc mode When running an scp transfer using a USB/Ethernet adapter the following crash happens: $ scp test.tar.gz fabio@192.168.1.100:/home/fabio fabio@192.168.1.100's password: test.tar.gz 0% 0 0.0KB/s --:-- ETA ------------[ cut here ]------------ WARNING: at net/sched/sch_generic.c:255 dev_watchdog+0x2cc/0x2f0() NETDEV WATCHDOG: eth0 (asix): transmit queue 0 timed out Modules linked in: Backtrace: [<80011c94>] (dump_backtrace+0x0/0x10c) from [<804d3a5c>] (dump_stack+0x18/0x1c) r6:000000ff r5:80412388 r4:80685dc0 r3:80696cc0 [<804d3a44>] (dump_stack+0x0/0x1c) from [<80021868>] (warn_slowpath_common+0x54/0x6c) [<80021814>] (warn_slowpath_common+0x0/0x6c) from [<80021924>] (warn_slowpath_fmt+0x38/0x40) ... Setting SDIS (Stream Disable Mode, bit 4 of the USBMODE register) fixes the problem. However, in the current code the CI13XXX_DISABLE_STREAMING flag is only set in udc mode, so allow disabling streaming also in host mode. Tested on a mx6qsabrelite board. Suggested-by: Peter Chen <peter.chen@freescale.com> Signed-off-by: Fabio Estevam <fabio.estevam@freescale.com> Reviewed-by: Peter Chen <peter.chen@freescale.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
md_open(gx_device *pdev) { static const float md_margins[4] = { MD_SIDE_MARGIN, MD_BOTTOM_MARGIN, MD_SIDE_MARGIN, MD_TOP_MARGIN }; if (pdev->HWResolution[0] != 600) { emprintf(pdev->memory, "device must have an X resolution of 600dpi\n"); return_error(gs_error_rangecheck); } gx_device_set_margins(pdev, md_margins, true); return gdev_prn_open(pdev); }
0
[]
ghostpdl
4fcbece468706e0e89ed2856729b2ccacbc112be
149,105,736,969,037,250,000,000,000,000,000,000,000
17
Avoid some devices dying due to inappropriate resolutions.
CmdRevokeRolesFromRole() : Command("revokeRolesFromRole") {}
0
[ "CWE-613" ]
mongo
64d8e9e1b12d16b54d6a592bae8110226c491b4e
161,141,724,156,539,330,000,000,000,000,000,000,000
1
SERVER-38984 Validate unique User ID on UserCache hit (cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7)
TEST(IndexBoundsBuilderTest, TypeNumber) { auto testIndex = buildSimpleIndexEntry(); BSONObj obj = fromjson("{a: {$type: 'number'}}"); auto expr = parseMatchExpression(obj); BSONElement elt = obj.firstElement(); OrderedIntervalList oil; IndexBoundsBuilder::BoundsTightness tightness; IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness); ASSERT_EQUALS(oil.name, "a"); ASSERT_EQUALS(oil.intervals.size(), 1U); // Build the expected interval. BSONObjBuilder bob; BSONType type = BSONType::NumberInt; bob.appendMinForType("", type); bob.appendMaxForType("", type); BSONObj expectedInterval = bob.obj(); ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(Interval(expectedInterval, true, true))); ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT); }
0
[ "CWE-754" ]
mongo
f8f55e1825ee5c7bdb3208fc7c5b54321d172732
262,891,055,735,899,100,000,000,000,000,000,000,000
22
SERVER-44377 generate correct plan for indexed inequalities to null
void simplestring_add(simplestring* target, const char* source) { if(target && source) { simplestring_addn(target, source, strlen(source)); } }
0
[ "CWE-119" ]
php-src
e6c48213c22ed50b2b987b479fcc1ac709394caa
7,575,796,191,563,260,000,000,000,000,000,000,000
5
Fix bug #72606: heap-buffer-overflow (write) simplestring_addn simplestring.c
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) { return queue_var_show(queue_max_segment_size(q), (page)); }
0
[ "CWE-416" ]
linux
c3e2219216c92919a6bd1711f340f5faa98695e6
158,676,374,374,995,260,000,000,000,000,000,000,000
4
block: free sched's request pool in blk_cleanup_queue In theory, the IO scheduler belongs to the request queue, and the request pool of sched tags belongs to the request queue too. However, the current tags allocation interfaces are re-used for both driver tags and sched tags, and driver tags are definitely host wide and don't belong to any request queue; the same goes for their request pool. So we need the tagset instance for freeing requests of sched tags. Meanwhile, blk_mq_free_tag_set() often follows blk_cleanup_queue() in the non-BLK_MQ_F_TAG_SHARED case, which requires that the request pool of sched tags be freed before calling blk_mq_free_tag_set(). Commit 47cdee29ef9d94e ("block: move blk_exit_queue into __blk_release_queue") moves blk_exit_queue into __blk_release_queue to simplify the fast path in generic_make_request(), which then causes an oops while freeing requests of sched tags in __blk_release_queue(). Fix the above issue by moving the freeing of the sched tags request pool into blk_cleanup_queue(); this is safe because the queue has been frozen and there are no in-queue requests at that time. Freeing the sched tags themselves has to be kept in the queue's release handler because there might be uncompleted dispatch activity which might refer to sched tags. Cc: Bart Van Assche <bvanassche@acm.org> Cc: Christoph Hellwig <hch@lst.de> Fixes: 47cdee29ef9d94e485eb08f962c74943023a5271 ("block: move blk_exit_queue into __blk_release_queue") Tested-by: Yi Zhang <yi.zhang@redhat.com> Reported-by: kernel test robot <rong.a.chen@intel.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
nfsd4_getdeviceinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_getdeviceinfo *gdp) { const struct nfsd4_layout_ops *ops; struct nfsd4_deviceid_map *map; struct svc_export *exp; __be32 nfserr; dprintk("%s: layout_type %u dev_id [0x%llx:0x%x] maxcnt %u\n", __func__, gdp->gd_layout_type, gdp->gd_devid.fsid_idx, gdp->gd_devid.generation, gdp->gd_maxcount); map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx); if (!map) { dprintk("%s: couldn't find device ID to export mapping!\n", __func__); return nfserr_noent; } exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid); if (IS_ERR(exp)) { dprintk("%s: could not find device id\n", __func__); return nfserr_noent; } nfserr = nfserr_layoutunavailable; ops = nfsd4_layout_verify(exp, gdp->gd_layout_type); if (!ops) goto out; nfserr = nfs_ok; if (gdp->gd_maxcount != 0) { nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, rqstp, cstate->session->se_client, gdp); } gdp->gd_notify_types &= ops->notify_types; out: exp_put(exp); return nfserr; }
0
[ "CWE-20", "CWE-129" ]
linux
b550a32e60a4941994b437a8d662432a486235a5
269,094,498,396,035,200,000,000,000,000,000,000,000
44
nfsd: fix undefined behavior in nfsd4_layout_verify UBSAN: Undefined behaviour in fs/nfsd/nfs4proc.c:1262:34 shift exponent 128 is too large for 32-bit type 'int' Depending on compiler+architecture, this may cause the check for layout_type to succeed for overly large values (which seems to be the case with amd64). The large value will be later used in de-referencing nfsd4_layout_ops for function pointers. Reported-by: Jani Tuovila <tuovila@synopsys.com> Signed-off-by: Ari Kauppi <ari@synopsys.com> [colin.king@canonical.com: use LAYOUT_TYPE_MAX instead of 32] Cc: stable@vger.kernel.org Reviewed-by: Dan Carpenter <dan.carpenter@oracle.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
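The undefined behavior here is a shift like 1 << 128 on a 32-bit int; it disappears once the exponent is range-checked first. A small C sketch of the guarded test, with LAYOUT_TYPE_MAX mirroring the bound mentioned in the message and the rest illustrative:

#include <stdbool.h>
#include <stdint.h>

#define LAYOUT_TYPE_MAX 32  /* bound referenced in the commit message */

/* Reject out-of-range layout types before any shift, so the shift
 * exponent is always strictly below the width of the operand. */
static bool layout_type_supported(uint32_t type, uint32_t supported_mask)
{
    if (type >= LAYOUT_TYPE_MAX)
        return false;
    return (supported_mask >> type) & 1u;
}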
static void dm_blk_close(struct gendisk *disk, fmode_t mode) { struct mapped_device *md; spin_lock(&_minor_lock); md = disk->private_data; if (WARN_ON(!md)) goto out; if (atomic_dec_and_test(&md->open_count) && (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) queue_work(deferred_remove_workqueue, &deferred_remove_work); dm_put(md); out: spin_unlock(&_minor_lock); }
0
[ "CWE-362" ]
linux
b9a41d21dceadf8104812626ef85dc56ee8a60ed
100,138,494,816,131,960,000,000,000,000,000,000,000
18
dm: fix race between dm_get_from_kobject() and __dm_destroy() The following BUG_ON was hit when testing repeat creation and removal of DM devices: kernel BUG at drivers/md/dm.c:2919! CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44 Call Trace: [<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a [<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e [<ffffffff817b46d1>] ? mutex_lock+0x26/0x44 [<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf [<ffffffff811de257>] kernfs_seq_show+0x23/0x25 [<ffffffff81199118>] seq_read+0x16f/0x325 [<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f [<ffffffff8117b625>] __vfs_read+0x26/0x9d [<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44 [<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9 [<ffffffff8117be9d>] vfs_read+0x8f/0xcf [<ffffffff81193e34>] ? __fdget_pos+0x12/0x41 [<ffffffff8117c686>] SyS_read+0x4b/0x76 [<ffffffff817b606e>] system_call_fastpath+0x12/0x71 The bug can be easily triggered, if an extra delay (e.g. 10ms) is added between the test of DMF_FREEING & DMF_DELETING and dm_get() in dm_get_from_kobject(). To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and dm_get() are done in an atomic way, so _minor_lock is used. The other callers of dm_get() have also been checked to be OK: some callers invoke dm_get() under _minor_lock, some callers invoke it under _hash_lock, and dm_start_request() invoke it after increasing md->open_count. Cc: stable@vger.kernel.org Signed-off-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
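The essence of the fix is that the teardown-flag test and the reference bump must happen under one lock. A userspace C sketch with a pthread mutex standing in for _minor_lock; names and layout are simplified, not the kernel's:

#include <pthread.h>
#include <stdbool.h>

#define DMF_FREEING  (1u << 0)
#define DMF_DELETING (1u << 1)

struct mapped_device {
    unsigned int flags;
    int refcount;
};

static pthread_mutex_t minor_lock = PTHREAD_MUTEX_INITIALIZER;

/* Flag test and refcount increment form one critical section, so a
 * concurrent destroy cannot slip in between them. */
static bool md_try_get(struct mapped_device *md)
{
    bool ok = false;
    pthread_mutex_lock(&minor_lock);
    if (!(md->flags & (DMF_FREEING | DMF_DELETING))) {
        md->refcount++;
        ok = true;
    }
    pthread_mutex_unlock(&minor_lock);
    return ok;
}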
void RegexMatchExpression::debugString(StringBuilder& debug, int indentationLevel) const { _debugAddSpace(debug, indentationLevel); debug << path() << " regex /" << _regex << "/" << _flags; MatchExpression::TagData* td = getTag(); if (nullptr != td) { debug << " "; td->debugString(&debug); } debug << "\n"; }
0
[ "CWE-190" ]
mongo
21d8699ed6c517b45e1613e20231cd8eba894985
248,984,075,856,616,400,000,000,000,000,000,000,000
11
SERVER-43699 $mod should not overflow for large negative values
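The overflow behind this ticket is the one hazardous corner of C's % operator: LLONG_MIN % -1 overflows even though the mathematical remainder is 0. A hedged sketch of the guard (a zero divisor is assumed to be rejected elsewhere):

#include <limits.h>

/* a % -1 is mathematically 0, but evaluating it overflows when
 * a == LLONG_MIN, so the -1 divisor is special-cased up front. */
static long long checked_mod(long long a, long long b)
{
    if (b == -1)
        return 0;
    return a % b;  /* caller guarantees b != 0 */
}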
static int replmd_replicated_request_werror(struct replmd_replicated_request *ar, WERROR status) { int ret = LDB_ERR_OTHER; /* TODO: do some error mapping */ /* Let the caller know the full WERROR */ ar->objs->error = status; return ret; }
0
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
291,565,121,317,447,000,000,000,000,000,000,000,000
10
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz>
nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp, struct nfs4_ace *ace) { if (ace->whotype != NFS4_ACL_WHO_NAMED) return nfs4_acl_write_who(xdr, ace->whotype); else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP) return nfsd4_encode_group(xdr, rqstp, ace->who_gid); else return nfsd4_encode_user(xdr, rqstp, ace->who_uid); }
0
[ "CWE-20", "CWE-129" ]
linux
f961e3f2acae94b727380c0b74e2d3954d0edf79
315,725,910,555,421,300,000,000,000,000,000,000,000
10
nfsd: encoders mustn't use uninitialized values in error cases In error cases, lgp->lg_layout_type may be out of bounds; so we shouldn't be using it until after the check of nfserr. This was seen to crash nfsd threads when the server receives a LAYOUTGET request with a large layout type. GETDEVICEINFO has the same problem. Reported-by: Ari Kauppi <Ari.Kauppi@synopsys.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Cc: stable@vger.kernel.org Signed-off-by: J. Bruce Fields <bfields@redhat.com>
int neigh_table_clear(struct neigh_table *tbl) { struct neigh_table **tp; /* It is not clean... Fix it to unload IPv6 module safely */ del_timer_sync(&tbl->gc_timer); del_timer_sync(&tbl->proxy_timer); pneigh_queue_purge(&tbl->proxy_queue); neigh_ifdown(tbl, NULL); if (atomic_read(&tbl->entries)) printk(KERN_CRIT "neighbour leakage\n"); write_lock(&neigh_tbl_lock); for (tp = &neigh_tables; *tp; tp = &(*tp)->next) { if (*tp == tbl) { *tp = tbl->next; break; } } write_unlock(&neigh_tbl_lock); neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1); tbl->hash_buckets = NULL; kfree(tbl->phash_buckets); tbl->phash_buckets = NULL; return 0; }
0
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
246,638,971,949,845,170,000,000,000,000,000,000,000
28
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes in length, plus two instances of uninitialized nlmsgerr->msg of 16 bytes in length. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
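The leak pattern fixed here is a struct copied to userspace with compiler-inserted padding left unwritten. The standard cure is zeroing the whole object before filling named fields; a generic C sketch with an invented struct layout:

#include <stdint.h>
#include <string.h>

struct wire_msg {
    uint8_t  type;   /* 3 bytes of implicit padding follow this field */
    uint32_t value;
};

/* memset() the entire object first, so padding bytes never carry stale
 * memory out to userspace when the struct is copied verbatim. */
static void fill_wire_msg(struct wire_msg *m, uint8_t type, uint32_t value)
{
    memset(m, 0, sizeof(*m));
    m->type = type;
    m->value = value;
}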
static MagickBooleanType IsSGI(const unsigned char *magick,const size_t length) { if (length < 2) return(MagickFalse); if (memcmp(magick,"\001\332",2) == 0) return(MagickTrue); return(MagickFalse); }
0
[ "CWE-125" ]
ImageMagick
7afcf9f71043df15508e46f079387bd4689a738d
322,777,257,584,832,170,000,000,000,000,000,000,000
8
Prevent buffer overflow in BMP & SGI coders (bug report from pwchen&rayzhong of tencent)
QPDFObjectHandle::getUIntValue() { unsigned long long result = 0; long long v = getIntValue(); if (v < 0) { QTC::TC("qpdf", "QPDFObjectHandle uint returning 0"); warnIfPossible( "unsigned value request for negative number; returning 0", false); } else { result = static_cast<unsigned long long>(v); } return result; }
0
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
1,882,059,005,993,447,500,000,000,000,000,000,000
17
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
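The "calls that do range checks" mentioned above amount to checked narrowing conversions. qpdf does this in C++ with exceptions, so the following plain-C rendition is only an illustration of the pattern:

#include <limits.h>
#include <stdbool.h>

/* Convert long long to int only when the value fits, reporting failure
 * instead of silently truncating or wrapping. */
static bool ll_to_int_checked(long long v, int *out)
{
    if (v < INT_MIN || v > INT_MAX)
        return false;
    *out = (int)v;
    return true;
}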
static HANDLE create_named_pipe(MYSQL *mysql, DWORD connect_timeout, const char **arg_host, const char **arg_unix_socket) { HANDLE hPipe=INVALID_HANDLE_VALUE; char pipe_name[1024]; DWORD dwMode; int i; my_bool testing_named_pipes=0; const char *host= *arg_host, *unix_socket= *arg_unix_socket; if ( ! unix_socket || (unix_socket)[0] == 0x00) unix_socket = mysql_unix_port; if (!host || !strcmp(host,LOCAL_HOST)) host=LOCAL_HOST_NAMEDPIPE; pipe_name[sizeof(pipe_name)-1]= 0; /* Safety if too long string */ strxnmov(pipe_name, sizeof(pipe_name)-1, "\\\\", host, "\\pipe\\", unix_socket, NullS); DBUG_PRINT("info",("Server name: '%s'. Named Pipe: %s", host, unix_socket)); for (i=0 ; i < 100 ; i++) /* Don't retry forever */ { if ((hPipe = CreateFile(pipe_name, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL )) != INVALID_HANDLE_VALUE) break; if (GetLastError() != ERROR_PIPE_BUSY) { set_mysql_extended_error(mysql, CR_NAMEDPIPEOPEN_ERROR, unknown_sqlstate, ER(CR_NAMEDPIPEOPEN_ERROR), host, unix_socket, (ulong) GetLastError()); return INVALID_HANDLE_VALUE; } /* wait for for an other instance */ if (!WaitNamedPipe(pipe_name, connect_timeout)) { set_mysql_extended_error(mysql, CR_NAMEDPIPEWAIT_ERROR, unknown_sqlstate, ER(CR_NAMEDPIPEWAIT_ERROR), host, unix_socket, (ulong) GetLastError()); return INVALID_HANDLE_VALUE; } } if (hPipe == INVALID_HANDLE_VALUE) { set_mysql_extended_error(mysql, CR_NAMEDPIPEOPEN_ERROR, unknown_sqlstate, ER(CR_NAMEDPIPEOPEN_ERROR), host, unix_socket, (ulong) GetLastError()); return INVALID_HANDLE_VALUE; } dwMode = PIPE_READMODE_BYTE | PIPE_WAIT; if ( !SetNamedPipeHandleState(hPipe, &dwMode, NULL, NULL) ) { CloseHandle( hPipe ); set_mysql_extended_error(mysql, CR_NAMEDPIPESETSTATE_ERROR, unknown_sqlstate, ER(CR_NAMEDPIPESETSTATE_ERROR), host, unix_socket, (ulong) GetLastError()); return INVALID_HANDLE_VALUE; } *arg_host=host ; *arg_unix_socket=unix_socket; /* connect arg */ return (hPipe); }
0
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
229,070,978,735,705,100,000,000,000,000,000,000,000
67
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
static const char *req_content_type_field(request_rec *r) { return r->content_type; }
0
[ "CWE-20" ]
httpd
78eb3b9235515652ed141353d98c239237030410
37,843,662,434,147,535,000,000,000,000,000,000,000
4
*) SECURITY: CVE-2015-0228 (cve.mitre.org) mod_lua: A maliciously crafted websockets PING after a script calls r:wsupgrade() can cause a child process crash. [Edward Lu <Chaosed0 gmail.com>] Discovered by Guido Vranken <guidovranken gmail.com> Submitted by: Edward Lu Committed by: covener git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1657261 13f79535-47bb-0310-9956-ffa450edef68
DeepScanLineInputFile::version () const { return _data->version; }
0
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
130,289,970,960,664,850,000,000,000,000,000,000,000
4
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
static int nntp_mbox_sync(struct Context *ctx, int *index_hint) { struct NntpData *nntp_data = ctx->data; int rc; #ifdef USE_HCACHE header_cache_t *hc = NULL; #endif /* check for new articles */ nntp_data->nserv->check_time = 0; rc = check_mailbox(ctx); if (rc) return rc; #ifdef USE_HCACHE nntp_data->last_cached = 0; hc = nntp_hcache_open(nntp_data); #endif for (int i = 0; i < ctx->msgcount; i++) { struct Header *hdr = ctx->hdrs[i]; char buf[16]; snprintf(buf, sizeof(buf), "%d", NHDR(hdr)->article_num); if (nntp_data->bcache && hdr->deleted) { mutt_debug(2, "mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } #ifdef USE_HCACHE if (hc && (hdr->changed || hdr->deleted)) { if (hdr->deleted && !hdr->read) nntp_data->unread--; mutt_debug(2, "mutt_hcache_store %s\n", buf); mutt_hcache_store(hc, buf, strlen(buf), hdr, 0); } #endif } #ifdef USE_HCACHE if (hc) { mutt_hcache_close(hc); nntp_data->last_cached = nntp_data->last_loaded; } #endif /* save .newsrc entries */ nntp_newsrc_gen_entries(ctx); nntp_newsrc_update(nntp_data->nserv); nntp_newsrc_close(nntp_data->nserv); return 0; }
0
[ "CWE-20" ]
neomutt
9e927affe3a021175f354af5fa01d22657c20585
249,717,983,468,259,980,000,000,000,000,000,000,000
56
Add alloc fail check in nntp_fetch_headers
paste_from_archive_remove_ready_cb (GObject *source_object, GAsyncResult *result, gpointer user_data) { FrWindow *window = user_data; GError *error = NULL; if (! fr_archive_operation_finish (FR_ARCHIVE (source_object), result, &error)) { _paste_from_archive_operation_completed (window, FR_ACTION_PASTING_FILES, error); g_error_free (error); return; } paste_from_archive_completed_successfully (window); }
0
[ "CWE-22" ]
file-roller
b147281293a8307808475e102a14857055f81631
313,565,407,440,465,100,000,000,000,000,000,000,000
15
libarchive: sanitize filenames before extracting
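Sanitizing archive entry names against path traversal (CWE-22) means rejecting absolute paths and any ".." component before the name is joined with the extraction directory. A self-contained C sketch; file-roller's real check may differ in detail:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Walk the name component by component; absolute paths and ".."
 * components could escape the extraction directory, so refuse them. */
static bool entry_name_safe(const char *name)
{
    const char *p;
    size_t len;

    if (name == NULL || name[0] == '\0' || name[0] == '/')
        return false;
    for (p = name; *p != '\0'; ) {
        len = strcspn(p, "/");
        if (len == 2 && p[0] == '.' && p[1] == '.')
            return false;
        p += len;
        while (*p == '/')
            p++;
    }
    return true;
}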
static void skcipher_queue_write(struct skcipher_walk *walk, struct skcipher_walk_buffer *p) { p->dst = walk->out; list_add_tail(&p->entry, &walk->buffers); }
0
[ "CWE-476", "CWE-703" ]
linux
9933e113c2e87a9f46a40fde8dafbf801dca1ab9
217,083,818,904,420,800,000,000,000,000,000,000,000
6
crypto: skcipher - Add missing API setkey checks The API setkey checks for key sizes and alignment went AWOL during the skcipher conversion. This patch restores them. Cc: <stable@vger.kernel.org> Fixes: 4e6c3df4d729 ("crypto: skcipher - Add low-level skcipher...") Reported-by: Baozeng <sploving1@gmail.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
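The restored checks are bounds on key size (plus, in the real patch, alignment). The key-size half is a two-comparison guard; this sketch uses an assumed limits struct rather than the crypto API's actual types:

#include <stdbool.h>
#include <stddef.h>

struct key_limits {
    size_t min_keysize;
    size_t max_keysize;
};

/* A key outside the algorithm's declared range must be rejected before
 * key setup runs, or later expansion can read or write out of bounds. */
static bool setkey_length_ok(const struct key_limits *lim, size_t keylen)
{
    return keylen >= lim->min_keysize && keylen <= lim->max_keysize;
}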
static int v9fs_set_super(struct super_block *s, void *data) { s->s_fs_info = data; return set_anon_super(s, data); }
0
[ "CWE-835" ]
linux
5e3cc1ee1405a7eb3487ed24f786dec01b4cbe1f
327,030,879,195,271,960,000,000,000,000,000,000,000
5
9p: use inode->i_lock to protect i_size_write() under 32-bit Use inode->i_lock to protect i_size_write(), else i_size_read() in generic_fillattr() may loop infinitely in read_seqcount_begin() when multiple processes invoke v9fs_vfs_getattr() or v9fs_vfs_getattr_dotl() simultaneously under a 32-bit SMP environment, and a soft lockup will be triggered as shown below: watchdog: BUG: soft lockup - CPU#5 stuck for 22s! [stat:2217] Modules linked in: CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4 Hardware name: Generic DT based system PC is at generic_fillattr+0x104/0x108 LR is at 0xec497f00 pc : [<802b8898>] lr : [<ec497f00>] psr: 200c0013 sp : ec497e20 ip : ed608030 fp : ec497e3c r10: 00000000 r9 : ec497f00 r8 : ed608030 r7 : ec497ebc r6 : ec497f00 r5 : ee5c1550 r4 : ee005780 r3 : 0000052d r2 : 00000000 r1 : ec497f00 r0 : ed608030 Flags: nzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none Control: 10c5387d Table: ac48006a DAC: 00000051 CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4 Hardware name: Generic DT based system Backtrace: [<8010d974>] (dump_backtrace) from [<8010dc88>] (show_stack+0x20/0x24) [<8010dc68>] (show_stack) from [<80a1d194>] (dump_stack+0xb0/0xdc) [<80a1d0e4>] (dump_stack) from [<80109f34>] (show_regs+0x1c/0x20) [<80109f18>] (show_regs) from [<801d0a80>] (watchdog_timer_fn+0x280/0x2f8) [<801d0800>] (watchdog_timer_fn) from [<80198658>] (__hrtimer_run_queues+0x18c/0x380) [<801984cc>] (__hrtimer_run_queues) from [<80198e60>] (hrtimer_run_queues+0xb8/0xf0) [<80198da8>] (hrtimer_run_queues) from [<801973e8>] (run_local_timers+0x28/0x64) [<801973c0>] (run_local_timers) from [<80197460>] (update_process_times+0x3c/0x6c) [<80197424>] (update_process_times) from [<801ab2b8>] (tick_nohz_handler+0xe0/0x1bc) [<801ab1d8>] (tick_nohz_handler) from [<80843050>] (arch_timer_handler_virt+0x38/0x48) [<80843018>] (arch_timer_handler_virt) from [<80180a64>] (handle_percpu_devid_irq+0x8c/0x240) [<801809d8>] (handle_percpu_devid_irq) from [<8017ac20>] (generic_handle_irq+0x34/0x44) [<8017abec>] (generic_handle_irq) from [<8017b344>] (__handle_domain_irq+0x6c/0xc4) [<8017b2d8>] (__handle_domain_irq) from [<801022e0>] (gic_handle_irq+0x4c/0x88) [<80102294>] (gic_handle_irq) from [<80101a30>] (__irq_svc+0x70/0x98) [<802b8794>] (generic_fillattr) from [<8056b284>] (v9fs_vfs_getattr_dotl+0x74/0xa4) [<8056b210>] (v9fs_vfs_getattr_dotl) from [<802b8904>] (vfs_getattr_nosec+0x68/0x7c) [<802b889c>] (vfs_getattr_nosec) from [<802b895c>] (vfs_getattr+0x44/0x48) [<802b8918>] (vfs_getattr) from [<802b8a74>] (vfs_statx+0x9c/0xec) [<802b89d8>] (vfs_statx) from [<802b9428>] (sys_lstat64+0x48/0x78) [<802b93e0>] (sys_lstat64) from [<80101000>] (ret_fast_syscall+0x0/0x28) [dominique.martinet@cea.fr: updated comment to not refer to a function in another subsystem] Link: http://lkml.kernel.org/r/20190124063514.8571-2-houtao1@huawei.com Cc: stable@vger.kernel.org Fixes: 7549ae3e81cc ("9p: Use the i_size_[read, write]() macros instead of using inode->i_size directly.") Reported-by: Xing Gaopeng <xingaopeng@huawei.com> Signed-off-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
backend_valid_range (struct backend *b, struct connection *conn, uint64_t offset, uint32_t count) { struct b_conn_handle *h = &conn->handles[b->i]; assert (h->exportsize >= 0); /* Guaranteed by negotiation phase */ return count > 0 && offset <= h->exportsize && offset + count <= h->exportsize; }
0
[ "CWE-406" ]
nbdkit
a6b88b195a959b17524d1c8353fd425d4891dc5f
299,712,858,476,160,850,000,000,000,000,000,000,000
9
server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO Most known NBD clients do not bother with NBD_OPT_INFO (except for clients like 'qemu-nbd --list' that don't ever intend to connect), but go straight to NBD_OPT_GO. However, it's not too hard to hack up qemu to add in an extra client step (whether info on the same name, or more interestingly, info on a different name), as a patch against qemu commit 6f214b30445: | diff --git i/nbd/client.c w/nbd/client.c | index f6733962b49b..425292ac5ea9 100644 | --- i/nbd/client.c | +++ w/nbd/client.c | @@ -1038,6 +1038,14 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc, | * TLS). If it is not available, fall back to | * NBD_OPT_LIST for nicer error messages about a missing | * export, then use NBD_OPT_EXPORT_NAME. */ | + if (getenv ("HACK")) | + info->name[0]++; | + result = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, info, errp); | + if (getenv ("HACK")) | + info->name[0]--; | + if (result < 0) { | + return -EINVAL; | + } | result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp); | if (result < 0) { | return -EINVAL; This works just fine in 1.14.0, where we call .open only once (so the INFO and GO repeat calls into the same plugin handle), but in 1.14.1 it regressed into causing an assertion failure: we are now calling .open a second time on a connection that is already opened: $ nbdkit -rfv null & $ hacked-qemu-io -f raw -r nbd://localhost -c quit ... nbdkit: null[1]: debug: null: open readonly=1 nbdkit: backend.c:179: backend_open: Assertion `h->handle == NULL' failed. Worse, on the mainline development, we have recently made it possible for plugins to actively report different information for different export names; for example, a plugin may choose to report different answers for .can_write on export A than for export B; but if we share cached handles, then an NBD_OPT_INFO on one export prevents correct answers for NBD_OPT_GO on the second export name. (The HACK envvar in my qemu modifications can be used to demonstrate cross-name requests, which are even less likely in a real client). The solution is to call .close after NBD_OPT_INFO, coupled with enough glue logic to reset cached connection handles back to the state expected by .open. This in turn means factoring out another backend_* function, but also gives us an opportunity to change backend_set_handle to no longer accept NULL. The assertion failure is, to some extent, a possible denial of service attack (one client can force nbdkit to exit by merely sending OPT_INFO before OPT_GO, preventing the next client from connecting), although this is mitigated by using TLS to weed out untrusted clients. Still, the fact that we introduced a potential DoS attack while trying to fix a traffic amplification security bug is not very nice. Sadly, as there are no known clients that easily trigger this mode of operation (OPT_INFO before OPT_GO), there is no easy way to cover this via a testsuite addition. I may end up hacking something into libnbd. Fixes: c05686f957 Signed-off-by: Eric Blake <eblake@redhat.com>
const char *XMLRPC_SetValueID_Case(XMLRPC_VALUE value, const char* id, int len, XMLRPC_CASE id_case) { const char* pRetval = NULL; if(value) { if(id) { simplestring_clear(&value->id); (len > 0) ? simplestring_addn(&value->id, id, len) : simplestring_add(&value->id, id); /* upper or lower case string in place if required. could be a separate func. */ if(id_case == xmlrpc_case_lower || id_case == xmlrpc_case_upper) { int i; for(i = 0; i < value->id.len; i++) { value->id.str[i] = (id_case == xmlrpc_case_lower) ? tolower (value->id. str[i]) : toupper (value-> id. str[i]); } } pRetval = value->id.str; #ifdef XMLRPC_DEBUG_REFCOUNT printf("set value id: %s\n", pRetval); #endif } } return pRetval; }
0
[ "CWE-119" ]
php-src
88412772d295ebf7dd34409534507dc9bcac726e
164,070,573,908,305,530,000,000,000,000,000,000,000
31
Fix bug #68027 - fix date parsing in XMLRPC lib
static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev) { int err = -1; struct inet6_ifaddr *ifp; read_lock_bh(&idev->lock); for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { memcpy(eui, ifp->addr.s6_addr+8, 8); err = 0; break; } } read_unlock_bh(&idev->lock); return err; }
0
[ "CWE-200" ]
linux-2.6
8a47077a0b5aa2649751c46e7a27884e6686ccbf
141,846,449,824,708,640,000,000,000,000,000,000,000
16
[NETLINK]: Missing padding fields in dumped structures Plug holes with padding fields and initialize them to zero. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
static int __init fw_devlink_setup(char *arg) { if (!arg) return -EINVAL; if (strcmp(arg, "off") == 0) { fw_devlink_flags = 0; } else if (strcmp(arg, "permissive") == 0) { fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY; } else if (strcmp(arg, "on") == 0) { fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER; } else if (strcmp(arg, "rpm") == 0) { fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER | DL_FLAG_PM_RUNTIME; } return 0; }
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
252,242,992,380,584,500,000,000,000,000,000,000,000
17
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf family calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <joe@perches.com> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CImg<T>& blur(const float sigma_x, const float sigma_y, const float sigma_z, const bool boundary_conditions=true, const bool is_gaussian=false) { if (is_empty()) return *this; if (is_gaussian) { if (_width>1) vanvliet(sigma_x,0,'x',boundary_conditions); if (_height>1) vanvliet(sigma_y,0,'y',boundary_conditions); if (_depth>1) vanvliet(sigma_z,0,'z',boundary_conditions); } else { if (_width>1) deriche(sigma_x,0,'x',boundary_conditions); if (_height>1) deriche(sigma_y,0,'y',boundary_conditions); if (_depth>1) deriche(sigma_z,0,'z',boundary_conditions); } return *this; }
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
336,960,466,727,034,700,000,000,000,000,000,000,000
14
Fix other issues in 'CImg<T>::load_bmp()'.
GF_Err gmin_box_size(GF_Box *s) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; ptr->size += 12; return GF_OK; }
0
[ "CWE-476" ]
gpac
6170024568f4dda310e98ef7508477b425c58d09
242,865,494,608,724,660,000,000,000,000,000,000,000
6
fixed potential crash - cf #1263
u32 a_copy_from_user(void *to, const void *from, u32 n) { return(copy_from_user(to, from, n)); }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
54,371,005,763,322,350,000,000,000,000,000,000,000
4
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, we are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs). There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearing the IFF_TX_SKB_SHARING flag in priv_flags. Signed-off-by: Neil Horman <nhorman@tuxdriver.com> CC: Karsten Keil <isdn@linux-pingi.de> CC: "David S. Miller" <davem@davemloft.net> CC: Jay Vosburgh <fubar@us.ibm.com> CC: Andy Gospodarek <andy@greyhouse.net> CC: Patrick McHardy <kaber@trash.net> CC: Krzysztof Halasa <khc@pm.waw.pl> CC: "John W. Linville" <linville@tuxdriver.com> CC: Greg Kroah-Hartman <gregkh@suse.de> CC: Marcel Holtmann <marcel@holtmann.org> CC: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: David S. Miller <davem@davemloft.net>
int bond_set_carrier(struct bonding *bond) { struct list_head *iter; struct slave *slave; if (!bond_has_slaves(bond)) goto down; if (BOND_MODE(bond) == BOND_MODE_8023AD) return bond_3ad_set_carrier(bond); bond_for_each_slave(bond, slave, iter) { if (slave->link == BOND_LINK_UP) { if (!netif_carrier_ok(bond->dev)) { netif_carrier_on(bond->dev); return 1; } return 0; } } down: if (netif_carrier_ok(bond->dev)) { netif_carrier_off(bond->dev); return 1; } return 0; }
0
[ "CWE-476", "CWE-703" ]
linux
105cd17a866017b45f3c45901b394c711c97bf40
198,748,019,269,402,660,000,000,000,000,000,000,000
28
bonding: fix null dereference in bond_ipsec_add_sa() If bond doesn't have real device, bond->curr_active_slave is null. But bond_ipsec_add_sa() dereferences bond->curr_active_slave without null checking. So, null-ptr-deref would occur. Test commands: ip link add bond0 type bond ip link set bond0 up ip x s add proto esp dst 14.1.1.1 src 15.1.1.1 spi \ 0x07 mode transport reqid 0x07 replay-window 32 aead 'rfc4106(gcm(aes))' \ 0x44434241343332312423222114131211f4f3f2f1 128 sel src 14.0.0.52/24 \ dst 14.0.0.70/24 proto tcp offload dev bond0 dir in Splat looks like: KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] CPU: 4 PID: 680 Comm: ip Not tainted 5.13.0-rc3+ #1168 RIP: 0010:bond_ipsec_add_sa+0xc4/0x2e0 [bonding] Code: 85 21 02 00 00 4d 8b a6 48 0c 00 00 e8 75 58 44 ce 85 c0 0f 85 14 01 00 00 48 b8 00 00 00 00 00 fc ff df 4c 89 e2 48 c1 ea 03 <80> 3c 02 00 0f 85 fc 01 00 00 48 8d bb e0 02 00 00 4d 8b 2c 24 48 RSP: 0018:ffff88810946f508 EFLAGS: 00010246 RAX: dffffc0000000000 RBX: ffff88810b4e8040 RCX: 0000000000000001 RDX: 0000000000000000 RSI: ffffffff8fe34280 RDI: ffff888115abe100 RBP: ffff88810946f528 R08: 0000000000000003 R09: fffffbfff2287e11 R10: 0000000000000001 R11: ffff888115abe0c8 R12: 0000000000000000 R13: ffffffffc0aea9a0 R14: ffff88800d7d2000 R15: ffff88810b4e8330 FS: 00007efc5552e680(0000) GS:ffff888119c00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055c2530dbf40 CR3: 0000000103056004 CR4: 00000000003706e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: xfrm_dev_state_add+0x2a9/0x770 ? memcpy+0x38/0x60 xfrm_add_sa+0x2278/0x3b10 [xfrm_user] ? xfrm_get_policy+0xaa0/0xaa0 [xfrm_user] ? register_lock_class+0x1750/0x1750 xfrm_user_rcv_msg+0x331/0x660 [xfrm_user] ? rcu_read_lock_sched_held+0x91/0xc0 ? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user] ? find_held_lock+0x3a/0x1c0 ? mutex_lock_io_nested+0x1210/0x1210 ? sched_clock_cpu+0x18/0x170 netlink_rcv_skb+0x121/0x350 ? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user] ? netlink_ack+0x9d0/0x9d0 ? netlink_deliver_tap+0x17c/0xa50 xfrm_netlink_rcv+0x68/0x80 [xfrm_user] netlink_unicast+0x41c/0x610 ? netlink_attachskb+0x710/0x710 netlink_sendmsg+0x6b9/0xb70 [ ...] Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Signed-off-by: Taehee Yoo <ap420073@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
void posix_cpu_timers_exit_group(struct task_struct *tsk) { cleanup_timers(tsk->signal->cpu_timers, cputime_add(tsk->utime, tsk->signal->utime), cputime_add(tsk->stime, tsk->signal->stime), tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime); }
0
[ "CWE-189" ]
linux
f8bd2258e2d520dff28c855658bd24bdafb5102d
232,162,920,393,694,930,000,000,000,000,000,000,000
7
remove div_long_long_rem x86 is the only arch right now which provides an optimized div_long_long_rem, and it has the downside that one has to be very careful that the divide doesn't overflow. The API is a little awkward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs. There is little incentive to keep this API alive, so this converts the few users to the new API. Signed-off-by: Roman Zippel <zippel@linux-m68k.org> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: john stultz <johnstul@us.ibm.com> Cc: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
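For a userspace analogue of the kind of replacement the commit argues for, standard C's lldiv performs a signed 64-bit divide with remainder and handles negative operands, including a negative divisor (a sketch only; the kernel-side API the commit converts callers to is a different interface):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    lldiv_t r = lldiv(-7000000000LL, 3LL);   /* quotient and remainder in one call */
    printf("quot=%lld rem=%lld\n", r.quot, r.rem);
    return 0;
}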
static inline void put_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){ int i,j; for (i=0; i < height; i++) { for (j=0; j < width; j++) { dst[j] = (683*(2*src[j] + src[j+stride] + 1)) >> 11; } src += stride; dst += stride; } }
0
[ "CWE-703", "CWE-189" ]
FFmpeg
454a11a1c9c686c78aa97954306fb63453299760
152,749,711,041,837,200,000,000,000,000,000,000,000
10
avcodec/dsputil: fix signedness in sizeof() comparisons Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
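The bug class behind this commit: sizeof() yields an unsigned size_t, so comparing it against a signed value silently converts the signed operand to unsigned, and negative values take the wrong branch. A minimal illustration (the names are illustrative, not FFmpeg's):

#include <stdio.h>

static const char table[16] = {0};

static const char *classify(int n)
{
    /* Bug: n is converted to size_t here, so n == -1 compares as
     * SIZE_MAX and the "fits" branch is never taken for negative input. */
    if (n < sizeof(table))
        return "fits";
    /* A fixed form keeps the compare signed: if (n < (int) sizeof(table)) */
    return "does not fit";
}

int main(void)
{
    printf("%s\n", classify(-1));   /* prints "does not fit" */
    return 0;
}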
transformFKConstraints(CreateStmtContext *cxt, bool skipValidation, bool isAddConstraint) { ListCell *fkclist; if (cxt->fkconstraints == NIL) return; /* * If CREATE TABLE or adding a column with NULL default, we can safely * skip validation of FK constraints, and nonetheless mark them valid. * (This will override any user-supplied NOT VALID flag.) */ if (skipValidation) { foreach(fkclist, cxt->fkconstraints) { Constraint *constraint = (Constraint *) lfirst(fkclist); constraint->skip_validation = true; constraint->initially_valid = true; } } /* * For CREATE TABLE or ALTER TABLE ADD COLUMN, gin up an ALTER TABLE ADD * CONSTRAINT command to execute after the basic command is complete. (If * called from ADD CONSTRAINT, that routine will add the FK constraints to * its own subcommand list.) * * Note: the ADD CONSTRAINT command must also execute after any index * creation commands. Thus, this should run after * transformIndexConstraints, so that the CREATE INDEX commands are * already in cxt->alist. See also the handling of cxt->likeclauses. */ if (!isAddConstraint) { AlterTableStmt *alterstmt = makeNode(AlterTableStmt); alterstmt->relation = cxt->relation; alterstmt->cmds = NIL; alterstmt->objtype = OBJECT_TABLE; foreach(fkclist, cxt->fkconstraints) { Constraint *constraint = (Constraint *) lfirst(fkclist); AlterTableCmd *altercmd = makeNode(AlterTableCmd); altercmd->subtype = AT_AddConstraint; altercmd->name = NULL; altercmd->def = (Node *) constraint; alterstmt->cmds = lappend(alterstmt->cmds, altercmd); } cxt->alist = lappend(cxt->alist, alterstmt); } }
0
[ "CWE-94" ]
postgres
b9b21acc766db54d8c337d508d0fe2f5bf2daab0
140,803,110,518,631,070,000,000,000,000,000,000,000
57
In extensions, don't replace objects not belonging to the extension. Previously, if an extension script did CREATE OR REPLACE and there was an existing object not belonging to the extension, it would overwrite the object and adopt it into the extension. This is problematic, first because the overwrite is probably unintentional, and second because we didn't change the object's ownership. Thus a hostile user could create an object in advance of an expected CREATE EXTENSION command, and would then have ownership rights on an extension object, which could be modified for trojan-horse-type attacks. Hence, forbid CREATE OR REPLACE of an existing object unless it already belongs to the extension. (Note that we've always forbidden replacing an object that belongs to some other extension; only the behavior for previously-free-standing objects changes here.) For the same reason, also fail CREATE IF NOT EXISTS when there is an existing object that doesn't belong to the extension. Our thanks to Sven Klemm for reporting this problem. Security: CVE-2022-2625
writeDataError(instanceData *pData, cJSON **pReplyRoot, uchar *reqmsg) { char *rendered = NULL; cJSON *errRoot; cJSON *req; cJSON *replyRoot = *pReplyRoot; size_t toWrite; ssize_t wrRet; char errStr[1024]; DEFiRet; if(pData->errorFile == NULL) { DBGPRINTF("omelasticsearch: no local error logger defined - " "ignoring ES error information\n"); FINALIZE; } if(pData->fdErrFile == -1) { pData->fdErrFile = open((char*)pData->errorFile, O_WRONLY|O_CREAT|O_APPEND|O_LARGEFILE|O_CLOEXEC, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP); if(pData->fdErrFile == -1) { rs_strerror_r(errno, errStr, sizeof(errStr)); DBGPRINTF("omelasticsearch: error opening error file: %s\n", errStr); ABORT_FINALIZE(RS_RET_ERR); } } if((req=cJSON_CreateObject()) == NULL) ABORT_FINALIZE(RS_RET_ERR); cJSON_AddItemToObject(req, "url", cJSON_CreateString((char*)pData->restURL)); cJSON_AddItemToObject(req, "postdata", cJSON_CreateString((char*)reqmsg)); if((errRoot=cJSON_CreateObject()) == NULL) ABORT_FINALIZE(RS_RET_ERR); cJSON_AddItemToObject(errRoot, "request", req); cJSON_AddItemToObject(errRoot, "reply", replyRoot); rendered = cJSON_Print(errRoot); /* we do not do real error-handling on the err file, as this finally complicates * things way to much. */ DBGPRINTF("omelasticsearch: error record: '%s'\n", rendered); toWrite = strlen(rendered); wrRet = write(pData->fdErrFile, rendered, toWrite); if(wrRet != (ssize_t) toWrite) { DBGPRINTF("omelasticsearch: error %d writing error file, write returns %lld\n", errno, (long long) wrRet); } free(rendered); cJSON_Delete(errRoot); *pReplyRoot = NULL; /* tell caller not to delete once again! */ finalize_it: if(rendered != NULL) free(rendered); RETiRet; }
1
[ "CWE-399" ]
rsyslog
80f88242982c9c6ad6ce8628fc5b94ea74051cf4
48,498,255,380,292,090,000,000,000,000,000,000,000
54
bugfix: double-free in omelasticsearch closes: http://bugzilla.adiscon.com/show_bug.cgi?id=461 Thanks to Marius Ionescu for providing a detailed bug report
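The double free is visible in the function above: rendered is freed once after the debug write and again under finalize_it. The conventional fix pattern is to null the pointer at the point of free so any shared cleanup path becomes a no-op — a sketch of the pattern, not the actual rsyslog patch:

#include <stdlib.h>

static void report_and_cleanup(char *rendered)
{
    /* ... use rendered ... */
    free(rendered);
    rendered = NULL;        /* later cleanup now sees NULL and skips */

    /* shared cleanup path, reached from every exit: */
    if (rendered != NULL)
        free(rendered);     /* unreachable for the already-freed buffer */
}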
static int ssl_compress_buf( ssl_context *ssl ) { int ret; unsigned char *msg_post = ssl->out_msg; size_t len_pre = ssl->out_msglen; unsigned char *msg_pre = ssl->compress_buf; SSL_DEBUG_MSG( 2, ( "=> compress buf" ) ); if( len_pre == 0 ) return( 0 ); memcpy( msg_pre, ssl->out_msg, len_pre ); SSL_DEBUG_MSG( 3, ( "before compression: msglen = %d, ", ssl->out_msglen ) ); SSL_DEBUG_BUF( 4, "before compression: output payload", ssl->out_msg, ssl->out_msglen ); ssl->transform_out->ctx_deflate.next_in = msg_pre; ssl->transform_out->ctx_deflate.avail_in = len_pre; ssl->transform_out->ctx_deflate.next_out = msg_post; ssl->transform_out->ctx_deflate.avail_out = SSL_BUFFER_LEN; ret = deflate( &ssl->transform_out->ctx_deflate, Z_SYNC_FLUSH ); if( ret != Z_OK ) { SSL_DEBUG_MSG( 1, ( "failed to perform compression (%d)", ret ) ); return( POLARSSL_ERR_SSL_COMPRESSION_FAILED ); } ssl->out_msglen = SSL_BUFFER_LEN - ssl->transform_out->ctx_deflate.avail_out; SSL_DEBUG_MSG( 3, ( "after compression: msglen = %d, ", ssl->out_msglen ) ); SSL_DEBUG_BUF( 4, "after compression: output payload", ssl->out_msg, ssl->out_msglen ); SSL_DEBUG_MSG( 2, ( "<= compress buf" ) ); return( 0 ); }
0
[ "CWE-119" ]
mbedtls
c988f32adde62a169ba340fee0da15aecd40e76e
87,463,391,862,460,660,000,000,000,000,000,000,000
45
Added max length checking of hostname
int ext4_ext_precache(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_ext_path *path = NULL; struct buffer_head *bh; int i = 0, depth, ret = 0; if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return 0; /* not an extent-mapped inode */ down_read(&ei->i_data_sem); depth = ext_depth(inode); /* Don't cache anything if there are no external extent blocks */ if (!depth) { up_read(&ei->i_data_sem); return ret; } path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), GFP_NOFS); if (path == NULL) { up_read(&ei->i_data_sem); return -ENOMEM; } path[0].p_hdr = ext_inode_hdr(inode); ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0); if (ret) goto out; path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr); while (i >= 0) { /* * If this is a leaf block or we've reached the end of * the index block, go up */ if ((i == depth) || path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) { brelse(path[i].p_bh); path[i].p_bh = NULL; i--; continue; } bh = read_extent_tree_block(inode, ext4_idx_pblock(path[i].p_idx++), depth - i - 1, EXT4_EX_FORCE_CACHE); if (IS_ERR(bh)) { ret = PTR_ERR(bh); break; } i++; path[i].p_bh = bh; path[i].p_hdr = ext_block_hdr(bh); path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr); } ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED); out: up_read(&ei->i_data_sem); ext4_ext_drop_refs(path); kfree(path); return ret; }
0
[ "CWE-703" ]
linux
ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1
306,015,413,505,286,530,000,000,000,000,000,000,000
63
ext4: check journal inode extents more carefully Currently, system zones just track ranges of blocks that are "important" fs metadata (bitmaps, group descriptors, journal blocks, etc.). This however complicates how the extent tree (or indirect blocks) can be checked for inodes that actually track such metadata - currently the journal inode but arguably we should be treating quota files or the resize inode similarly. We cannot run __ext4_ext_check() on such metadata inodes when loading their extents as that would immediately trigger the validity checks and so we just hack around that and special-case the journal inode. This however leads to a situation where a journal inode which has an extent tree of depth at least one can have an invalid extent tree that goes unnoticed until ext4_cache_extents() crashes. To overcome this limitation, track the inode number each system zone belongs to (0 is used for zones not belonging to any inode). We can then verify the inode number matches the expected one when verifying the extent tree and thus avoid the false errors. With this there's no need to special-case the journal inode during extent tree checking anymore, so remove it. Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode") Reported-by: Wolfgang Frisch <wolfgang.frisch@suse.com> Reviewed-by: Lukas Czerner <lczerner@redhat.com> Signed-off-by: Jan Kara <jack@suse.cz> Link: https://lore.kernel.org/r/20200728130437.7804-4-jack@suse.cz Signed-off-by: Theodore Ts'o <tytso@mit.edu>
xmlOutputBufferWriteString(xmlOutputBufferPtr out, const char *str) { int len; if ((out == NULL) || (out->error)) return(-1); if (str == NULL) return(-1); len = strlen(str); if (len > 0) return(xmlOutputBufferWrite(out, len, str)); return(len); }
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
6,749,544,547,466,285,000,000,000,000,000,000,000
12
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
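The vulnerability class behind these warnings: passing caller-controlled text as the format argument lets conversion specifiers embedded in the data (%n, %s, ...) drive the formatter. Format attributes like the LIBXML_ATTR_FORMAT macro mentioned above let the compiler flag such call sites. A minimal illustration:

#include <stdio.h>

static void log_msg(const char *user_data)
{
    /* Dangerous: user_data would be interpreted as a format string.
     * fprintf(stderr, user_data); */

    /* Safe: constant format, data passed as an ordinary argument. */
    fprintf(stderr, "%s\n", user_data);
}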
AttVal *TY_(NewAttribute)( TidyDocImpl* doc ) { AttVal *av = (AttVal*) TidyDocAlloc( doc, sizeof(AttVal) ); TidyClearMemory( av, sizeof(AttVal) ); return av; }
0
[ "CWE-119" ]
tidy-html5
c18f27a58792f7fbd0b30a0ff50d6b40a82f940d
236,824,816,698,799,840,000,000,000,000,000,000,000
6
Issue #217 - avoid len going negative, ever...
void sspi_SecureHandleInvalidate(SecHandle* handle) { if (!handle) return; sspi_SecureHandleInit(handle); }
0
[ "CWE-476", "CWE-125" ]
FreeRDP
0773bb9303d24473fe1185d85a424dfe159aff53
30,702,951,111,887,990,000,000,000,000,000,000,000
7
nla: invalidate sec handle after creation If the sec pointer isn't invalidated after creation, it is not possible to check whether the upper and lower pointers are valid. This fixes a segfault in the server part if the client disconnects before the authentication has finished.
PS_CLOSE_FUNC(files) { PS_FILES_DATA; ps_files_close(data); if (data->lastkey) { efree(data->lastkey); } efree(data->basedir); efree(data); *mod_data = NULL; return SUCCESS; }
0
[ "CWE-264" ]
php-src
25e8fcc88fa20dc9d4c47184471003f436927cde
127,684,390,368,372,780,000,000,000,000,000,000,000
16
Strict session
static u8 get_x86_family(unsigned long sig) { u8 x86; x86 = (sig >> 8) & 0xf; if (x86 == 0xf) x86 += (sig >> 20) & 0xff; return x86; }
0
[ "CWE-119", "CWE-787" ]
linux
f84598bd7c851f8b0bf8cd0d7c3be0d73c432ff4
220,218,828,293,730,700,000,000,000,000,000,000,000
11
x86/microcode/intel: Guard against stack overflow in the loader mc_saved_tmp is a static array allocated on the stack, we need to make sure mc_saved_count stays within its bounds, otherwise we're overflowing the stack in _save_mc(). A specially crafted microcode header could lead to a kernel crash or potentially kernel execution. Signed-off-by: Quentin Casasnovas <quentin.casasnovas@oracle.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Link: http://lkml.kernel.org/r/1422964824-22056-1-git-send-email-quentin.casasnovas@oracle.com Signed-off-by: Borislav Petkov <bp@suse.de>
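A sketch of the kind of bounds guard the fix adds before appending to a fixed-size on-stack array (the names and capacity here are illustrative, not the loader's actual ones):

#include <stddef.h>

#define MC_SAVED_MAX 8                      /* hypothetical capacity */

static int save_entry(const void *saved[], size_t *count, const void *mc)
{
    if (*count >= MC_SAVED_MAX)             /* refuse rather than overflow */
        return -1;
    saved[(*count)++] = mc;
    return 0;
}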
static int binlog_close_connection(handlerton *hton, THD *thd) { binlog_cache_mngr *const cache_mngr= (binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton); DBUG_ASSERT(cache_mngr->trx_cache.empty() && cache_mngr->stmt_cache.empty()); thd_set_ha_data(thd, binlog_hton, NULL); cache_mngr->~binlog_cache_mngr(); my_free(cache_mngr); return 0; }
0
[ "CWE-264" ]
mysql-server
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
61,894,537,887,382,180,000,000,000,000,000,000,000
10
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE [This is the 5.5/5.6 version of the bugfix]. The problem was that it was possible to write log files ending in .ini/.cnf that later could be parsed as an options file. This made it possible for users to specify startup options without the permissions to do so. This patch fixes the problem by disallowing general query log and slow query log to be written to files ending in .ini and .cnf.
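A sketch of the suffix check the fix describes: refuse log file names that could later be parsed as options files (the helper name is hypothetical, not MySQL's actual code):

#include <string.h>
#include <strings.h>

static int is_forbidden_log_name(const char *name)
{
    size_t len = strlen(name);
    return len >= 4 &&
           (strcasecmp(name + len - 4, ".ini") == 0 ||
            strcasecmp(name + len - 4, ".cnf") == 0);
}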
ms_escher_opt_start (GString *buf) { gsize res = buf->len; guint8 tmp[8] = { 0x03, 0, 0xb, 0xf0, 0xde, 0xad, 0xbe, 0xef }; g_string_append_len (buf, tmp, sizeof tmp); return res; }
0
[ "CWE-119" ]
gnumeric
b5480b69345b3c6d56ee0ed9c9e9880bb2a08cdc
308,850,854,273,714,450,000,000,000,000,000,000,000
7
xls: fuzzed file crash.
hook_connect (struct t_weechat_plugin *plugin, const char *proxy, const char *address, int port, int ipv6, int retry, void *gnutls_sess, void *gnutls_cb, int gnutls_dhkey_size, const char *gnutls_priorities, const char *local_hostname, t_hook_callback_connect *callback, void *callback_data) { struct t_hook *new_hook; struct t_hook_connect *new_hook_connect; #ifdef HOOK_CONNECT_MAX_SOCKETS int i; #endif #ifndef HAVE_GNUTLS /* make C compiler happy */ (void) gnutls_sess; (void) gnutls_cb; (void) gnutls_dhkey_size; (void) gnutls_priorities; #endif if (!address || (port <= 0) || !callback) return NULL; new_hook = malloc (sizeof (*new_hook)); if (!new_hook) return NULL; new_hook_connect = malloc (sizeof (*new_hook_connect)); if (!new_hook_connect) { free (new_hook); return NULL; } hook_init_data (new_hook, plugin, HOOK_TYPE_CONNECT, HOOK_PRIORITY_DEFAULT, callback_data); new_hook->hook_data = new_hook_connect; new_hook_connect->callback = callback; new_hook_connect->proxy = (proxy) ? strdup (proxy) : NULL; new_hook_connect->address = strdup (address); new_hook_connect->port = port; new_hook_connect->sock = -1; new_hook_connect->ipv6 = ipv6; new_hook_connect->retry = retry; #ifdef HAVE_GNUTLS new_hook_connect->gnutls_sess = gnutls_sess; new_hook_connect->gnutls_cb = gnutls_cb; new_hook_connect->gnutls_dhkey_size = gnutls_dhkey_size; new_hook_connect->gnutls_priorities = (gnutls_priorities) ? strdup (gnutls_priorities) : NULL; #endif new_hook_connect->local_hostname = (local_hostname) ? strdup (local_hostname) : NULL; new_hook_connect->child_read = -1; new_hook_connect->child_write = -1; new_hook_connect->child_recv = -1; new_hook_connect->child_send = -1; new_hook_connect->child_pid = 0; new_hook_connect->hook_child_timer = NULL; new_hook_connect->hook_fd = NULL; new_hook_connect->handshake_hook_fd = NULL; new_hook_connect->handshake_hook_timer = NULL; new_hook_connect->handshake_fd_flags = 0; new_hook_connect->handshake_ip_address = NULL; #ifdef HOOK_CONNECT_MAX_SOCKETS for (i = 0; i < HOOK_CONNECT_MAX_SOCKETS; i++) { new_hook_connect->sock_v4[i] = -1; new_hook_connect->sock_v6[i] = -1; } #endif hook_add_to_list (new_hook); network_connect_with_fork (new_hook); return new_hook; }
0
[ "CWE-20" ]
weechat
efb795c74fe954b9544074aafcebb1be4452b03a
170,532,594,551,878,160,000,000,000,000,000,000,000
78
core: do not call shell to execute command in hook_process (fix security problem when a plugin/script gives untrusted command) (bug #37764)
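The safe pattern the commit moves toward: execute the program directly with an argument vector, so shell metacharacters in plugin-supplied strings are never re-parsed. A minimal sketch under POSIX, not WeeChat's actual code:

#include <sys/wait.h>
#include <unistd.h>

static int run_no_shell(char *const argv[])
{
    pid_t pid = fork();
    if (pid < 0)
        return -1;
    if (pid == 0) {
        execvp(argv[0], argv);      /* no /bin/sh -c: args stay inert data */
        _exit(127);                 /* exec failed */
    }
    int status;
    if (waitpid(pid, &status, 0) < 0)
        return -1;
    return status;
}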
void FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming) { if (state_.filter_call_state_ == 0 || (state_.filter_call_state_ & FilterCallState::DecodeHeaders) || (state_.filter_call_state_ & FilterCallState::DecodeData) || ((state_.filter_call_state_ & FilterCallState::DecodeTrailers) && !filter.canIterate())) { // Make sure if this triggers watermarks, the correct action is taken. state_.decoder_filters_streaming_ = streaming; // If no call is happening or we are in the decode headers/data callback, buffer the data. // Inline processing happens in the decodeHeaders() callback if necessary. filter.commonHandleBufferData(data); } else if (state_.filter_call_state_ & FilterCallState::DecodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. decodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); } else { IS_ENVOY_BUG("Invalid request data"); sendLocalReply(Http::Code::BadGateway, "Filter error", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().FilterAddedInvalidRequestData); } }
0
[ "CWE-416" ]
envoy
148de954ed3585d8b4298b424aa24916d0de6136
90,543,974,065,442,280,000,000,000,000,000,000,000
21
CVE-2021-43825 Response filter manager crash Signed-off-by: Yan Avlasov <yavlasov@google.com>
static void usb_ehci_pci_realize(PCIDevice *dev, Error **errp) { EHCIPCIState *i = PCI_EHCI(dev); EHCIState *s = &i->ehci; uint8_t *pci_conf = dev->config; pci_set_byte(&pci_conf[PCI_CLASS_PROG], 0x20); /* capabilities pointer */ pci_set_byte(&pci_conf[PCI_CAPABILITY_LIST], 0x00); /* pci_set_byte(&pci_conf[PCI_CAPABILITY_LIST], 0x50); */ pci_set_byte(&pci_conf[PCI_INTERRUPT_PIN], 4); /* interrupt pin D */ pci_set_byte(&pci_conf[PCI_MIN_GNT], 0); pci_set_byte(&pci_conf[PCI_MAX_LAT], 0); /* pci_conf[0x50] = 0x01; *//* power management caps */ pci_set_byte(&pci_conf[USB_SBRN], USB_RELEASE_2); /* release # (2.1.4) */ pci_set_byte(&pci_conf[0x61], 0x20); /* frame length adjustment (2.1.5) */ pci_set_word(&pci_conf[0x62], 0x00); /* port wake up capability (2.1.6) */ pci_conf[0x64] = 0x00; pci_conf[0x65] = 0x00; pci_conf[0x66] = 0x00; pci_conf[0x67] = 0x00; pci_conf[0x68] = 0x01; pci_conf[0x69] = 0x00; pci_conf[0x6a] = 0x00; pci_conf[0x6b] = 0x00; /* USBLEGSUP */ pci_conf[0x6c] = 0x00; pci_conf[0x6d] = 0x00; pci_conf[0x6e] = 0x00; pci_conf[0x6f] = 0xc0; /* USBLEFCTLSTS */ s->irq = pci_allocate_irq(dev); s->as = pci_get_address_space(dev); usb_ehci_realize(s, DEVICE(dev), NULL); pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mem); }
0
[ "CWE-772", "CWE-401" ]
qemu
d710e1e7bd3d5bfc26b631f02ae87901ebe646b0
44,529,451,078,385,530,000,000,000,000,000,000,000
41
usb: ehci: fix memory leak in ehci The usb_ehci_init function initializes 's->ipacket', but there is no corresponding function to free it. As the ehci device can be hotplugged and unplugged, this will leak host memory. In order to keep the hierarchy clean, we should add an ehci pci finalize function, then call the cleanup function in the ehci device. Signed-off-by: Li Qiang <liqiang6-s@360.cn> Message-id: 589a85b8.3c2b9d0a.b8e6.1434@mx.google.com Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static ssize_t bt_att_chan_write(struct bt_att_chan *chan, uint8_t opcode, const void *pdu, uint16_t len) { struct bt_att *att = chan->att; ssize_t ret; struct iovec iov; iov.iov_base = (void *) pdu; iov.iov_len = len; util_debug(att->debug_callback, att->debug_data, "(chan %p) ATT op 0x%02x", chan, opcode); ret = io_send(chan->io, &iov, 1); if (ret < 0) { util_debug(att->debug_callback, att->debug_data, "(chan %p) write failed: %s", chan, strerror(-ret)); return ret; } util_hexdump('<', pdu, ret, att->debug_callback, att->debug_data); return ret; }
0
[ "CWE-415" ]
bluez
1cd644db8c23a2f530ddb93cebed7dacc5f5721a
215,228,973,153,616,850,000,000,000,000,000,000,000
27
shared/att: Fix possible crash on disconnect If there are pending requests while disconnecting, they would be notified, but clients may end up being freed in the process, which will then be calling bt_att_cancel to cancel their requests, causing the following trace: Invalid read of size 4 at 0x1D894C: enable_ccc_callback (gatt-client.c:1627) by 0x1D247B: disc_att_send_op (att.c:417) by 0x1CCC17: queue_remove_all (queue.c:354) by 0x1D47B7: disconnect_cb (att.c:635) by 0x1E0707: watch_callback (io-glib.c:170) by 0x48E963B: g_main_context_dispatch (in /usr/lib/libglib-2.0.so.0.6400.4) by 0x48E9AC7: ??? (in /usr/lib/libglib-2.0.so.0.6400.4) by 0x48E9ECF: g_main_loop_run (in /usr/lib/libglib-2.0.so.0.6400.4) by 0x1E0E97: mainloop_run (mainloop-glib.c:79) by 0x1E13B3: mainloop_run_with_signal (mainloop-notify.c:201) by 0x12BC3B: main (main.c:770) Address 0x7d40a28 is 24 bytes inside a block of size 32 free'd at 0x484A2E0: free (vg_replace_malloc.c:540) by 0x1CCC17: queue_remove_all (queue.c:354) by 0x1CCC83: queue_destroy (queue.c:73) by 0x1D7DD7: bt_gatt_client_free (gatt-client.c:2209) by 0x16497B: batt_free (battery.c:77) by 0x16497B: batt_remove (battery.c:286) by 0x1A0013: service_remove (service.c:176) by 0x1A9B7B: device_remove_gatt_service (device.c:3691) by 0x1A9B7B: gatt_service_removed (device.c:3805) by 0x1CC90B: queue_foreach (queue.c:220) by 0x1DE27B: notify_service_changed.isra.0.part.0 (gatt-db.c:369) by 0x1DE387: notify_service_changed (gatt-db.c:361) by 0x1DE387: gatt_db_service_destroy (gatt-db.c:385) by 0x1DE3EF: gatt_db_remove_service (gatt-db.c:519) by 0x1D674F: discovery_op_complete (gatt-client.c:388) by 0x1D6877: discover_primary_cb (gatt-client.c:1260) by 0x1E220B: discovery_op_complete (gatt-helpers.c:628) by 0x1E249B: read_by_grp_type_cb (gatt-helpers.c:730) by 0x1D247B: disc_att_send_op (att.c:417) by 0x1CCC17: queue_remove_all (queue.c:354) by 0x1D47B7: disconnect_cb (att.c:635)
__realpath (const char *name, char *resolved) { #ifdef GCC_BOGUS_WRETURN_LOCAL_ADDR #warning "GCC might issue a bogus -Wreturn-local-addr warning here." #warning "See <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93644>." #endif struct scratch_buffer rname_buffer; return realpath_stk (name, resolved, &rname_buffer); }
0
[ "CWE-125" ]
glibc
ee8d5e33adb284601c00c94687bc907e10aec9bb
27,450,906,756,472,940,000,000,000,000,000,000,000
9
realpath: Set errno to ENAMETOOLONG for result larger than PATH_MAX [BZ #28770] realpath returns an allocated string when the result exceeds PATH_MAX, which is unexpected when its second argument is not NULL. This results in the second argument (resolved) being uninitialized and also results in a memory leak since the caller expects resolved to be the same as the returned value. Return NULL and set errno to ENAMETOOLONG if the result exceeds PATH_MAX. This fixes [BZ #28770], which is CVE-2021-3998. Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org> Signed-off-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
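A userspace sketch of the contract the fix restores: with a caller-supplied buffer, a result longer than PATH_MAX must fail with ENAMETOOLONG instead of returning a heap allocation the caller does not expect. checked_realpath is a hypothetical wrapper, not glibc's internal code, and it assumes PATH_MAX is defined on the target platform:

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

char *checked_realpath(const char *name, char *resolved)
{
    char *rp = realpath(name, NULL);    /* let libc allocate the result */
    if (rp == NULL)
        return NULL;
    if (resolved == NULL)
        return rp;                      /* caller asked for an allocation */
    if (strlen(rp) >= PATH_MAX) {
        free(rp);
        errno = ENAMETOOLONG;           /* fail loudly; do not leak */
        return NULL;
    }
    strcpy(resolved, rp);               /* fits: copy into caller's buffer */
    free(rp);
    return resolved;
}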
static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID); }
0
[ "CWE-284" ]
linux
727ba748e110b4de50d142edca9d6a9b7e6111d8
189,029,878,846,671,500,000,000,000,000,000,000,000
4
kvm: nVMX: Enforce cpl=0 for VMX instructions VMX instructions executed inside a L1 VM will always trigger a VM exit even when executed with cpl 3. This means we must perform the privilege check in software. Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks") Cc: stable@vger.kernel.org Signed-off-by: Felix Wilhelm <fwilhelm@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
MOBI_RET mobi_decompress_lz77(unsigned char *out, const unsigned char *in, size_t *len_out, const size_t len_in) { MOBI_RET ret = MOBI_SUCCESS; MOBIBuffer *buf_in = mobi_buffer_init_null((unsigned char *) in, len_in); if (buf_in == NULL) { debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } MOBIBuffer *buf_out = mobi_buffer_init_null(out, *len_out); if (buf_out == NULL) { mobi_buffer_free_null(buf_in); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } while (ret == MOBI_SUCCESS && buf_in->offset < buf_in->maxlen) { uint8_t byte = mobi_buffer_get8(buf_in); /* byte pair: space + char */ if (byte >= 0xc0) { mobi_buffer_add8(buf_out, ' '); mobi_buffer_add8(buf_out, byte ^ 0x80); } /* length, distance pair */ /* 0x8000 + (distance << 3) + ((length-3) & 0x07) */ else if (byte >= 0x80) { uint8_t next = mobi_buffer_get8(buf_in); uint16_t distance = ((((byte << 8) | ((uint8_t)next)) >> 3) & 0x7ff); uint8_t length = (next & 0x7) + 3; while (length--) { mobi_buffer_move(buf_out, -distance, 1); } } /* single char, not modified */ else if (byte >= 0x09) { mobi_buffer_add8(buf_out, byte); } /* val chars not modified */ else if (byte >= 0x01) { mobi_buffer_copy(buf_out, buf_in, byte); } /* char '\0', not modified */ else { mobi_buffer_add8(buf_out, byte); } if (buf_in->error || buf_out->error) { ret = MOBI_BUFFER_END; } } *len_out = buf_out->offset; mobi_buffer_free_null(buf_out); mobi_buffer_free_null(buf_in); return ret; }
0
[ "CWE-119", "CWE-125" ]
libmobi
bec783e6212439a335ba6e8df7ab8ed610ca9a21
42,162,019,926,437,733,000,000,000,000,000,000,000
51
Fix potential out-of-buffer read while parsing corrupt file, closes #35, #36