func
stringlengths 0
484k
| target
int64 0
1
| cwe
sequence | project
stringlengths 2
29
| commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * Append `len` bytes from `data` to the fileblob `fb`.
 *
 * Returns 0 on success (also when the blob is already flagged infected,
 * in which case the data is silently dropped), -1 on write failure.
 * When fb is backed by a temporary file (fb->fp) and the engine limits
 * permit, the chunk is also scanned in-line before being written out;
 * otherwise the data is appended to the in-memory blob.
 */
int fileblobAddData(fileblob *fb, const unsigned char *data, size_t len)
{
/* Empty chunk: nothing to do. */
if (len == 0)
return 0;
assert(data != NULL);
if (fb->fp) {
#if defined(MAX_SCAN_SIZE) && (MAX_SCAN_SIZE > 0)
/* Inline scan of the chunk before it reaches the temp file. */
const cli_ctx *ctx = fb->ctx;
if (fb->isInfected) /* pretend all was written */
return 0;
if (ctx) {
/* Skip scanning once engine limits or MAX_SCAN_SIZE are exceeded. */
int do_scan = 1;
if (cli_checklimits("fileblobAddData", ctx, fb->bytes_scanned, 0, 0) != CL_CLEAN)
do_scan = 0;
if (fb->bytes_scanned > MAX_SCAN_SIZE)
do_scan = 0;
if (do_scan) {
if (ctx->scanned)
*ctx->scanned += (unsigned long)len / CL_COUNT_PRECISION;
fb->bytes_scanned += (unsigned long)len;
/* Chunks of 5 bytes or less are not worth scanning. */
if ((len > 5) && cli_updatelimits(ctx, len) == CL_CLEAN && (cli_scanbuff(data, (unsigned int)len, 0, ctx->virname, ctx->engine, CL_TYPE_BINARY_DATA, NULL) == CL_VIRUS)) {
cli_dbgmsg("fileblobAddData: found %s\n", cli_get_last_virus_str(ctx->virname));
fb->isInfected = 1;
}
}
}
#endif
if (fwrite(data, len, 1, fb->fp) != 1) {
cli_errmsg("fileblobAddData: Can't write %lu bytes to temporary file %s\n",
(unsigned long)len, fb->b.name);
return -1;
}
fb->isNotEmpty = 1;
return 0;
}
/* No backing file: buffer the data in the in-memory blob instead. */
return blobAddData(&(fb->b), data, len);
} | 0 | [
"CWE-476"
] | clamav-devel | 8bb3716be9c7ab7c6a3a1889267b1072f48af87b | 143,998,451,542,159,940,000,000,000,000,000,000,000 | 44 | fuzz-22348 null deref in egg utf8 conversion
Corrected memory leaks and a null dereference in the egg utf8 conversion. |
/*
 * Attach a flow mark to the packet: the mark is stored in the DPDK
 * mbuf's FDIR hash field and PKT_RX_FDIR_ID is set so readers know the
 * field is valid.  (Return type is declared on a line above this view.)
 */
dp_packet_set_flow_mark(struct dp_packet *p, uint32_t mark)
{
p->mbuf.hash.fdir.hi = mark;
p->mbuf.ol_flags |= PKT_RX_FDIR_ID;
} | 0 | [
"CWE-400"
] | ovs | 3512fb512c76a1f08eba4005aa2eb69160d0840e | 123,473,322,819,645,310,000,000,000,000,000,000,000 | 5 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <joakim.hindersson@elastx.se>
Acked-by: Ilya Maximets <i.maximets@ovn.org>
Signed-off-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org> |
/*
 * Resolve a packed thread-static `offset` to the address of the slot
 * inside `thread`'s static data blocks.  Encoding (asserted below): the
 * top bit must be clear; bits 24..30 hold the 1-based block index; the
 * low 24 bits hold the byte offset within that block.
 */
get_thread_static_data (MonoInternalThread *thread, guint32 offset)
{
int idx;
g_assert ((offset & 0x80000000) == 0);
offset &= 0x7fffffff;
/* Block index is stored 1-based in bits 24..30. */
idx = (offset >> 24) - 1;
return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
} | 0 | [
"CWE-399",
"CWE-264"
] | mono | 722f9890f09aadfc37ae479e7d946d5fc5ef7b91 | 242,796,499,760,182,800,000,000,000,000,000,000,000 | 8 | Fix access to freed members of a dead thread
* threads.c: Fix access to freed members of a dead thread. Found
and fixed by Rodrigo Kumpera <rkumpera@novell.com>
Ref: CVE-2011-0992 |
/*
 * Remove a futex_q from its hash-bucket list, if it is still queued.
 *
 * Returns 1 if this call performed the unqueue, 0 if the futex_q had
 * already been removed (q->lock_ptr was NULL, e.g. a waker got there
 * first).  In either case the reference held on q->key is dropped.
 */
static int unqueue_me(struct futex_q *q)
{
spinlock_t *lock_ptr;
int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
retry:
/*
 * q->lock_ptr can change between this read and the following spin_lock.
 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
 * optimizing lock_ptr out of the logic below.
 */
lock_ptr = READ_ONCE(q->lock_ptr);
if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
 * q->lock_ptr can change between reading it and
 * spin_lock(), causing us to take the wrong lock. This
 * corrects the race condition.
 *
 * Reasoning goes like this: if we have the wrong lock,
 * q->lock_ptr must have changed (maybe several times)
 * between reading it and the spin_lock(). It can
 * change again after the spin_lock() but only if it was
 * already changed before the spin_lock(). It cannot,
 * however, change back to the original value. Therefore
 * we can detect whether we acquired the correct lock.
 */
if (unlikely(lock_ptr != q->lock_ptr)) {
spin_unlock(lock_ptr);
goto retry;
}
__unqueue_futex(q);
BUG_ON(q->pi_state);
spin_unlock(lock_ptr);
ret = 1;
}
drop_futex_key_refs(&q->key);
return ret;
} | 0 | [
"CWE-190"
] | linux | fbe0e839d1e22d88810f3ee3e2f1479be4c0aa4a | 319,770,705,413,058,850,000,000,000,000,000,000,000 | 43 | futex: Prevent overflow by strengthen input validation
UBSAN reports signed integer overflow in kernel/futex.c:
UBSAN: Undefined behaviour in kernel/futex.c:2041:18
signed integer overflow:
0 - -2147483648 cannot be represented in type 'int'
Add a sanity check to catch negative values of nr_wake and nr_requeue.
Signed-off-by: Li Jinyue <lijinyue@huawei.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: peterz@infradead.org
Cc: dvhart@infradead.org
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/1513242294-31786-1-git-send-email-lijinyue@huawei.com |
/*
 * Build an RT signal frame on the user stack for signal `usig` and
 * adjust `regs` so the task resumes in the handler described by `ka`.
 *
 * Returns 0 on success, non-zero if the frame could not be set up
 * (no usable stack, or a copy to user space failed).
 * (Return type is declared on a line above this view.)
 */
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
stack_t stack;
int err = 0;
if (!frame)
return 1;
err |= copy_siginfo_to_user(&frame->info, info);
__put_user_error(0, &frame->sig.uc.uc_flags, err);
__put_user_error(NULL, &frame->sig.uc.uc_link, err);
/* Snapshot the alternate-signal-stack state into the user context. */
memset(&stack, 0, sizeof(stack));
stack.ss_sp = (void __user *)current->sas_ss_sp;
stack.ss_flags = sas_ss_flags(regs->ARM_sp);
stack.ss_size = current->sas_ss_size;
err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
err |= setup_sigframe(&frame->sig, regs, set);
if (err == 0)
err = setup_return(regs, ka, frame->sig.retcode, frame, usig);
if (err == 0) {
/*
 * For realtime signals we must also set the second and third
 * arguments for the signal handler.
 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
 */
regs->ARM_r1 = (unsigned long)&frame->info;
regs->ARM_r2 = (unsigned long)&frame->sig.uc;
}
return err;
} | 0 | [] | linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 223,929,349,214,602,360,000,000,000,000,000,000,000 | 37 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org> |
/*
 * Handle NFT_MSG_NEWCHAIN on an already-existing chain (an update):
 * verify that immutable attributes (flags, hook spec, chain type) match
 * the existing chain, optionally parse new counters and a rename, and
 * queue the whole change as a transaction on the commit list.
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP for disallowed
 * changes, -EEXIST for conflicting hook/name, -ENOMEM, ...).
 */
static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
u32 flags, const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
struct nft_chain *chain = ctx->chain;
struct nft_base_chain *basechain;
struct nft_stats *stats = NULL;
struct nft_chain_hook hook;
struct nf_hook_ops *ops;
struct nft_trans *trans;
int err;
/* Chain flags cannot be changed by an update. */
if (chain->flags ^ flags)
return -EOPNOTSUPP;
if (nla[NFTA_CHAIN_HOOK]) {
/* A re-specified hook must match the existing base chain exactly. */
if (!nft_is_base_chain(chain)) {
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
extack, false);
if (err < 0)
return err;
basechain = nft_base_chain(chain);
if (basechain->type != hook.type) {
nft_chain_release_hook(&hook);
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
if (nft_base_chain_netdev(ctx->family, hook.num)) {
/* netdev-family chains compare the whole device hook list. */
if (!nft_hook_list_equal(&basechain->hook_list,
&hook.list)) {
nft_chain_release_hook(&hook);
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
} else {
ops = &basechain->ops;
if (ops->hooknum != hook.num ||
ops->priority != hook.priority) {
nft_chain_release_hook(&hook);
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
}
nft_chain_release_hook(&hook);
}
if (nla[NFTA_CHAIN_HANDLE] &&
nla[NFTA_CHAIN_NAME]) {
/* Rename request: the new name must not already exist in the table. */
struct nft_chain *chain2;
chain2 = nft_chain_lookup(ctx->net, table,
nla[NFTA_CHAIN_NAME], genmask);
if (!IS_ERR(chain2)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
return -EEXIST;
}
}
if (nla[NFTA_CHAIN_COUNTERS]) {
/* Counters only make sense on base chains. */
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
if (IS_ERR(stats))
return PTR_ERR(stats);
}
err = -ENOMEM;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
sizeof(struct nft_trans_chain));
if (trans == NULL)
goto err;
nft_trans_chain_stats(trans) = stats;
nft_trans_chain_update(trans) = true;
if (nla[NFTA_CHAIN_POLICY])
nft_trans_chain_policy(trans) = policy;
else
nft_trans_chain_policy(trans) = -1;
if (nla[NFTA_CHAIN_HANDLE] &&
nla[NFTA_CHAIN_NAME]) {
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
struct nft_trans *tmp;
char *name;
err = -ENOMEM;
name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL_ACCOUNT);
if (!name)
goto err;
err = -EEXIST;
/* Also reject a rename colliding with another rename already
 * pending in this transaction batch. */
list_for_each_entry(tmp, &nft_net->commit_list, list) {
if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
tmp->ctx.table == table &&
nft_trans_chain_update(tmp) &&
nft_trans_chain_name(tmp) &&
strcmp(name, nft_trans_chain_name(tmp)) == 0) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
kfree(name);
goto err;
}
}
nft_trans_chain_name(trans) = name;
}
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err:
free_percpu(stats);
kfree(trans);
return err;
} | 0 | [] | net | 520778042ccca019f3ffa136dd0ca565c486cedd | 243,297,828,181,021,000,000,000,000,000,000,000,000 | 122 | netfilter: nf_tables: disallow non-stateful expression in sets earlier
Since 3e135cd499bf ("netfilter: nft_dynset: dynamic stateful expression
instantiation"), it is possible to attach stateful expressions to set
elements.
cd5125d8f518 ("netfilter: nf_tables: split set destruction in deactivate
and destroy phase") introduces conditional destruction on the object to
accomodate transaction semantics.
nft_expr_init() calls expr->ops->init() first, then check for
NFT_STATEFUL_EXPR, this stills allows to initialize a non-stateful
lookup expressions which points to a set, which might lead to UAF since
the set is not properly detached from the set->binding for this case.
Anyway, this combination is non-sense from nf_tables perspective.
This patch fixes this problem by checking for NFT_STATEFUL_EXPR before
expr->ops->init() is called.
The reporter provides a KASAN splat and a poc reproducer (similar to
those autogenerated by syzbot to report use-after-free errors). It is
unknown to me if they are using syzbot or if they use similar automated
tool to locate the bug that they are reporting.
For the record, this is the KASAN splat.
[ 85.431824] ==================================================================
[ 85.432901] BUG: KASAN: use-after-free in nf_tables_bind_set+0x81b/0xa20
[ 85.433825] Write of size 8 at addr ffff8880286f0e98 by task poc/776
[ 85.434756]
[ 85.434999] CPU: 1 PID: 776 Comm: poc Tainted: G W 5.18.0+ #2
[ 85.436023] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
Fixes: 0b2d8a7b638b ("netfilter: nf_tables: add helper functions for expression handling")
Reported-and-tested-by: Aaron Adams <edg-e@nccgroup.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> |
/*
 * Fetch and pre-process the next TLS/DTLS record.
 *
 * Honours ssl->keep_current_message (re-deliver the last message without
 * touching the transport), and for DTLS replays buffered out-of-order
 * messages from the current datagram before reading new data.  When a
 * handshake message arrives and update_hs_digest is 1, the handshake
 * transcript is updated.  Returns 0 on success or an MBEDTLS_ERR_* code.
 */
int mbedtls_ssl_read_record( mbedtls_ssl_context *ssl,
unsigned update_hs_digest )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> read record" ) );
if( ssl->keep_current_message == 0 )
{
do {
ret = ssl_consume_current_message( ssl );
if( ret != 0 )
return( ret );
if( ssl_record_is_in_progress( ssl ) == 0 )
{
#if defined(MBEDTLS_SSL_PROTO_DTLS)
int have_buffered = 0;
/* We only check for buffered messages if the
 * current datagram is fully consumed. */
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
ssl_next_record_is_in_datagram( ssl ) == 0 )
{
if( ssl_load_buffered_message( ssl ) == 0 )
have_buffered = 1;
}
if( have_buffered == 0 )
#endif /* MBEDTLS_SSL_PROTO_DTLS */
{
ret = ssl_get_next_record( ssl );
if( ret == MBEDTLS_ERR_SSL_CONTINUE_PROCESSING )
continue;
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_get_next_record" ), ret );
return( ret );
}
}
}
ret = mbedtls_ssl_handle_message_type( ssl );
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ret == MBEDTLS_ERR_SSL_EARLY_MESSAGE )
{
/* Buffer future message */
ret = ssl_buffer_message( ssl );
if( ret != 0 )
return( ret );
ret = MBEDTLS_ERR_SSL_CONTINUE_PROCESSING;
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
} while( MBEDTLS_ERR_SSL_NON_FATAL == ret ||
MBEDTLS_ERR_SSL_CONTINUE_PROCESSING == ret );
if( 0 != ret )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "mbedtls_ssl_handle_message_type" ), ret );
return( ret );
}
/* Only feed handshake records into the transcript when asked to. */
if( ssl->in_msgtype == MBEDTLS_SSL_MSG_HANDSHAKE &&
update_hs_digest == 1 )
{
mbedtls_ssl_update_handshake_status( ssl );
}
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "reuse previously read message" ) );
ssl->keep_current_message = 0;
}
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= read record" ) );
return( 0 );
} | 0 | [
"CWE-787"
] | mbedtls | f333dfab4a6c2d8a604a61558a8f783145161de4 | 137,409,074,213,334,230,000,000,000,000,000,000,000 | 83 | More SSL debug messages for ClientHello parsing
In particular, be verbose when checking the ClientHello cookie in a possible
DTLS reconnection.
Signed-off-by: Gilles Peskine <Gilles.Peskine@arm.com> |
/*
 * Record a parsed "require-instance" statement on a leafref type being
 * built.  Fails if the statement appears under a base type that is not
 * a leafref, or if require-instance was already set for this type.
 * Returns EXIT_SUCCESS or EXIT_FAILURE (with an error logged).
 * (Return type is declared on a line above this view.)
 */
yang_read_require_instance(struct ly_ctx *ctx, struct yang_type *stype, int req)
{
if (stype->base && (stype->base != LY_TYPE_LEAFREF)) {
LOGVAL(ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "require-instance");
return EXIT_FAILURE;
}
/* Duplicate require-instance statements are invalid. */
if (stype->type->info.lref.req) {
LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "require-instance", "type");
return EXIT_FAILURE;
}
stype->type->info.lref.req = req;
stype->base = LY_TYPE_LEAFREF;
return EXIT_SUCCESS;
} | 0 | [
"CWE-415"
] | libyang | d9feacc4a590d35dbc1af21caf9080008b4450ed | 223,384,544,291,447,000,000,000,000,000,000,000,000 | 14 | yang parser BUGFIX double free
Fixes #742 |
/*
 * JSON decoder for UA_SByte: parse the current token as a signed
 * integer and store it narrowed to UA_SByte in *dst, advancing the
 * token index when moveToken is set.
 * NOTE(review): the (UA_SByte) cast silently truncates values outside
 * the SByte range unless parseSignedInteger rejects them - confirm in
 * that helper.
 */
DECODE_JSON(SByte) {
CHECK_TOKEN_BOUNDS;
CHECK_PRIMITIVE;
size_t tokenSize;
char* tokenData;
GET_TOKEN(tokenData, tokenSize);
UA_Int64 out = 0;
UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out);
*dst = (UA_SByte)out;
if(moveToken)
parseCtx->index++;
return s;
} | 0 | [
"CWE-703",
"CWE-787"
] | open62541 | c800e2987b10bb3af6ef644b515b5d6392f8861d | 315,256,849,184,080,860,000,000,000,000,000,000,000 | 15 | fix(json): Check max recursion depth in more places |
/*
 * Return a pointer to the final component of `path` (the text after the
 * last '/'), by scanning backwards from the end of the string.
 *
 * NOTE(review): if `path` is empty or contains no '/', `fname` walks to
 * path - 1 and that out-of-bounds pointer is returned; callers must
 * guarantee a non-empty path containing a '/'.
 */
static char *basename(char *path)
{
char *fname;
fname = path + strlen(path) - 1;
while (fname >= path) {
if (*fname == '/') {
fname++;
break;
}
fname--;
}
return fname;
} | 0 | [
"CWE-120",
"CWE-703"
] | u-boot | 5d14ee4e53a81055d34ba280cb8fd90330f22a96 | 112,617,746,794,031,240,000,000,000,000,000,000,000 | 14 | CVE-2019-14196: nfs: fix unbounded memcpy with a failed length check at nfs_lookup_reply
This patch adds a check to rpc_pkt.u.reply.data at nfs_lookup_reply.
Signed-off-by: Cheng Liu <liucheng32@huawei.com>
Reported-by: Fermín Serna <fermin@semmle.com>
Acked-by: Joe Hershberger <joe.hershberger@ni.com> |
/*
 * Per-netns teardown: unregister the ICMPv6 ping /proc entry.
 * NOTE(review): `return expr;` in a void function is a constraint in
 * ISO C, accepted by GCC here presumably because ping_proc_unregister()
 * itself returns void - confirm.
 */
static void __net_init ping_v6_proc_exit_net(struct net *net)
{
return ping_proc_unregister(net, &ping_v6_seq_afinfo);
} | 0 | [
"CWE-20"
] | net | 85fbaa75037d0b6b786ff18658ddf0b4014ce2a4 | 202,610,538,502,784,060,000,000,000,000,000,000,000 | 4 | inet: fix addr_len/msg->msg_namelen assignment in recv_error and rxpmtu functions
Commit bceaa90240b6019ed73b49965eac7d167610be69 ("inet: prevent leakage
of uninitialized memory to user in recv syscalls") conditionally updated
addr_len if the msg_name is written to. The recv_error and rxpmtu
functions relied on the recvmsg functions to set up addr_len before.
As this does not happen any more we have to pass addr_len to those
functions as well and set it to the size of the corresponding sockaddr
length.
This broke traceroute and such.
Fixes: bceaa90240b6 ("inet: prevent leakage of uninitialized memory to user in recv syscalls")
Reported-by: Brad Spengler <spender@grsecurity.net>
Reported-by: Tom Labanowski
Cc: mpb <mpb.mail@gmail.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net> |
// Parse one line of a tags file into its three TAB-separated fields
// (tag name, file name, search command), storing start/end pointers
// into the line in *tagp.  Emacs-style etags lines are delegated to
// emacs_tags_parse_line().  Returns OK on success, FAIL on a malformed
// line.  (Return type is declared on a line above this view.)
parse_tag_line(
char_u *lbuf, // line to be parsed
#ifdef FEAT_EMACS_TAGS
int is_etag,
#endif
tagptrs_T *tagp)
{
char_u *p;
#ifdef FEAT_EMACS_TAGS
if (is_etag)
// emacs-style tag file
return emacs_tags_parse_line(lbuf, tagp);
#endif
// Isolate the tagname, from lbuf up to the first white
tagp->tagname = lbuf;
p = vim_strchr(lbuf, TAB);
if (p == NULL)
return FAIL;
tagp->tagname_end = p;
// Isolate file name, from first to second white space
if (*p != NUL)
++p;
tagp->fname = p;
p = vim_strchr(p, TAB);
if (p == NULL)
return FAIL;
tagp->fname_end = p;
// find start of search command, after second white space
if (*p != NUL)
++p;
if (*p == NUL)
return FAIL;
tagp->command = p;
return OK;
} | 0 | [
"CWE-416"
] | vim | adce965162dd89bf29ee0e5baf53652e7515762c | 195,639,299,741,598,360,000,000,000,000,000,000,000 | 40 | patch 9.0.0246: using freed memory when 'tagfunc' deletes the buffer
Problem: Using freed memory when 'tagfunc' deletes the buffer.
Solution: Make a copy of the tag name. |
/*
 * debugfs "clear" handler for a per-VM statistic: writing 0 resets the
 * counter identified by `_offset` in every VM on vm_list (under
 * kvm_lock); any non-zero value is rejected with -EINVAL.
 */
static int vm_stat_clear(void *_offset, u64 val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
if (val)
return -EINVAL;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_clear_stat_per_vm(kvm, offset);
}
mutex_unlock(&kvm_lock);
return 0;
} | 0 | [
"CWE-416"
] | linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 222,542,183,254,366,100,000,000,000,000,000,000,000 | 16 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <cai@lca.pw>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320205546.2396-2-sean.j.christopherson@intel.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
/*
 * ":resize" ex command: set the height (or, with :vertical, the width)
 * of a window.  A count before the command selects the N-th window;
 * a leading '+' or '-' in the argument makes the change relative to the
 * current size, and an empty argument maximizes (9999).
 * (Return type is declared on a line above this view.)
 */
ex_resize(exarg_T *eap)
{
int n;
win_T *wp = curwin;
if (eap->addr_count > 0)
{
/* Walk to the eap->line2-th window (or the last one). */
n = eap->line2;
for (wp = firstwin; wp->w_next != NULL && --n > 0; wp = wp->w_next)
;
}
# ifdef FEAT_GUI
need_mouse_correct = TRUE;
# endif
n = atol((char *)eap->arg);
if (cmdmod.split & WSP_VERT)
{
if (*eap->arg == '-' || *eap->arg == '+')
n += curwin->w_width;
else if (n == 0 && eap->arg[0] == NUL) /* default is very wide */
n = 9999;
win_setwidth_win((int)n, wp);
}
else
{
if (*eap->arg == '-' || *eap->arg == '+')
n += curwin->w_height;
else if (n == 0 && eap->arg[0] == NUL) /* default is very high */
n = 9999;
win_setheight_win((int)n, wp);
}
} | 0 | [
"CWE-78"
] | vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 175,041,798,568,935,400,000,000,000,000,000,000,000 | 33 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
/*
 * Allocate and initialize a new nedalloc pool for `capacity` bytes and
 * `threads` concurrent threads.  Returns the new pool, or 0 on
 * allocation/initialization failure (the partially built pool is freed).
 */
nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC
{
nedpool *ret;
if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) return 0;
if(!InitPool(ret, capacity, threads))
{
nedpfree(0, ret);
return 0;
}
return ret;
} | 0 | [
"CWE-119",
"CWE-787"
] | git | 34fa79a6cde56d6d428ab0d3160cb094ebad3305 | 37,991,782,231,439,560,000,000,000,000,000,000,000 | 11 | prefer memcpy to strcpy
When we already know the length of a string (e.g., because
we just malloc'd to fit it), it's nicer to use memcpy than
strcpy, as it makes it more obvious that we are not going to
overflow the buffer (because the size we pass matches the
size in the allocation).
This also eliminates calls to strcpy, which make auditing
the code base harder.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com> |
/*
 * Warn if `line` carries capability data after its first NUL: `len` is
 * the full buffer length, so strlen(line) < len means an embedded NUL
 * with trailing bytes, which this parser ignores.
 * NOTE(review): the '%s' argument points at the embedded NUL
 * (line + strlen(line)), so the warning prints the line as empty -
 * confirm whether the text after the NUL was intended instead.
 */
static void check_no_capabilities(const char *line, int len)
{
if (strlen(line) != len)
warning(_("ignoring capabilities after first line '%s'"),
line + strlen(line));
} | 0 | [] | git | a02ea577174ab8ed18f847cf1693f213e0b9c473 | 32,574,065,773,049,750,000,000,000,000,000,000,000 | 6 | git_connect_git(): forbid newlines in host and path
When we connect to a git:// server, we send an initial request that
looks something like:
002dgit-upload-pack repo.git\0host=example.com
If the repo path contains a newline, then it's included literally, and
we get:
002egit-upload-pack repo
.git\0host=example.com
This works fine if you really do have a newline in your repository name;
the server side uses the pktline framing to parse the string, not
newlines. However, there are many _other_ protocols in the wild that do
parse on newlines, such as HTTP. So a carefully constructed git:// URL
can actually turn into a valid HTTP request. For example:
git://localhost:1234/%0d%0a%0d%0aGET%20/%20HTTP/1.1 %0d%0aHost:localhost%0d%0a%0d%0a
becomes:
0050git-upload-pack /
GET / HTTP/1.1
Host:localhost
host=localhost:1234
on the wire. Again, this isn't a problem for a real Git server, but it
does mean that feeding a malicious URL to Git (e.g., through a
submodule) can cause it to make unexpected cross-protocol requests.
Since repository names with newlines are presumably quite rare (and
indeed, we already disallow them in git-over-http), let's just disallow
them over this protocol.
Hostnames could likewise inject a newline, but this is unlikely a
problem in practice; we'd try resolving the hostname with a newline in
it, which wouldn't work. Still, it doesn't hurt to err on the side of
caution there, since we would not expect them to work in the first
place.
The ssh and local code paths are unaffected by this patch. In both cases
we're trying to run upload-pack via a shell, and will quote the newline
so that it makes it intact. An attacker can point an ssh url at an
arbitrary port, of course, but unless there's an actual ssh server
there, we'd never get as far as sending our shell command anyway. We
_could_ similarly restrict newlines in those protocols out of caution,
but there seems little benefit to doing so.
The new test here is run alongside the git-daemon tests, which cover the
same protocol, but it shouldn't actually contact the daemon at all. In
theory we could make the test more robust by setting up an actual
repository with a newline in it (so that our clone would succeed if our
new check didn't kick in). But a repo directory with newline in it is
likely not portable across all filesystems. Likewise, we could check
git-daemon's log that it was not contacted at all, but we do not
currently record the log (and anyway, it would make the test racy with
the daemon's log write). We'll just check the client-side stderr to make
sure we hit the expected code path.
Reported-by: Harold Kim <h.kim@flatt.tech>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com> |
/*
 * Release everything owned by an ffs_data instance: endpoint files, the
 * eventfd context, and the raw descriptor/string buffers.  Must not be
 * called while a gadget is still bound (BUG_ON below).
 */
static void ffs_data_clear(struct ffs_data *ffs)
{
ENTER();
ffs_closed(ffs);
BUG_ON(ffs->gadget);
if (ffs->epfiles)
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
if (ffs->ffs_eventfd)
eventfd_ctx_put(ffs->ffs_eventfd);
/* kfree(NULL) is a no-op, so these need no guards. */
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | 38740a5b87d53ceb89eb2c970150f6e94e00373a | 33,517,435,986,751,307,000,000,000,000,000,000,000 | 18 | usb: gadget: f_fs: Fix use-after-free
When using asynchronous read or write operations on the USB endpoints the
issuer of the IO request is notified by calling the ki_complete() callback
of the submitted kiocb when the URB has been completed.
Calling this ki_complete() callback will free kiocb. Make sure that the
structure is no longer accessed beyond that point, otherwise undefined
behaviour might occur.
Fixes: 2e4c7553cd6f ("usb: gadget: f_fs: add aio support")
Cc: <stable@vger.kernel.org> # v3.15+
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com> |
/*
 * Drop every rtnetlink message handler registered for `protocol` and
 * free the handler table.  BUG()s on an out-of-range protocol family.
 */
void rtnl_unregister_all(int protocol)
{
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
kfree(rtnl_msg_handlers[protocol]);
rtnl_msg_handlers[protocol] = NULL;
} | 0 | [
"CWE-399"
] | linux-2.6 | 84d73cd3fb142bf1298a8c13fd4ca50fd2432372 | 115,454,561,935,789,420,000,000,000,000,000,000,000 | 7 | rtnl: fix info leak on RTM_GETLINK request for VF devices
Initialize the mac address buffer with 0 as the driver specific function
will probably not fill the whole buffer. In fact, all in-kernel drivers
fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible
bytes. Therefore we currently leak 26 bytes of stack memory to userland
via the netlink interface.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
/*
 * Return true if a process with the given pid exists, probed via
 * kill(pid, 0).  Any errno other than ESRCH (e.g. EPERM) still counts
 * as "exists"; non-positive pids are rejected up front.
 */
_PUBLIC_ bool process_exists_by_pid(pid_t pid)
{
/* Doing kill with a non-positive pid causes messages to be
 * sent to places we don't want. */
if (pid <= 0) {
return false;
}
return(kill(pid,0) == 0 || errno != ESRCH);
} | 0 | [] | samba | 8eae8d28bce2c3f6a323d3dc48ed10c2e6bb1ba5 | 231,457,938,701,604,160,000,000,000,000,000,000,000 | 9 | CVE-2013-4476: lib-util: add file_check_permissions()
Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234
Signed-off-by: Björn Baumbach <bb@sernet.de>
Reviewed-by: Stefan Metzmacher <metze@samba.org> |
/**
 * True when `first` is a proper dotted-path prefix of `second`:
 * second must start with first and the very next character must be '.'
 * (e.g. "a.b" is a prefix of "a.b.c" but not of "a.bc" or of "a.b").
 */
bool isPrefixOf(StringData first, StringData second) {
if (first.size() >= second.size()) {
return false;
}
return second.startsWith(first) && second[first.size()] == '.';
} | 0 | [
"CWE-732"
] | mongo | cd583b6c4d8aa2364f255992708b9bb54e110cf4 | 100,208,243,312,219,210,000,000,000,000,000,000,000 | 7 | SERVER-53929 Add stricter parser checks around positional projection |
/*
 * Sync the posted-interrupt descriptor (PIR) into the vAPIC IRR and
 * return the highest pending interrupt vector.  Requires APICv to be
 * active (WARN below).  When running L2, a newly pending L1 interrupt
 * may force an exit to L1 or a re-evaluation of pending events.
 */
static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
bool max_irr_updated;
WARN_ON(!vcpu->arch.apicv_active);
if (pi_test_on(&vmx->pi_desc)) {
pi_clear_on(&vmx->pi_desc);
/*
 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
 * But on x86 this is just a compiler barrier anyway.
 */
smp_mb__after_atomic();
max_irr_updated =
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
/*
 * If we are running L2 and L1 has a new pending interrupt
 * which can be injected, we should re-evaluate
 * what should be done with this new L1 interrupt.
 * If L1 intercepts external-interrupts, we should
 * exit from L2 to L1. Otherwise, interrupt should be
 * delivered directly to L2.
 */
if (is_guest_mode(vcpu) && max_irr_updated) {
if (nested_exit_on_intr(vcpu))
kvm_vcpu_exiting_guest_mode(vcpu);
else
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
} else {
/* No posted interrupts pending: read IRR directly. */
max_irr = kvm_lapic_find_highest_irr(vcpu);
}
vmx_hwapic_irr_update(vcpu, max_irr);
return max_irr;
} | 0 | [
"CWE-284"
] | linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 157,381,563,466,610,100,000,000,000,000,000,000,000 | 37 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: stable@vger.kernel.org
Signed-off-by: Felix Wilhelm <fwilhelm@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
/*
 * Mount-time sanity check of all block-group descriptors: each group's
 * block bitmap, inode bitmap and inode table must lie entirely inside
 * that group's block range.  Returns 1 if every descriptor looks valid,
 * 0 (after logging) otherwise.  On success also refreshes the
 * superblock's free-block and free-inode counts.
 */
static int ext3_check_descriptors(struct super_block *sb)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
int i;
ext3_debug ("Checking group descriptors");
for (i = 0; i < sbi->s_groups_count; i++) {
struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
ext3_fsblk_t last_block;
/* The last group may be shorter than a full group. */
if (i == sbi->s_groups_count - 1)
last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
else
last_block = first_block +
(EXT3_BLOCKS_PER_GROUP(sb) - 1);
if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
le32_to_cpu(gdp->bg_block_bitmap) > last_block)
{
ext3_error (sb, "ext3_check_descriptors",
"Block bitmap for group %d"
" not in group (block %lu)!",
i, (unsigned long)
le32_to_cpu(gdp->bg_block_bitmap));
return 0;
}
if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
{
ext3_error (sb, "ext3_check_descriptors",
"Inode bitmap for group %d"
" not in group (block %lu)!",
i, (unsigned long)
le32_to_cpu(gdp->bg_inode_bitmap));
return 0;
}
/* The inode table spans s_itb_per_group blocks. */
if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
last_block)
{
ext3_error (sb, "ext3_check_descriptors",
"Inode table for group %d"
" not in group (block %lu)!",
i, (unsigned long)
le32_to_cpu(gdp->bg_inode_table));
return 0;
}
}
sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb));
return 1;
} | 0 | [
"CWE-20"
] | linux | 8d0c2d10dd72c5292eda7a06231056a4c972e4cc | 273,081,817,640,403,220,000,000,000,000,000,000,000 | 55 | ext3: Fix format string issues
ext3_msg() takes the printk prefix as the second parameter and the
format string as the third parameter. Two callers of ext3_msg omit the
prefix and pass the format string as the second parameter and the first
parameter to the format string as the third parameter. In both cases
this string comes from an arbitrary source. Which means the string may
contain format string characters, which will
lead to undefined and potentially harmful behavior.
The issue was introduced in commit 4cf46b67eb("ext3: Unify log messages
in ext3") and is fixed by this patch.
CC: stable@vger.kernel.org
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jan Kara <jack@suse.cz> |
static bool svm_pku_supported(void)
{
return false;
} | 0 | [
"CWE-401"
] | linux | d80b64ff297e40c2b6f7d7abc1b3eba70d22a068 | 296,308,897,821,654,300,000,000,000,000,000,000,000 | 4 | KVM: SVM: Fix potential memory leak in svm_cpu_init()
When kmalloc memory for sd->sev_vmcbs failed, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
static diy_fp_t double2diy_fp(double d)
{
uint64_t d64 = double_to_uint64(d);
int biased_e = (d64 & DP_EXPONENT_MASK) >> DP_SIGNIFICAND_SIZE;
uint64_t significand = (d64 & DP_SIGNIFICAND_MASK);
diy_fp_t res;
if (biased_e != 0) {
res.f = significand + DP_HIDDEN_BIT;
res.e = biased_e - DP_EXPONENT_BIAS;
} else {
res.f = significand;
res.e = DP_MIN_EXPONENT + 1;
}
return res;
} | 0 | [
"CWE-190"
] | mujs | 25821e6d74fab5fcc200fe5e818362e03e114428 | 186,218,498,492,024,600,000,000,000,000,000,000,000 | 15 | Fix 698920: Guard jsdtoa from integer overflow wreaking havoc. |
wkbConvPointToShape(wkbObj *w, shapeObj *shape)
{
int type;
lineObj line;
/*endian = */wkbReadChar(w);
type = wkbTypeMap(w,wkbReadInt(w));
if( type != WKB_POINT ) return MS_FAILURE;
if( ! (shape->type == MS_SHAPE_POINT) ) return MS_FAILURE;
line.numpoints = 1;
line.point = msSmallMalloc(sizeof(pointObj));
line.point[0] = wkbReadPoint(w);
msAddLineDirectly(shape, &line);
return MS_SUCCESS;
} | 0 | [
"CWE-89"
] | mapserver | 3a10f6b829297dae63492a8c63385044bc6953ed | 7,774,104,320,201,004,000,000,000,000,000,000,000 | 17 | Fix potential SQL Injection with postgis TIME filters (#4834) |
static void _ion_buffer_destroy(struct kref *kref)
{
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_heap *heap = buffer->heap;
struct ion_device *dev = buffer->dev;
mutex_lock(&dev->buffer_lock);
rb_erase(&buffer->node, &dev->buffers);
mutex_unlock(&dev->buffer_lock);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_freelist_add(heap, buffer);
else
ion_buffer_destroy(buffer);
} | 0 | [
"CWE-416",
"CWE-284"
] | linux | 9590232bb4f4cc824f3425a6e1349afbe6d6d2b7 | 207,497,474,684,472,700,000,000,000,000,000,000,000 | 15 | staging/android/ion : fix a race condition in the ion driver
There is a use-after-free problem in the ion driver.
This is caused by a race condition in the ion_ioctl()
function.
A handle has ref count of 1 and two tasks on different
cpus calls ION_IOC_FREE simultaneously.
cpu 0 cpu 1
-------------------------------------------------------
ion_handle_get_by_id()
(ref == 2)
ion_handle_get_by_id()
(ref == 3)
ion_free()
(ref == 2)
ion_handle_put()
(ref == 1)
ion_free()
(ref == 0 so ion_handle_destroy() is
called
and the handle is freed.)
ion_handle_put() is called and it
decreases the slub's next free pointer
The problem is detected as an unaligned access in the
spin lock functions since it uses load exclusive
instruction. In some cases it corrupts the slub's
free pointer which causes a mis-aligned access to the
next free pointer.(kmalloc returns a pointer like
ffffc0745b4580aa). And it causes lots of other
hard-to-debug problems.
This symptom is caused since the first member in the
ion_handle structure is the reference count and the
ion driver decrements the reference after it has been
freed.
To fix this problem client->lock mutex is extended
to protect all the codes that uses the handle.
Signed-off-by: Eun Taik Lee <eun.taik.lee@samsung.com>
Reviewed-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
T toNumber(const std::vector<char> &data, bool mostSignificantByteFirst)
{
T sum = 0;
if(data.size() <= 0) {
debug("ByteVectorMirror::toNumber<T>() -- data is empty, returning 0");
return sum;
}
uint size = sizeof(T);
uint last = data.size() > size ? size - 1 : data.size() - 1;
for(uint i = 0; i <= last; i++)
sum |= (T) uchar(data[i]) << ((mostSignificantByteFirst ? last - i : i) * 8);
return sum;
} | 0 | [
"CWE-189"
] | taglib | dcdf4fd954e3213c355746fa15b7480461972308 | 268,102,497,216,937,100,000,000,000,000,000,000,000 | 17 | Avoid uint overflow in case the length + index is over UINT_MAX |
static RzList *recurse_bb(RzCore *core, ut64 addr, RzAnalysisBlock *dest) {
RzAnalysisBlock *bb = rz_analysis_find_most_relevant_block_in(core->analysis, addr);
if (bb == dest) {
eprintf("path found!");
return NULL;
}
return recurse(core, bb, dest);
} | 0 | [
"CWE-703"
] | rizin | 6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939 | 315,582,691,721,004,440,000,000,000,000,000,000,000 | 8 | Initialize retctx,ctx before freeing the inner elements
In rz_core_analysis_type_match retctx structure was initialized on the
stack only after a "goto out_function", where a field of that structure
was freed. When the goto path is taken, the field is not properly
initialized and it cause cause a crash of Rizin or have other effects.
Fixes: CVE-2021-4022 |
void CWebServer::Cmd_GetSerialDevices(WebEmSession & session, const request& req, Json::Value &root)
{
root["status"] = "OK";
root["title"] = "GetSerialDevices";
bool bUseDirectPath = false;
std::vector<std::string> serialports = GetSerialPorts(bUseDirectPath);
int ii = 0;
for (const auto & itt : serialports)
{
root["result"][ii]["name"] = itt;
root["result"][ii]["value"] = ii;
ii++;
}
} | 0 | [
"CWE-89"
] | domoticz | ee70db46f81afa582c96b887b73bcd2a86feda00 | 263,846,653,662,275,650,000,000,000,000,000,000,000 | 15 | Fixed possible SQL Injection Vulnerability (Thanks to Fabio Carretto!) |
static void FVMenuShowDependentRefs(GWindow gw, struct gmenuitem *UNUSED(mi), GEvent *UNUSED(e)) {
FontView *fv = (FontView *) GDrawGetUserData(gw);
int pos = FVAnyCharSelected(fv);
SplineChar *sc;
if ( pos<0 || fv->b.map->map[pos]==-1 )
return;
sc = fv->b.sf->glyphs[fv->b.map->map[pos]];
if ( sc==NULL || sc->dependents==NULL )
return;
SCRefBy(sc);
} | 0 | [
"CWE-119",
"CWE-787"
] | fontforge | 626f751752875a0ddd74b9e217b6f4828713573c | 11,520,036,835,393,484,000,000,000,000,000,000,000 | 12 | Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846. |
zwcheck(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
int code = access_check(i_ctx_p, a_write, false);
if (code >= 0)
make_bool(op, code), code = 0;
return code;
} | 0 | [] | ghostpdl | 0edd3d6c634a577db261615a9dc2719bca7f6e01 | 11,986,994,120,118,004,000,000,000,000,000,000,000 | 9 | Bug 699659: Don't just assume an object is a t_(a)struct |
static void get_type_str(xmlNodePtr node, const char* ns, const char* type, smart_str* ret)
{
TSRMLS_FETCH();
if (ns) {
xmlNsPtr xmlns;
if (SOAP_GLOBAL(soap_version) == SOAP_1_2 &&
strcmp(ns,SOAP_1_1_ENC_NAMESPACE) == 0) {
ns = SOAP_1_2_ENC_NAMESPACE;
} else if (SOAP_GLOBAL(soap_version) == SOAP_1_1 &&
strcmp(ns,SOAP_1_2_ENC_NAMESPACE) == 0) {
ns = SOAP_1_1_ENC_NAMESPACE;
}
xmlns = encode_add_ns(node, ns);
smart_str_appends(ret, (char*)xmlns->prefix);
smart_str_appendc(ret, ':');
}
smart_str_appendl(ret, type, strlen(type));
smart_str_0(ret);
} | 0 | [
"CWE-19"
] | php-src | c8eaca013a3922e8383def6158ece2b63f6ec483 | 214,502,043,462,525,900,000,000,000,000,000,000,000 | 20 | Added type checks |
TEST_F(QueryPlannerTest, LockstepOrEnumerationSanityCheckTwoChildrenOneIndexEach) {
params.options =
QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::ENUMERATE_OR_CHILDREN_LOCKSTEP;
addIndex(BSON("a" << 1 << "b" << 1));
addIndex(BSON("a" << 1 << "c" << 1));
runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1, $or: [{b: 1}, {c: 2}]}}"));
assertNumSolutions(3U);
assertSolutionExists(
"{fetch: {filter: null, node: {or: {nodes: [{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: "
"{pattern: {a: 1, c: 1}}}]}}}}");
assertSolutionExists(
"{fetch: {filter: {$or: [{b: {$eq: 1}}, {c: {$eq: 2}}]}, node: {ixscan: {pattern: {a: 1, "
"b: 1}}}}}}}");
assertSolutionExists(
"{fetch: {filter: {$or: [{b: {$eq: 1}}, {c: {$eq: 2}}]}, node: {ixscan: {pattern: {a: 1, "
"c: 1}}}}}}}");
} | 0 | [] | mongo | 64095239f41e9f3841d8be9088347db56d35c891 | 136,357,904,830,911,580,000,000,000,000,000,000,000 | 20 | SERVER-51083 Reject invalid UTF-8 from $regex match expressions |
join_read_const(JOIN_TAB *tab)
{
int error;
TABLE *table= tab->table;
if (table->status & STATUS_GARBAGE) // If first read
{
table->status= 0;
if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
error=HA_ERR_KEY_NOT_FOUND;
else
{
error= table->file->ha_index_read_idx_map(table->record[0],tab->ref.key,
(uchar*) tab->ref.key_buff,
make_prev_keypart_map(tab->ref.key_parts),
HA_READ_KEY_EXACT);
}
if (error)
{
table->status= STATUS_NOT_FOUND;
mark_as_null_row(tab->table);
empty_record(table);
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1;
}
store_record(table,record[1]);
}
else if (!(table->status & ~STATUS_NULL_ROW)) // Only happens with left join
{
table->status=0;
restore_record(table,record[1]); // restore old record
}
table->null_row=0;
return table->status ? -1 : 0;
} | 0 | [
"CWE-89"
] | server | 5ba77222e9fe7af8ff403816b5338b18b342053c | 41,229,219,278,345,970,000,000,000,000,000,000,000 | 35 | MDEV-21028 Server crashes in Query_arena::set_query_arena upon SELECT from view
if the view has algorithm=temptable it is not updatable,
so DEFAULT() for its fields is meaningless,
and thus it's NULL or 0/'' for NOT NULL columns. |
static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
struct flowi4 fl4;
__u8 tos, ttl;
__be16 df = 0;
__be16 sport;
int err;
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
if (IS_ERR(rt))
return PTR_ERR(rt);
err = skb_tunnel_check_pmtu(skb, &rt->dst,
GENEVE_IPV4_HLEN + info->options_len,
netif_is_any_bridge_port(dev));
if (err < 0) {
dst_release(&rt->dst);
return err;
} else if (err) {
struct ip_tunnel_info *info;
info = skb_tunnel_info(skb);
if (info) {
info->key.u.ipv4.dst = fl4.saddr;
info->key.u.ipv4.src = fl4.daddr;
}
if (!pskb_may_pull(skb, ETH_HLEN)) {
dst_release(&rt->dst);
return -EINVAL;
}
skb->protocol = eth_type_trans(skb, geneve->dev);
netif_rx(skb);
dst_release(&rt->dst);
return -EMSGSIZE;
}
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->cfg.collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
else
ttl = key->ttl;
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
if (geneve->cfg.df == GENEVE_DF_SET) {
df = htons(IP_DF);
} else if (geneve->cfg.df == GENEVE_DF_INHERIT) {
struct ethhdr *eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_IPV6) {
df = htons(IP_DF);
} else if (ntohs(eth->h_proto) == ETH_P_IP) {
struct iphdr *iph = ip_hdr(skb);
if (iph->frag_off & htons(IP_DF))
df = htons(IP_DF);
}
}
}
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
if (unlikely(err))
return err;
udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
tos, ttl, df, sport, geneve->cfg.info.key.tp_dst,
!net_eq(geneve->net, dev_net(geneve->dev)),
!(info->key.tun_flags & TUNNEL_CSUM));
return 0;
} | 1 | [
"CWE-319"
] | net | 34beb21594519ce64a55a498c2fe7d567bc1ca20 | 178,050,373,547,606,380,000,000,000,000,000,000,000 | 84 | geneve: add transport ports in route lookup for geneve
This patch adds transport ports information for route lookup so that
IPsec can select Geneve tunnel traffic to do encryption. This is
needed for OVS/OVN IPsec with encrypted Geneve tunnels.
This can be tested by configuring a host-host VPN using an IKE
daemon and specifying port numbers. For example, for an
Openswan-type configuration, the following parameters should be
configured on both hosts and IPsec set up as-per normal:
$ cat /etc/ipsec.conf
conn in
...
left=$IP1
right=$IP2
...
leftprotoport=udp/6081
rightprotoport=udp
...
conn out
...
left=$IP1
right=$IP2
...
leftprotoport=udp
rightprotoport=udp/6081
...
The tunnel can then be setup using "ip" on both hosts (but
changing the relevant IP addresses):
$ ip link add tun type geneve id 1000 remote $IP2
$ ip addr add 192.168.0.1/24 dev tun
$ ip link set tun up
This can then be tested by pinging from $IP1:
$ ping 192.168.0.2
Without this patch the traffic is unencrypted on the wire.
Fixes: 2d07dc79fe04 ("geneve: add initial netdev driver for GENEVE tunnels")
Signed-off-by: Qiuyu Xiao <qiuyu.xiao.qyx@gmail.com>
Signed-off-by: Mark Gray <mark.d.gray@redhat.com>
Reviewed-by: Greg Rose <gvrose8192@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
static inline int sctp_peer_needs_update(struct sctp_association *asoc)
{
switch (asoc->state) {
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
case SCTP_STATE_SHUTDOWN_SENT:
if ((asoc->rwnd > asoc->a_rwnd) &&
((asoc->rwnd - asoc->a_rwnd) >=
min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pathmtu)))
return 1;
break;
default:
break;
}
return 0;
} | 0 | [
"CWE-287"
] | linux-2.6 | add52379dde2e5300e2d574b172e62c6cf43b3d3 | 76,248,221,053,476,510,000,000,000,000,000,000,000 | 17 | sctp: Fix oops when INIT-ACK indicates that peer doesn't support AUTH
If INIT-ACK is received with SupportedExtensions parameter which
indicates that the peer does not support AUTH, the packet will be
silently ignore, and sctp_process_init() do cleanup all of the
transports in the association.
When T1-Init timer is expires, OOPS happen while we try to choose
a different init transport.
The solution is to only clean up the non-active transports, i.e
the ones that the peer added. However, that introduces a problem
with sctp_connectx(), because we don't mark the proper state for
the transports provided by the user. So, we'll simply mark
user-provided transports as ACTIVE. That will allow INIT
retransmissions to work properly in the sctp_connectx() context
and prevent the crash.
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
size_t Item_sum::ram_limitation(THD *thd)
{
return (size_t)MY_MIN(thd->variables.tmp_memory_table_size,
thd->variables.max_heap_table_size);
} | 0 | [
"CWE-120"
] | server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 332,303,623,663,751,730,000,000,000,000,000,000,000 | 5 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix. |
xsltParseRemoveWhitespace(xmlNodePtr node)
{
if ((node == NULL) || (node->children == NULL))
return(0);
else {
xmlNodePtr delNode = NULL, child = node->children;
do {
if (delNode) {
xmlUnlinkNode(delNode);
xmlFreeNode(delNode);
delNode = NULL;
}
if (((child->type == XML_TEXT_NODE) ||
(child->type == XML_CDATA_SECTION_NODE)) &&
(IS_BLANK_NODE(child)))
delNode = child;
child = child->next;
} while (child != NULL);
if (delNode) {
xmlUnlinkNode(delNode);
xmlFreeNode(delNode);
delNode = NULL;
}
}
return(0);
} | 0 | [] | libxslt | 7089a62b8f133b42a2981cf1f920a8b3fe9a8caa | 61,349,841,717,053,680,000,000,000,000,000,000,000 | 27 | Crash compiling stylesheet with DTD
* libxslt/xslt.c: when a stylesheet embbeds a DTD the compilation
process could get seriously wrong |
xmlBufAttrSerializeTxtContent(xmlBufPtr buf, xmlDocPtr doc,
xmlAttrPtr attr, const xmlChar * string)
{
xmlChar *base, *cur;
if (string == NULL)
return;
base = cur = (xmlChar *) string;
while (*cur != 0) {
if (*cur == '\n') {
if (base != cur)
xmlBufAdd(buf, base, cur - base);
xmlBufAdd(buf, BAD_CAST " ", 5);
cur++;
base = cur;
} else if (*cur == '\r') {
if (base != cur)
xmlBufAdd(buf, base, cur - base);
xmlBufAdd(buf, BAD_CAST " ", 5);
cur++;
base = cur;
} else if (*cur == '\t') {
if (base != cur)
xmlBufAdd(buf, base, cur - base);
xmlBufAdd(buf, BAD_CAST "	", 4);
cur++;
base = cur;
} else if (*cur == '"') {
if (base != cur)
xmlBufAdd(buf, base, cur - base);
xmlBufAdd(buf, BAD_CAST """, 6);
cur++;
base = cur;
} else if (*cur == '<') {
if (base != cur)
xmlBufAdd(buf, base, cur - base);
xmlBufAdd(buf, BAD_CAST "<", 4);
cur++;
base = cur;
} else if (*cur == '>') {
if (base != cur)
xmlBufAdd(buf, base, cur - base);
xmlBufAdd(buf, BAD_CAST ">", 4);
cur++;
base = cur;
} else if (*cur == '&') {
if (base != cur)
xmlBufAdd(buf, base, cur - base);
xmlBufAdd(buf, BAD_CAST "&", 5);
cur++;
base = cur;
} else if ((*cur >= 0x80) && (cur[1] != 0) &&
((doc == NULL) || (doc->encoding == NULL))) {
/*
* We assume we have UTF-8 content.
*/
unsigned char tmp[12];
int val = 0, l = 1;
if (base != cur)
xmlBufAdd(buf, base, cur - base);
if (*cur < 0xC0) {
xmlSaveErr(XML_SAVE_NOT_UTF8, (xmlNodePtr) attr, NULL);
if (doc != NULL)
doc->encoding = xmlStrdup(BAD_CAST "ISO-8859-1");
xmlSerializeHexCharRef(tmp, *cur);
xmlBufAdd(buf, (xmlChar *) tmp, -1);
cur++;
base = cur;
continue;
} else if (*cur < 0xE0) {
val = (cur[0]) & 0x1F;
val <<= 6;
val |= (cur[1]) & 0x3F;
l = 2;
} else if ((*cur < 0xF0) && (cur [2] != 0)) {
val = (cur[0]) & 0x0F;
val <<= 6;
val |= (cur[1]) & 0x3F;
val <<= 6;
val |= (cur[2]) & 0x3F;
l = 3;
} else if ((*cur < 0xF8) && (cur [2] != 0) && (cur[3] != 0)) {
val = (cur[0]) & 0x07;
val <<= 6;
val |= (cur[1]) & 0x3F;
val <<= 6;
val |= (cur[2]) & 0x3F;
val <<= 6;
val |= (cur[3]) & 0x3F;
l = 4;
}
if ((l == 1) || (!IS_CHAR(val))) {
xmlSaveErr(XML_SAVE_CHAR_INVALID, (xmlNodePtr) attr, NULL);
if (doc != NULL)
doc->encoding = xmlStrdup(BAD_CAST "ISO-8859-1");
xmlSerializeHexCharRef(tmp, *cur);
xmlBufAdd(buf, (xmlChar *) tmp, -1);
cur++;
base = cur;
continue;
}
/*
* We could do multiple things here. Just save
* as a char ref
*/
xmlSerializeHexCharRef(tmp, val);
xmlBufAdd(buf, (xmlChar *) tmp, -1);
cur += l;
base = cur;
} else {
cur++;
}
}
if (base != cur)
xmlBufAdd(buf, base, cur - base);
} | 0 | [
"CWE-502"
] | libxml2 | c97750d11bb8b6f3303e7131fe526a61ac65bcfd | 230,895,568,099,009,900,000,000,000,000,000,000,000 | 118 | Avoid an out of bound access when serializing malformed strings
For https://bugzilla.gnome.org/show_bug.cgi?id=766414
* xmlsave.c: xmlBufAttrSerializeTxtContent() if an attribute value
is not UTF-8 be more careful when serializing it as we may do an
out of bound access as a result. |
bool Inflator::DecodeBody()
{
bool blockEnd = false;
switch (m_blockType)
{
case 0: // stored
CRYPTOPP_ASSERT(m_reader.BitsBuffered() == 0);
while (!m_inQueue.IsEmpty() && !blockEnd)
{
size_t size;
const byte *block = m_inQueue.Spy(size);
size = UnsignedMin(m_storedLen, size);
CRYPTOPP_ASSERT(size <= 0xffff);
OutputString(block, size);
m_inQueue.Skip(size);
m_storedLen = m_storedLen - (word16)size;
if (m_storedLen == 0)
blockEnd = true;
}
break;
case 1: // fixed codes
case 2: // dynamic codes
static const unsigned int lengthStarts[] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258};
static const unsigned int lengthExtraBits[] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0};
static const unsigned int distanceStarts[] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577};
static const unsigned int distanceExtraBits[] = {
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
12, 12, 13, 13};
const HuffmanDecoder& literalDecoder = GetLiteralDecoder();
const HuffmanDecoder& distanceDecoder = GetDistanceDecoder();
switch (m_nextDecode)
{
case LITERAL:
while (true)
{
if (!literalDecoder.Decode(m_reader, m_literal))
{
m_nextDecode = LITERAL;
break;
}
if (m_literal < 256)
OutputByte((byte)m_literal);
else if (m_literal == 256) // end of block
{
blockEnd = true;
break;
}
else
{
if (m_literal > 285)
throw BadBlockErr();
unsigned int bits;
case LENGTH_BITS:
bits = lengthExtraBits[m_literal-257];
if (!m_reader.FillBuffer(bits))
{
m_nextDecode = LENGTH_BITS;
break;
}
m_literal = m_reader.GetBits(bits) + lengthStarts[m_literal-257];
case DISTANCE:
if (!distanceDecoder.Decode(m_reader, m_distance))
{
m_nextDecode = DISTANCE;
break;
}
case DISTANCE_BITS:
// TODO: this surfaced during fuzzing. What do we do???
CRYPTOPP_ASSERT(m_distance < COUNTOF(distanceExtraBits));
if (m_distance >= COUNTOF(distanceExtraBits))
throw BadDistanceErr();
bits = distanceExtraBits[m_distance];
if (!m_reader.FillBuffer(bits))
{
m_nextDecode = DISTANCE_BITS;
break;
}
// TODO: this surfaced during fuzzing. What do we do???
CRYPTOPP_ASSERT(m_distance < COUNTOF(distanceStarts));
if (m_distance >= COUNTOF(distanceStarts))
throw BadDistanceErr();
m_distance = m_reader.GetBits(bits) + distanceStarts[m_distance];
OutputPast(m_literal, m_distance);
}
}
break;
default:
CRYPTOPP_ASSERT(0);
}
}
if (blockEnd)
{
if (m_eof)
{
FlushOutput();
m_reader.SkipBits(m_reader.BitsBuffered()%8);
if (m_reader.BitsBuffered())
{
// undo too much lookahead
SecBlockWithHint<byte, 4> buffer(m_reader.BitsBuffered() / 8);
for (unsigned int i=0; i<buffer.size(); i++)
buffer[i] = (byte)m_reader.GetBits(8);
m_inQueue.Unget(buffer, buffer.size());
}
m_state = POST_STREAM;
}
else
m_state = WAIT_HEADER;
}
return blockEnd;
}
| 0 | [
"CWE-190",
"CWE-125"
] | cryptopp | 07dbcc3d9644b18e05c1776db2a57fe04d780965 | 198,899,092,927,718,220,000,000,000,000,000,000,000 | 122 | Add Inflator::BadDistanceErr exception (Issue 414)
The improved validation and excpetion clears the Address Sanitizer and Undefined Behavior Sanitizer findings |
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
struct rdma_id_private *id_priv;
struct cma_multicast *mc;
id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irq(&id_priv->lock);
list_for_each_entry(mc, &id_priv->mc_list, list) {
if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
list_del(&mc->list);
spin_unlock_irq(&id_priv->lock);
if (id->qp)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
be16_to_cpu(mc->multicast.ib->rec.mlid));
if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
switch (rdma_port_get_link_layer(id->device, id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
break;
case IB_LINK_LAYER_ETHERNET:
kref_put(&mc->mcref, release_mc);
break;
default:
break;
}
}
return;
}
}
spin_unlock_irq(&id_priv->lock);
} | 0 | [
"CWE-20"
] | linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 15,464,750,188,356,183,000,000,000,000,000,000,000 | 34 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com> |
static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
{
int rem, err = -EINVAL;
struct nlattr *vf;
const struct net_device_ops *ops = dev->netdev_ops;
nla_for_each_nested(vf, attr, rem) {
switch (nla_type(vf)) {
case IFLA_VF_MAC: {
struct ifla_vf_mac *ivm;
ivm = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_mac)
err = ops->ndo_set_vf_mac(dev, ivm->vf,
ivm->mac);
break;
}
case IFLA_VF_VLAN: {
struct ifla_vf_vlan *ivv;
ivv = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_vlan)
err = ops->ndo_set_vf_vlan(dev, ivv->vf,
ivv->vlan,
ivv->qos);
break;
}
case IFLA_VF_TX_RATE: {
struct ifla_vf_tx_rate *ivt;
ivt = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_tx_rate)
err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
ivt->rate);
break;
}
case IFLA_VF_SPOOFCHK: {
struct ifla_vf_spoofchk *ivs;
ivs = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_spoofchk)
err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
ivs->setting);
break;
}
default:
err = -EINVAL;
break;
}
if (err)
break;
}
return err;
} | 0 | [
"CWE-399"
] | linux-2.6 | 84d73cd3fb142bf1298a8c13fd4ca50fd2432372 | 126,629,266,471,190,440,000,000,000,000,000,000,000 | 54 | rtnl: fix info leak on RTM_GETLINK request for VF devices
Initialize the mac address buffer with 0 as the driver specific function
will probably not fill the whole buffer. In fact, all in-kernel drivers
fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible
bytes. Therefore we currently leak 26 bytes of stack memory to userland
via the netlink interface.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
MODRET set_useralias(cmd_rec *cmd) {
config_rec *c = NULL;
CHECK_ARGS(cmd, 2);
CHECK_CONF(cmd, CONF_ROOT|CONF_VIRTUAL|CONF_GLOBAL|CONF_ANON);
/* Make sure that the given names differ. */
if (strcmp(cmd->argv[1], cmd->argv[2]) == 0)
CONF_ERROR(cmd, "alias and real user names must differ");
c = add_config_param_str(cmd->argv[0], 2, cmd->argv[1], cmd->argv[2]);
/* Note: only merge this directive down if it is not appearing in an
* <Anonymous> context.
*/
if (!check_context(cmd, CONF_ANON)) {
c->flags |= CF_MERGEDOWN_MULTI;
}
return PR_HANDLED(cmd);
} | 0 | [
"CWE-59",
"CWE-61"
] | proftpd | ecff21e0d0e84f35c299ef91d7fda088e516d4ed | 300,689,511,580,243,800,000,000,000,000,000,000,000 | 21 | Backporting recursive handling of DefaultRoot path, when AllowChrootSymlinks
is off, to 1.3.5 branch. |
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct file *filp;
struct inotify_device *dev;
int ret, fput_needed;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
if (unlikely(filp->f_op != &inotify_fops)) {
ret = -EINVAL;
goto out;
}
dev = filp->private_data;
/* we free our watch data when we get IN_IGNORED */
ret = inotify_rm_wd(dev->ih, wd);
out:
fput_light(filp, fput_needed);
return ret;
} | 0 | [
"CWE-399"
] | linux-2.6 | 3632dee2f8b8a9720329f29eeaa4ec4669a3aff8 | 195,646,196,696,232,250,000,000,000,000,000,000,000 | 25 | inotify: clean up inotify_read and fix locking problems
If userspace supplies an invalid pointer to a read() of an inotify
instance, the inotify device's event list mutex is unlocked twice.
This causes an unbalance which effectively leaves the data structure
unprotected, and we can trigger oopses by accessing the inotify
instance from different tasks concurrently.
The best fix (contributed largely by Linus) is a total rewrite
of the function in question:
On Thu, Jan 22, 2009 at 7:05 AM, Linus Torvalds wrote:
> The thing to notice is that:
>
> - locking is done in just one place, and there is no question about it
> not having an unlock.
>
> - that whole double-while(1)-loop thing is gone.
>
> - use multiple functions to make nesting and error handling sane
>
> - do error testing after doing the things you always need to do, ie do
> this:
>
> mutex_lock(..)
> ret = function_call();
> mutex_unlock(..)
>
> .. test ret here ..
>
> instead of doing conditional exits with unlocking or freeing.
>
> So if the code is written in this way, it may still be buggy, but at least
> it's not buggy because of subtle "forgot to unlock" or "forgot to free"
> issues.
>
> This _always_ unlocks if it locked, and it always frees if it got a
> non-error kevent.
Cc: John McCutchan <ttb@tentacle.dhs.org>
Cc: Robert Love <rlove@google.com>
Cc: <stable@kernel.org>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
TEST_F(ConnectionManagerUtilityTest, MtlsSanitizeSetClientCert) {
auto ssl = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();
ON_CALL(*ssl, peerCertificatePresented()).WillByDefault(Return(true));
const std::vector<std::string> local_uri_sans{"test://foo.com/be"};
EXPECT_CALL(*ssl, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans));
std::string expected_sha("abcdefg");
EXPECT_CALL(*ssl, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha));
std::string peer_subject("/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=test.lyft.com");
EXPECT_CALL(*ssl, subjectPeerCertificate()).WillOnce(ReturnRef(peer_subject));
const std::vector<std::string> peer_uri_sans{"test://foo.com/fe"};
EXPECT_CALL(*ssl, uriSanPeerCertificate()).WillRepeatedly(Return(peer_uri_sans));
std::string expected_pem("abcde=");
EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(expected_pem));
std::string expected_chain_pem(expected_pem + "lmnop=");
EXPECT_CALL(*ssl, urlEncodedPemEncodedPeerCertificateChain())
.WillOnce(ReturnRef(expected_chain_pem));
ON_CALL(connection_, ssl()).WillByDefault(Return(ssl));
ON_CALL(config_, forwardClientCert())
.WillByDefault(Return(Http::ForwardClientCertType::SanitizeSet));
std::vector<Http::ClientCertDetailsType> details = std::vector<Http::ClientCertDetailsType>();
details.push_back(Http::ClientCertDetailsType::Subject);
details.push_back(Http::ClientCertDetailsType::URI);
details.push_back(Http::ClientCertDetailsType::Cert);
details.push_back(Http::ClientCertDetailsType::Chain);
ON_CALL(config_, setCurrentClientCertDetails()).WillByDefault(ReturnRef(details));
TestRequestHeaderMapImpl headers{
{"x-forwarded-client-cert", "By=test://foo.com/fe;URI=test://bar.com/be"}};
EXPECT_EQ((MutateRequestRet{"10.0.0.3:50000", false, Tracing::Reason::NotTraceable}),
callMutateRequestHeaders(headers, Protocol::Http2));
EXPECT_TRUE(headers.has("x-forwarded-client-cert"));
EXPECT_EQ("By=test://foo.com/be;Hash=abcdefg;Subject=\"/C=US/ST=CA/L=San "
"Francisco/OU=Lyft/CN=test.lyft.com\";URI=test://foo.com/"
"fe;Cert=\"abcde=\";Chain=\"abcde=lmnop=\"",
headers.get_("x-forwarded-client-cert"));
} | 0 | [
"CWE-22"
] | envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 21,248,693,440,533,375,000,000,000,000,000,000,000 | 36 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <yavlasov@google.com> |
static void agent_connect(UdscsConnection *conn)
{
struct agent_data *agent_data;
agent_data = g_new0(struct agent_data, 1);
GError *err = NULL;
gint pid;
if (session_info) {
pid = vdagent_connection_get_peer_pid(VDAGENT_CONNECTION(conn), &err);
if (err || pid <= 0) {
static const char msg[] = "Could not get peer PID, disconnecting new client";
if (err) {
syslog(LOG_ERR, "%s: %s", msg, err->message);
g_error_free(err);
} else {
syslog(LOG_ERR, "%s", msg);
}
agent_data_destroy(agent_data);
udscs_server_destroy_connection(server, conn);
return;
}
agent_data->session = session_info_session_for_pid(session_info, pid);
}
g_object_set_data_full(G_OBJECT(conn), "agent_data", agent_data,
(GDestroyNotify) agent_data_destroy);
udscs_write(conn, VDAGENTD_VERSION, 0, 0,
(uint8_t *)VERSION, strlen(VERSION) + 1);
update_active_session_connection(conn);
if (device_info) {
forward_data_to_session_agent(VDAGENTD_GRAPHICS_DEVICE_INFO,
(uint8_t *) device_info, device_info_size);
}
} | 1 | [
"CWE-362"
] | spice-vd_agent | 51c415df82a52e9ec033225783c77df95f387891 | 331,940,420,441,421,550,000,000,000,000,000,000,000 | 36 | Avoids user session hijacking
Avoids user hijacking sessions by reusing PID.
In theory an attacker could:
- open a connection to the daemon;
- fork and exit the process but keep the file descriptor open
(inheriting or duplicating it in forked process);
- force OS to recycle the initial PID, by creating many short lived
processes.
Daemon would detect the old PID as having the new session.
Check the user to avoid such replacements.
This issue was reported by SUSE security team.
Signed-off-by: Frediano Ziglio <freddy77@gmail.com>
Acked-by: Uri Lublin <uril@redhat.com> |
static OPJ_BOOL opj_j2k_write_SQcd_SQcc(opj_j2k_t *p_j2k,
OPJ_UINT32 p_tile_no,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_header_size,
struct opj_event_mgr * p_manager)
{
OPJ_UINT32 l_header_size;
OPJ_UINT32 l_band_no, l_num_bands;
OPJ_UINT32 l_expn, l_mant;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_tccp_t *l_tccp = 00;
/* preconditions */
assert(p_j2k != 00);
assert(p_header_size != 00);
assert(p_manager != 00);
assert(p_data != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_tile_no];
l_tccp = &l_tcp->tccps[p_comp_no];
/* preconditions again */
assert(p_tile_no < l_cp->tw * l_cp->th);
assert(p_comp_no < p_j2k->m_private_image->numcomps);
l_num_bands = (l_tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ? 1 :
(l_tccp->numresolutions * 3 - 2);
if (l_tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
l_header_size = 1 + l_num_bands;
if (*p_header_size < l_header_size) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SQcd SQcc element\n");
return OPJ_FALSE;
}
opj_write_bytes(p_data, l_tccp->qntsty + (l_tccp->numgbits << 5),
1); /* Sqcx */
++p_data;
for (l_band_no = 0; l_band_no < l_num_bands; ++l_band_no) {
l_expn = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].expn;
opj_write_bytes(p_data, l_expn << 3, 1); /* SPqcx_i */
++p_data;
}
} else {
l_header_size = 1 + 2 * l_num_bands;
if (*p_header_size < l_header_size) {
opj_event_msg(p_manager, EVT_ERROR, "Error writing SQcd SQcc element\n");
return OPJ_FALSE;
}
opj_write_bytes(p_data, l_tccp->qntsty + (l_tccp->numgbits << 5),
1); /* Sqcx */
++p_data;
for (l_band_no = 0; l_band_no < l_num_bands; ++l_band_no) {
l_expn = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].expn;
l_mant = (OPJ_UINT32)l_tccp->stepsizes[l_band_no].mant;
opj_write_bytes(p_data, (l_expn << 11) + l_mant, 2); /* SPqcx_i */
p_data += 2;
}
}
*p_header_size = *p_header_size - l_header_size;
return OPJ_TRUE;
} | 0 | [
"CWE-416",
"CWE-787"
] | openjpeg | 4241ae6fbbf1de9658764a80944dc8108f2b4154 | 308,650,657,929,155,940,000,000,000,000,000,000,000 | 74 | Fix assertion in debug mode / heap-based buffer overflow in opj_write_bytes_LE for Cinema profiles with numresolutions = 1 (#985) |
virDomainShmemDefEquals(virDomainShmemDefPtr src,
virDomainShmemDefPtr dst)
{
if (STRNEQ_NULLABLE(src->name, dst->name))
return false;
if (src->size != dst->size)
return false;
if (src->model != dst->model)
return false;
if (src->server.enabled != dst->server.enabled)
return false;
if (src->server.enabled) {
if (STRNEQ_NULLABLE(src->server.chr.data.nix.path,
dst->server.chr.data.nix.path))
return false;
}
if (src->msi.enabled != dst->msi.enabled)
return false;
if (src->msi.enabled) {
if (src->msi.vectors != dst->msi.vectors)
return false;
if (src->msi.ioeventfd != dst->msi.ioeventfd)
return false;
}
if (src->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_NONE &&
!virDomainDeviceInfoAddressIsEqual(&src->info, &dst->info))
return false;
return true;
} | 0 | [
"CWE-212"
] | libvirt | a5b064bf4b17a9884d7d361733737fb614ad8979 | 95,026,214,460,681,130,000,000,000,000,000,000,000 | 37 | conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <hhan@redhat.com>
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com> |
virtual Status checkAuthForCommand(Client* client,
const std::string& dbname,
const BSONObj& cmdObj) {
return auth::checkAuthForInvalidateUserCacheCommand(client);
} | 0 | [
"CWE-613"
] | mongo | db19e7ce84cfd702a4ba9983ee2ea5019f470f82 | 238,589,438,247,938,600,000,000,000,000,000,000,000 | 5 | SERVER-38984 Validate unique User ID on UserCache hit
(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7) |
template<typename t>
CImg<T>& max(const CImg<t>& img) {
const ulongT siz = size(), isiz = img.size();
if (siz && isiz) {
if (is_overlapped(img)) return max(+img);
T *ptrd = _data, *const ptre = _data + siz;
if (siz>isiz) for (ulongT n = siz/isiz; n; --n)
for (const t *ptrs = img._data, *ptrs_end = ptrs + isiz; ptrs<ptrs_end; ++ptrd)
*ptrd = std::max((T)*(ptrs++),*ptrd);
for (const t *ptrs = img._data; ptrd<ptre; ++ptrd) *ptrd = std::max((T)*(ptrs++),*ptrd);
}
return *this; | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 294,565,965,572,993,560,000,000,000,000,000,000,000 | 12 | Fix other issues in 'CImg<T>::load_bmp()'. |
srs_free(srs_t *srs)
{
int i;
for (i = 0; i < srs->numsecrets; i++) {
memset(srs->secrets[i], 0, strlen(srs->secrets[i]));
srs_f_free(srs->secrets[i]);
srs->secrets[i] = 0;
}
srs_f_free(srs);
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-834"
] | postsrsd | 4733fb11f6bec6524bb8518c5e1a699288c26bac | 287,896,488,462,734,600,000,000,000,000,000,000,000 | 10 | SECURITY: Fix potential denial of service attack against PostSRSd
I discovered that PostSRSd could be tricked into consuming a lot of CPU
time with an SRS address that has an excessively long time stamp tag,
e.g.
SRS0=HHHH=TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT=0@example.com |
static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
BlockCompletionFunc *cb, NvmeRequest *req)
{
assert(req->sg.flags & NVME_SG_ALLOC);
if (req->sg.flags & NVME_SG_DMA) {
req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
cb, req);
} else {
req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req);
}
} | 0 | [] | qemu | 736b01642d85be832385063f278fe7cd4ffb5221 | 167,427,870,937,837,050,000,000,000,000,000,000,000 | 12 | hw/nvme: fix CVE-2021-3929
This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
device itself. This still allows DMA to MMIO regions of other devices
(e.g. doing P2P DMA to the controller memory buffer of another NVMe
device).
Fixes: CVE-2021-3929
Reported-by: Qiuhao Li <Qiuhao.Li@outlook.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com> |
TEST(IndexBoundsBuilderTest, SimpleNE) {
auto testIndex = buildSimpleIndexEntry();
BSONObj obj = BSON("a" << BSON("$ne" << 3));
auto expr = parseMatchExpression(obj);
BSONElement elt = obj.firstElement();
OrderedIntervalList oil;
IndexBoundsBuilder::BoundsTightness tightness;
IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
ASSERT_EQUALS(oil.name, "a");
ASSERT_EQUALS(oil.intervals.size(), 2U);
ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(Interval(minKeyIntObj(3), true, false)));
ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
oil.intervals[1].compare(Interval(maxKeyIntObj(3), false, true)));
ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
} | 0 | [
"CWE-754"
] | mongo | f8f55e1825ee5c7bdb3208fc7c5b54321d172732 | 176,380,343,223,915,880,000,000,000,000,000,000,000 | 16 | SERVER-44377 generate correct plan for indexed inequalities to null |
void push_back(const T &value) {
if (size_ == capacity_)
grow(size_ + 1);
ptr_[size_++] = value;
} | 0 | [
"CWE-134",
"CWE-119",
"CWE-787"
] | fmt | 8cf30aa2be256eba07bb1cefb998c52326e846e7 | 123,774,316,433,341,330,000,000,000,000,000,000,000 | 5 | Fix segfault on complex pointer formatting (#642) |
void __init early_fixmap_init(void)
{
pgd_t *pgdp, pgd;
pud_t *pudp;
pmd_t *pmdp;
unsigned long addr = FIXADDR_START;
pgdp = pgd_offset_k(addr);
pgd = READ_ONCE(*pgdp);
if (CONFIG_PGTABLE_LEVELS > 3 &&
!(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
/*
* We only end up here if the kernel mapping and the fixmap
* share the top level pgd entry, which should only happen on
* 16k/4 levels configurations.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
pudp = pud_offset_kimg(pgdp, addr);
} else {
if (pgd_none(pgd))
__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
pudp = fixmap_pud(addr);
}
if (pud_none(READ_ONCE(*pudp)))
__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
pmdp = fixmap_pmd(addr);
__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
/*
* The boot-ioremap range spans multiple pmds, for which
* we are not prepared:
*/
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
|| pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
WARN_ON(1);
pr_warn("pmdp %p != %p, %p\n",
pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
fix_to_virt(FIX_BTMAP_BEGIN));
pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
fix_to_virt(FIX_BTMAP_END));
pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
}
} | 0 | [] | linux | 15122ee2c515a253b0c66a3e618bc7ebe35105eb | 270,517,324,528,791,050,000,000,000,000,000,000,000 | 50 | arm64: Enforce BBM for huge IO/VMAP mappings
ioremap_page_range doesn't honour break-before-make and attempts to put
down huge mappings (using p*d_set_huge) over the top of pre-existing
table entries. This leads to us leaking page table memory and also gives
rise to TLB conflicts and spurious aborts, which have been seen in
practice on Cortex-A75.
Until this has been resolved, refuse to put block mappings when the
existing entry is found to be present.
Fixes: 324420bf91f60 ("arm64: add support for ioremap() block mappings")
Reported-by: Hanjun Guo <hanjun.guo@linaro.org>
Reported-by: Lei Li <lious.lilei@hisilicon.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> |
cmsBool CMSEXPORT cmsIsToneCurveDescending(const cmsToneCurve* t)
{
_cmsAssert(t != NULL);
return t ->Table16[0] > t ->Table16[t ->nEntries-1];
} | 0 | [] | Little-CMS | 9cf2d61867375f867e6e80906a571d222bc2cbf3 | 215,233,478,891,144,500,000,000,000,000,000,000,000 | 6 | Memory squeezing fix: LCMS2: AllocateToneCurveStruct
Simply add an extra check on the last allocation to avoid returning a
partially built struct. |
vhost_check_queue_inflights_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq)
{
uint16_t i;
uint16_t resubmit_num = 0, old_used_idx, num;
struct rte_vhost_resubmit_info *resubmit;
struct rte_vhost_inflight_info_packed *inflight_packed;
if (!(dev->protocol_features &
(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
return RTE_VHOST_MSG_RESULT_OK;
/* The frontend may still not support the inflight feature
* although we negotiate the protocol feature.
*/
if ((!vq->inflight_packed))
return RTE_VHOST_MSG_RESULT_OK;
if (!vq->inflight_packed->version) {
vq->inflight_packed->version = INFLIGHT_VERSION;
return RTE_VHOST_MSG_RESULT_OK;
}
if (vq->resubmit_inflight)
return RTE_VHOST_MSG_RESULT_OK;
inflight_packed = vq->inflight_packed;
vq->global_counter = 0;
old_used_idx = inflight_packed->old_used_idx;
if (inflight_packed->used_idx != old_used_idx) {
if (inflight_packed->desc[old_used_idx].inflight == 0) {
inflight_packed->old_used_idx =
inflight_packed->used_idx;
inflight_packed->old_used_wrap_counter =
inflight_packed->used_wrap_counter;
inflight_packed->old_free_head =
inflight_packed->free_head;
} else {
inflight_packed->used_idx =
inflight_packed->old_used_idx;
inflight_packed->used_wrap_counter =
inflight_packed->old_used_wrap_counter;
inflight_packed->free_head =
inflight_packed->old_free_head;
}
}
for (i = 0; i < inflight_packed->desc_num; i++) {
if (inflight_packed->desc[i].inflight == 1)
resubmit_num++;
}
if (resubmit_num) {
resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
0, vq->numa_node);
if (resubmit == NULL) {
VHOST_LOG_CONFIG(ERR,
"(%s) failed to allocate memory for resubmit info.\n",
dev->ifname);
return RTE_VHOST_MSG_RESULT_ERR;
}
resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list",
resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
0, vq->numa_node);
if (resubmit->resubmit_list == NULL) {
VHOST_LOG_CONFIG(ERR,
"(%s) failed to allocate memory for resubmit desc.\n",
dev->ifname);
rte_free(resubmit);
return RTE_VHOST_MSG_RESULT_ERR;
}
num = 0;
for (i = 0; i < inflight_packed->desc_num; i++) {
if (vq->inflight_packed->desc[i].inflight == 1) {
resubmit->resubmit_list[num].index = i;
resubmit->resubmit_list[num].counter =
inflight_packed->desc[i].counter;
num++;
}
}
resubmit->resubmit_num = num;
if (resubmit->resubmit_num > 1)
qsort(resubmit->resubmit_list, resubmit->resubmit_num,
sizeof(struct rte_vhost_resubmit_desc),
resubmit_desc_compare);
vq->global_counter = resubmit->resubmit_list[0].counter + 1;
vq->resubmit_inflight = resubmit;
}
return RTE_VHOST_MSG_RESULT_OK;
} | 0 | [
"CWE-125",
"CWE-787"
] | dpdk | 6442c329b9d2ded0f44b27d2016aaba8ba5844c5 | 325,268,070,984,611,200,000,000,000,000,000,000,000 | 96 | vhost: fix queue number check when setting inflight FD
In function vhost_user_set_inflight_fd, queue number in inflight
message is used to access virtqueue. However, queue number could
be larger than VHOST_MAX_VRING and cause write OOB as this number
will be used to write inflight info in virtqueue structure. This
patch checks the queue number to avoid the issue and also make
sure virtqueues are allocated before setting inflight information.
Fixes: ad0a4ae491fe ("vhost: checkout resubmit inflight information")
Cc: stable@dpdk.org
Reported-by: Wenxiang Qian <leonwxqian@gmail.com>
Signed-off-by: Chenbo Xia <chenbo.xia@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com> |
START_TEST(virgl_test_transfer_write_no_iov)
{
struct virgl_box box;
struct virgl_renderer_resource_create_args res;
int ret;
testvirgl_init_simple_1d_resource(&res, 1);
ret = virgl_renderer_resource_create(&res, NULL, 0);
ck_assert_int_eq(ret, 0);
virgl_renderer_ctx_attach_resource(1, res.handle);
ret = virgl_renderer_transfer_write_iov(1, 1, 0, 1, 1, &box, 0, NULL, 0);
ck_assert_int_eq(ret, EINVAL);
virgl_renderer_ctx_detach_resource(1, res.handle);
virgl_renderer_resource_unref(1);
} | 0 | [
"CWE-909"
] | virglrenderer | b05bb61f454eeb8a85164c8a31510aeb9d79129c | 266,285,920,052,759,040,000,000,000,000,000,000,000 | 19 | vrend: clear memory when allocating a host-backed memory resource
Closes: #249
Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com> |
CtPtr ProtocolV2::server_ready() {
ldout(cct, 20) << __func__ << dendl;
if (connection->delay_state) {
ceph_assert(connection->delay_state->ready());
}
return ready();
} | 0 | [
"CWE-323"
] | ceph | 20b7bb685c5ea74c651ca1ea547ac66b0fee7035 | 275,851,946,646,575,850,000,000,000,000,000,000,000 | 9 | msg/async/ProtocolV2: avoid AES-GCM nonce reuse vulnerabilities
The secure mode uses AES-128-GCM with 96-bit nonces consisting of a
32-bit counter followed by a 64-bit salt. The counter is incremented
after processing each frame, the salt is fixed for the duration of
the session. Both are initialized from the session key generated
during session negotiation, so the counter starts with essentially
a random value. It is allowed to wrap, and, after 2**32 frames, it
repeats, resulting in nonce reuse (the actual sequence numbers that
the messenger works with are 64-bit, so the session continues on).
Because of how GCM works, this completely breaks both confidentiality
and integrity aspects of the secure mode. A single nonce reuse reveals
the XOR of two plaintexts and almost completely reveals the subkey
used for producing authentication tags. After a few nonces get used
twice, all confidentiality and integrity goes out the window and the
attacker can potentially encrypt-authenticate plaintext of their
choice.
We can't easily change the nonce format to extend the counter to
64 bits (and possibly XOR it with a longer salt). Instead, just
remember the initial nonce and cut the session before it repeats,
forcing renegotiation.
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
Reviewed-by: Sage Weil <sage@redhat.com>
Conflicts:
src/msg/async/ProtocolV2.h [ context: commit ed3ec4c01d17
("msg: Build target 'common' without using namespace in
headers") not in octopus ] |
static const char *qxl_mode_to_string(int mode)
{
switch (mode) {
case QXL_MODE_COMPAT:
return "compat";
case QXL_MODE_NATIVE:
return "native";
case QXL_MODE_UNDEFINED:
return "undefined";
case QXL_MODE_VGA:
return "vga";
}
return "INVALID";
} | 0 | [] | qemu-kvm | 5ff4e36c804157bd84af43c139f8cd3a59722db9 | 75,983,370,895,913,610,000,000,000,000,000,000,000 | 14 | qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processses the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on an added QXLInterface::async_complete
and added QXLWorker::*_async additions, in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Alon Levy <alevy@redhat.com> |
profCallgraphAdd(xsltTemplatePtr templ, xsltTemplatePtr parent)
{
int i;
if (templ->templMax == 0) {
templ->templMax = 4;
templ->templCalledTab =
(xsltTemplatePtr *) xmlMalloc(templ->templMax *
sizeof(templ->templCalledTab[0]));
templ->templCountTab =
(int *) xmlMalloc(templ->templMax *
sizeof(templ->templCountTab[0]));
if (templ->templCalledTab == NULL || templ->templCountTab == NULL) {
xmlGenericError(xmlGenericErrorContext, "malloc failed !\n");
return;
}
}
else if (templ->templNr >= templ->templMax) {
templ->templMax *= 2;
templ->templCalledTab =
(xsltTemplatePtr *) xmlRealloc(templ->templCalledTab,
templ->templMax *
sizeof(templ->templCalledTab[0]));
templ->templCountTab =
(int *) xmlRealloc(templ->templCountTab,
templ->templMax *
sizeof(templ->templCountTab[0]));
if (templ->templCalledTab == NULL || templ->templCountTab == NULL) {
xmlGenericError(xmlGenericErrorContext, "realloc failed !\n");
return;
}
}
for (i = 0; i < templ->templNr; i++) {
if (templ->templCalledTab[i] == parent) {
templ->templCountTab[i]++;
break;
}
}
if (i == templ->templNr) {
/* not found, add new one */
templ->templCalledTab[templ->templNr] = parent;
templ->templCountTab[templ->templNr] = 1;
templ->templNr++;
}
} | 0 | [] | libxslt | e03553605b45c88f0b4b2980adfbbb8f6fca2fd6 | 38,823,463,014,827,680,000,000,000,000,000,000,000 | 46 | Fix security framework bypass
xsltCheckRead and xsltCheckWrite return -1 in case of error but callers
don't check for this condition and allow access. With a specially
crafted URL, xsltCheckRead could be tricked into returning an error
because of a supposedly invalid URL that would still be loaded
succesfully later on.
Fixes #12.
Thanks to Felix Wilhelm for the report. |
void LEX::save_values_list_state()
{
current_select->save_many_values= many_values;
current_select->save_insert_list= insert_list;
} | 0 | [
"CWE-703"
] | server | 39feab3cd31b5414aa9b428eaba915c251ac34a2 | 38,743,961,767,209,796,000,000,000,000,000,000,000 | 5 | MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <sanja@mariadb.com> |
static bool ipv4_datagram_support_cmsg(const struct sock *sk,
struct sk_buff *skb,
int ee_origin)
{
struct in_pktinfo *info;
if (ee_origin == SO_EE_ORIGIN_ICMP)
return true;
if (ee_origin == SO_EE_ORIGIN_LOCAL)
return false;
/* Support IP_PKTINFO on tstamp packets if requested, to correlate
* timestamp with egress dev. Not possible for packets without dev
* or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
*/
if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
(!skb->dev))
return false;
info = PKTINFO_SKB_CB(skb);
info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
info->ipi_ifindex = skb->dev->ifindex;
return true;
} | 0 | [
"CWE-476",
"CWE-284"
] | linux | 34b2cef20f19c87999fff3da4071e66937db9644 | 208,996,310,477,556,950,000,000,000,000,000,000,000 | 25 | ipv4: keep skb->dst around in presence of IP options
Andrey Konovalov got crashes in __ip_options_echo() when a NULL skb->dst
is accessed.
ipv4_pktinfo_prepare() should not drop the dst if (evil) IP options
are present.
We could refine the test to the presence of ts_needtime or srr,
but IP options are not often used, so let's be conservative.
Thanks to syzkaller team for finding this bug.
Fixes: d826eb14ecef ("ipv4: PKTINFO doesnt need dst reference")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
Curl_mbedtls_connect(struct connectdata *conn,
int sockindex)
{
CURLcode retcode;
bool done = FALSE;
retcode = mbed_connect_common(conn, sockindex, FALSE, &done);
if(retcode)
return retcode;
DEBUGASSERT(done);
return CURLE_OK;
} | 0 | [
"CWE-20"
] | curl | 6efd2fa529a189bf41736a610f6184cd8ad94b4d | 83,263,072,772,225,150,000,000,000,000,000,000,000 | 14 | mbedtls/polarssl: set "hostname" unconditionally
...as otherwise the TLS libs will skip the CN/SAN check and just allow
connection to any server. curl previously skipped this function when SNI
wasn't used or when connecting to an IP address specified host.
CVE-2016-3739
Bug: https://curl.haxx.se/docs/adv_20160518A.html
Reported-by: Moti Avrahami |
void CLASS canon_600_fixed_wb (int temp)
{
static const short mul[4][5] = {
{ 667, 358,397,565,452 },
{ 731, 390,367,499,517 },
{ 1119, 396,348,448,537 },
{ 1399, 485,431,508,688 } };
int lo, hi, i;
float frac=0;
for (lo=4; --lo; )
if (*mul[lo] <= temp) break;
for (hi=0; hi < 3; hi++)
if (*mul[hi] >= temp) break;
if (lo != hi)
frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]);
for (i=1; i < 5; i++)
pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]);
} | 0 | [
"CWE-703"
] | LibRaw | 11909cc59e712e09b508dda729b99aeaac2b29ad | 94,833,775,697,307,090,000,000,000,000,000,000,000 | 19 | cumulated data checks patch |
blobValidate(
Syntax *syntax,
struct berval *in )
{
/* any value allowed */
return LDAP_SUCCESS;
} | 0 | [
"CWE-617"
] | openldap | 67670f4544e28fb09eb7319c39f404e1d3229e65 | 141,945,644,583,851,300,000,000,000,000,000,000,000 | 7 | ITS#9383 remove assert in certificateListValidate |
conn_to_ct_dpif_entry(const struct conn *conn, struct ct_dpif_entry *entry,
long long now, int bkt)
{
struct ct_l4_proto *class;
long long expiration;
memset(entry, 0, sizeof *entry);
conn_key_to_tuple(&conn->key, &entry->tuple_orig);
conn_key_to_tuple(&conn->rev_key, &entry->tuple_reply);
entry->zone = conn->key.zone;
entry->mark = conn->mark;
memcpy(&entry->labels, &conn->label, sizeof entry->labels);
/* Not implemented yet */
entry->timestamp.start = 0;
entry->timestamp.stop = 0;
expiration = conn->expiration - now;
entry->timeout = (expiration > 0) ? expiration / 1000 : 0;
class = l4_protos[conn->key.nw_proto];
if (class->conn_get_protoinfo) {
class->conn_get_protoinfo(conn, &entry->protoinfo);
}
entry->bkt = bkt;
if (conn->alg) {
/* Caller is responsible for freeing. */
entry->helper.name = xstrdup(conn->alg);
}
} | 0 | [
"CWE-400"
] | ovs | 35c280072c1c3ed58202745b7d27fbbd0736999b | 288,766,973,438,109,600,000,000,000,000,000,000,000 | 32 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <joakim.hindersson@elastx.se>
Acked-by: Ilya Maximets <i.maximets@ovn.org>
Signed-off-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org> |
get_decompress_stream(packet_info* pinfo)
{
const blip_conversation_entry_t* blip_convo = get_blip_conversation(pinfo);
// Store compression state per srcport/destport.
guint32 hash_key = (pinfo->srcport << 16) | pinfo->destport;
z_stream* decompress_stream = (z_stream *)wmem_map_lookup(blip_convo->decompress_streams, GUINT_TO_POINTER(hash_key));
if(decompress_stream) {
return decompress_stream;
}
decompress_stream = wmem_new0(wmem_file_scope(), z_stream);
wmem_map_insert(blip_convo->decompress_streams, GUINT_TO_POINTER(hash_key), decompress_stream);
wmem_register_callback(wmem_file_scope(), z_stream_destroy_cb, decompress_stream);
return decompress_stream;
} | 0 | [
"CWE-476"
] | wireshark | 4a948427100b6c109f4ec7b4361f0d2aec5e5c3f | 168,987,657,490,541,340,000,000,000,000,000,000,000 | 17 | BLIP: Fix decompression buffer bug
Until now, mistakenly, the buffer for decompressing compressed BLIP messages
has been statically allocated as 16 Kb, but that is not valid behavior.
16 Kb is the maximum size of a _compressed_ frame. In theory, due to the
ability to zipbomb, there is virtually no upper bound on what the maximum
size of an uncompressed frame could be. However, to keep sanity, it has
been made into a preference with a reasonable default that is not likely to
be exceeded (64 Kb). The behavior before for this was that wireshark would
crash because the dissector would return NULL for a decompressed buffer due
to error and then try to deference it later. A null check has been added,
so that the behavior is now that the packet will show
'<Error decompressing message>' instead, and log why it couldn't handle the
compressed message. Closes #16866. |
TEST(CudnnRNNOpsTest, ForwardLstm_ShapeFn) {
int seq_length = 2;
int batch_size = 3;
int num_units = 4;
int num_layers = 5;
int dir_count = 1;
std::vector<int> input_shape = {seq_length, batch_size, num_units};
std::vector<int> input_h_shape = {num_layers * dir_count, batch_size,
num_units};
std::vector<int> output_shape = {seq_length, batch_size,
num_units * dir_count};
auto shape_to_str = [](const std::vector<int>& v) {
return strings::StrCat("[", absl::StrJoin(v, ","), "]");
};
string input_shapes_desc = strings::StrCat(
shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";",
shape_to_str(input_h_shape), ";", "[?]");
string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in1;?";
ShapeInferenceTestOp op("CudnnRNN");
TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNN")
.Input({"input", 0, DT_FLOAT})
.Input({"input_h", 0, DT_FLOAT})
.Input({"input_c", 0, DT_FLOAT})
.Input({"params", 0, DT_FLOAT})
.Attr("rnn_mode", "lstm")
.Attr("input_mode", "auto_select")
.Attr("direction", "unidirectional")
.Finalize(&op.node_def));
INFER_OK(op, input_shapes_desc, output_shapes_desc);
} | 1 | [
"CWE-416",
"CWE-787"
] | tensorflow | af5fcebb37c8b5d71c237f4e59c6477015c78ce6 | 197,436,449,671,576,500,000,000,000,000,000,000,000 | 31 | Fix access to undefined memory during shape inference of Cudnn*.
PiperOrigin-RevId: 400324259
Change-Id: Ie3b7859d19ae24ee9ac2adf413bdc1e851bbc604 |
imap_client_notify_starttls(struct client *client,
bool success, const char *text)
{
if (success)
client_send_reply(client, IMAP_CMD_REPLY_OK, text);
else
client_send_reply(client, IMAP_CMD_REPLY_BAD, text);
} | 0 | [] | core | 62061e8cf68f506c0ccaaba21fd4174764ca875f | 46,804,368,708,724,290,000,000,000,000,000,000,000 | 8 | imap-login: Split off client_invalid_command() |
void ConnectDialog::on_qtwServers_itemExpanded(QTreeWidgetItem *item) {
if (qtwServers->siPublic != NULL && item == qtwServers->siPublic) {
initList();
fillList();
}
ServerItem *p = static_cast<ServerItem *>(item);
foreach(ServerItem *si, p->qlChildren)
startDns(si);
} | 0 | [
"CWE-59",
"CWE-61"
] | mumble | e59ee87abe249f345908c7d568f6879d16bfd648 | 134,601,808,298,924,680,000,000,000,000,000,000,000 | 11 | FIX(client): Only allow "http"/"https" for URLs in ConnectDialog
Our public server list registration script doesn't have an URL scheme
whitelist for the website field.
Turns out a malicious server can register itself with a dangerous URL in
an attempt to attack a user's machine.
User interaction is required, as the URL has to be opened by
right-clicking on the server entry and clicking on "Open Webpage".
This commit introduces a client-side whitelist, which only allows "http"
and "https" schemes. We will also implement it in our public list.
In future we should probably add a warning QMessageBox informing the
user that there's no guarantee the URL is safe (regardless of the
scheme).
Thanks a lot to https://positive.security for reporting the RCE
vulnerability to us privately. |
bit_write_BLL (Bit_Chain *dat, BITCODE_BLL value)
{
// 64bit into how many bytes?
int i;
int len = 0;
BITCODE_BLL umax = 0xf000000000000000ULL;
for (i = 16; i; i--, umax >>= 8)
{
if (value & umax)
{
len = i;
break;
}
}
bit_write_BB (dat, len << 2);
bit_write_B (dat, len & 1);
for (i = 0; i < len; i++)
{
// least significant byte first
bit_write_RC (dat, value & 0xFF);
value >>= 8;
}
} | 0 | [
"CWE-703",
"CWE-125"
] | libredwg | 95cc9300430d35feb05b06a9badf678419463dbe | 236,660,070,900,200,960,000,000,000,000,000,000,000 | 23 | encode: protect from stack under-flow
From GH #178 fuzzing |
bool samba_private_attr_name(const char *unix_ea_name)
{
static const char * const prohibited_ea_names[] = {
SAMBA_POSIX_INHERITANCE_EA_NAME,
SAMBA_XATTR_DOS_ATTRIB,
SAMBA_XATTR_MARKER,
XATTR_NTACL_NAME,
NULL
};
int i;
for (i = 0; prohibited_ea_names[i]; i++) {
if (strequal( prohibited_ea_names[i], unix_ea_name))
return true;
}
if (strncasecmp_m(unix_ea_name, SAMBA_XATTR_DOSSTREAM_PREFIX,
strlen(SAMBA_XATTR_DOSSTREAM_PREFIX)) == 0) {
return true;
}
return false;
} | 1 | [
"CWE-787"
] | samba | 22b4091924977f6437b59627f33a8e6f02b41011 | 89,952,104,762,470,840,000,000,000,000,000,000,000 | 22 | CVE-2021-44142: smbd: add Netatalk xattr used by vfs_fruit to the list of private Samba xattrs
This is an internal xattr that should not be user visible.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14914
Signed-off-by: Ralph Boehme <slow@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org> |
static int mb_find_extent(struct ext4_buddy *e4b, int block,
int needed, struct ext4_free_extent *ex)
{
int next = block;
int max, order;
void *buddy;
assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
BUG_ON(ex == NULL);
buddy = mb_find_buddy(e4b, 0, &max);
BUG_ON(buddy == NULL);
BUG_ON(block >= max);
if (mb_test_bit(block, buddy)) {
ex->fe_len = 0;
ex->fe_start = 0;
ex->fe_group = 0;
return 0;
}
/* find actual order */
order = mb_find_order_for_block(e4b, block);
block = block >> order;
ex->fe_len = 1 << order;
ex->fe_start = block << order;
ex->fe_group = e4b->bd_group;
/* calc difference from given start */
next = next - ex->fe_start;
ex->fe_len -= next;
ex->fe_start += next;
while (needed > ex->fe_len &&
mb_find_buddy(e4b, order, &max)) {
if (block + 1 >= max)
break;
next = (block + 1) * (1 << order);
if (mb_test_bit(next, e4b->bd_bitmap))
break;
order = mb_find_order_for_block(e4b, next);
block = next >> order;
ex->fe_len += 1 << order;
}
if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
/* Should never happen! (but apparently sometimes does?!?) */
WARN_ON(1);
ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
block, order, needed, ex->fe_group, ex->fe_start,
ex->fe_len, ex->fe_logical);
ex->fe_len = 0;
ex->fe_start = 0;
ex->fe_group = 0;
}
return ex->fe_len;
} | 0 | [
"CWE-416"
] | linux | 8844618d8aa7a9973e7b527d038a2a589665002c | 141,207,721,851,837,730,000,000,000,000,000,000,000 | 62 | ext4: only look at the bg_flags field if it is valid
The bg_flags field in the block group descripts is only valid if the
uninit_bg or metadata_csum feature is enabled. We were not
consistently looking at this field; fix this.
Also block group #0 must never have uninitialized allocation bitmaps,
or need to be zeroed, since that's where the root inode, and other
special inodes are set up. Check for these conditions and mark the
file system as corrupted if they are detected.
This addresses CVE-2018-10876.
https://bugzilla.kernel.org/show_bug.cgi?id=199403
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@kernel.org |
/*
 * Decode the reply to a GETDEVICEINFO compound: compound header,
 * then the SEQUENCE op, then GETDEVICEINFO itself.  Returns 0 on
 * success or the status of the first decode step that failed.
 */
static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
				      struct xdr_stream *xdr,
				      void *data)
{
	struct nfs4_getdeviceinfo_res *res = data;
	struct compound_hdr hdr;
	int status;

	/* Compound header must be consumed before any operation. */
	status = decode_compound_hdr(xdr, &hdr);
	if (status != 0)
		goto out;
	/* SEQUENCE is always the first operation in the compound. */
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status != 0)
		goto out;
	status = decode_getdeviceinfo(xdr, res);
out:
	return status;
}
"CWE-787"
] | linux | b4487b93545214a9db8cbf32e86411677b0cca21 | 223,394,051,193,888,360,000,000,000,000,000,000,000 | 18 | nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <jeffrey.mitchell@starlab.io>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com> |
/*
 * Write the "ctts" (composition time to sample) atom for a track.
 * Runs of clusters with the same CTS offset are run-length compressed
 * into one (count, duration) entry.  Returns the atom size in bytes,
 * or AVERROR(ENOMEM) if the temporary entry table cannot be allocated.
 *
 * NOTE(review): cluster[0] is read unconditionally — assumes
 * track->entry >= 1; confirm callers guarantee that.
 */
static int mov_write_ctts_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track)
{
    MOVMuxContext *mov = s->priv_data;
    MOVStts *ctts_entries;
    uint32_t entries = 0;
    uint32_t atom_size;
    int i;

    ctts_entries = av_malloc_array((track->entry + 1), sizeof(*ctts_entries)); /* worst case */
    if (!ctts_entries)
        return AVERROR(ENOMEM);
    ctts_entries[0].count = 1;
    ctts_entries[0].duration = track->cluster[0].cts;
    for (i = 1; i < track->entry; i++) {
        if (track->cluster[i].cts == ctts_entries[entries].duration) {
            ctts_entries[entries].count++; /* compress */
        } else {
            /* CTS changed: start a new run-length entry */
            entries++;
            ctts_entries[entries].duration = track->cluster[i].cts;
            ctts_entries[entries].count = 1;
        }
    }
    entries++; /* last one */
    /* 16-byte atom header plus 8 bytes (count + duration) per entry */
    atom_size = 16 + (entries * 8);
    avio_wb32(pb, atom_size); /* size */
    ffio_wfourcc(pb, "ctts");
    /* version 1 permits signed (negative) composition offsets */
    if (mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS)
        avio_w8(pb, 1); /* version */
    else
        avio_w8(pb, 0); /* version */
    avio_wb24(pb, 0); /* flags */
    avio_wb32(pb, entries); /* entry count */
    for (i = 0; i < entries; i++) {
        avio_wb32(pb, ctts_entries[i].count);
        avio_wb32(pb, ctts_entries[i].duration);
    }
    av_free(ctts_entries);
    return atom_size;
}
"CWE-125"
] | FFmpeg | 95556e27e2c1d56d9e18f5db34d6f756f3011148 | 231,435,983,499,305,400,000,000,000,000,000,000,000 | 39 | avformat/movenc: Do not pass AVCodecParameters in avpriv_request_sample
Fixes: out of array read
Fixes: ffmpeg_crash_8.avi
Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc> |
/*
 * irc_server_copy: duplicate an IRC server under a new name.
 *
 * Copies every non-null option matching "irc.server.<old>.*" onto a
 * freshly allocated server.  Returns the new server, or NULL when a
 * server with new_name already exists or an allocation fails.
 */
irc_server_copy (struct t_irc_server *server, const char *new_name)
{
    struct t_irc_server *new_server;
    struct t_infolist *infolist;
    char *mask, *pos;
    const char *option_name;
    int length, index_option;

    /* check if another server exists with this name */
    if (irc_server_casesearch (new_name))
        return NULL;

    new_server = irc_server_alloc (new_name);
    if (new_server)
    {
        /* duplicate options */
        length = 32 + strlen (server->name) + 1;
        mask = malloc (length);
        if (!mask)
        {
            /*
             * fix: was "return 0" from a pointer-returning function;
             * NOTE(review): new_server remains allocated on this path —
             * confirm whether it should be freed/unregistered here.
             */
            return NULL;
        }
        snprintf (mask, length, "irc.server.%s.*", server->name);
        infolist = weechat_infolist_get ("option", NULL, mask);
        free (mask);
        if (infolist)
        {
            while (weechat_infolist_next (infolist))
            {
                /* copy only options that actually carry a value */
                if (!weechat_infolist_integer (infolist, "value_is_null"))
                {
                    option_name = weechat_infolist_string (infolist,
                                                           "option_name");
                    /* option name is "irc.server.<name>.<option>":
                       keep only the part after the last '.' */
                    pos = strrchr (option_name, '.');
                    if (pos)
                    {
                        index_option = irc_server_search_option (pos + 1);
                        if (index_option >= 0)
                        {
                            weechat_config_option_set (
                                new_server->options[index_option],
                                weechat_infolist_string (infolist, "value"),
                                1);
                        }
                    }
                }
            }
            weechat_infolist_free (infolist);
        }
    }

    return new_server;
}
"CWE-120",
"CWE-787"
] | weechat | 40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f | 225,468,641,513,736,730,000,000,000,000,000,000,000 | 51 | irc: fix crash when a new message 005 is received with longer nick prefixes
Thanks to Stuart Nevans Locke for reporting the issue. |
static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd;
u32 *p = arg;
vfd = video_devdata(file);
*p = v4l2_prio_max(vfd->prio);
return 0;
} | 0 | [
"CWE-401"
] | linux | fb18802a338b36f675a388fc03d2aa504a0d0899 | 335,379,231,828,906,600,000,000,000,000,000,000,000 | 10 | media: v4l: ioctl: Fix memory leak in video_usercopy
When an IOCTL with argument size larger than 128 that also used array
arguments were handled, two memory allocations were made but alas, only
the latter one of them was released. This happened because there was only
a single local variable to hold such a temporary allocation.
Fix this by adding separate variables to hold the pointers to the
temporary allocations.
Reported-by: Arnd Bergmann <arnd@kernel.org>
Reported-by: syzbot+1115e79c8df6472c612b@syzkaller.appspotmail.com
Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code")
Cc: stable@vger.kernel.org
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org> |
/*
 * Deliver one pfkey skb to a single socket.
 *
 * The skb is shared across receivers: if no private copy exists yet in
 * *skb2, either a clone is made (when skb is still referenced elsewhere)
 * or skb itself is reused with its refcount bumped.  The copy is queued
 * only if the socket's receive buffer has room; on success ownership of
 * *skb2 passes to the socket and *skb2 is reset to NULL so the caller
 * makes a fresh copy for the next receiver.
 *
 * Returns 0 when queued, -ENOBUFS otherwise.
 */
static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
			       gfp_t allocation, struct sock *sk)
{
	int err = -ENOBUFS;

	sock_hold(sk);
	if (*skb2 == NULL) {
		if (atomic_read(&skb->users) != 1) {
			/* skb shared: work on a private clone */
			*skb2 = skb_clone(skb, allocation);
		} else {
			/* sole owner: reuse skb with an extra reference */
			*skb2 = skb;
			atomic_inc(&skb->users);
		}
	}
	if (*skb2 != NULL) {
		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
			skb_set_owner_r(*skb2, sk);
			skb_queue_tail(&sk->sk_receive_queue, *skb2);
			sk->sk_data_ready(sk, (*skb2)->len);
			/* queued: ownership transferred to the socket */
			*skb2 = NULL;
			err = 0;
		}
	}
	sock_put(sk);
	return err;
}
"CWE-20",
"CWE-269"
] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 13,629,978,195,721,818,000,000,000,000,000,000,000 | 26 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <davem@davemloft.net>
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net> |
/* Free every tc_action on the list, unlinking each entry first.
 * The _safe iterator is required because entries are removed while
 * iterating. */
static void cleanup_a(struct list_head *actions)
{
	struct tc_action *a, *tmp;

	list_for_each_entry_safe(a, tmp, actions, list) {
		list_del(&a->list);
		kfree(a);
	}
}
"CWE-264"
] | net | 90f62cf30a78721641e08737bda787552428061e | 304,523,361,258,951,400,000,000,000,000,000,000,000 | 9 | net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
/*
 * Parse a textual TTL/period into a network-order uint32 PERIOD rdf.
 * Fails with LDNS_STATUS_ERR when the string has trailing garbage and
 * LDNS_STATUS_MEM_ERR when the rdf cannot be allocated.
 */
ldns_str2rdf_period(ldns_rdf **rd, const char *period)
{
	const char *endptr;
	uint32_t value;

	value = ldns_str2period(period, &endptr);
	/* the whole string must have been consumed */
	if (*endptr != 0) {
		return LDNS_STATUS_ERR;
	}
	value = (uint32_t) htonl(value);
	*rd = ldns_rdf_new_frm_data(LDNS_RDF_TYPE_PERIOD,
	    sizeof(uint32_t), &value);
	return *rd ? LDNS_STATUS_OK : LDNS_STATUS_MEM_ERR;
}
Thanks Stephan Zeisberg |
/* Stop intercepting all debug-register accesses for this vCPU, then
 * recompute the effective intercept masks. */
static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
/*
 * Register the SVG, SVGZ and MSVG coders with ImageMagick.
 *
 * Builds a version string from whichever delegate libraries were
 * compiled in (libxml2 and/or librsvg), initializes those libraries,
 * and registers one MagickInfo entry per format.  Returns the coder
 * signature expected by the module loader.
 */
ModuleExport size_t RegisterSVGImage(void)
{
  char
    version[MaxTextExtent];

  MagickInfo
    *entry;

  *version='\0';
#if defined(LIBXML_DOTTED_VERSION)
  (void) CopyMagickString(version,"XML " LIBXML_DOTTED_VERSION,MaxTextExtent);
#endif
#if defined(MAGICKCORE_RSVG_DELEGATE)
#if !GLIB_CHECK_VERSION(2,35,0)
  /* older glib requires explicit type-system initialization */
  g_type_init();
#endif
  (void) FormatLocaleString(version,MaxTextExtent,"RSVG %d.%d.%d",
    LIBRSVG_MAJOR_VERSION,LIBRSVG_MINOR_VERSION,LIBRSVG_MICRO_VERSION);
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
  xmlInitParser();
#endif
  /* SVG: full read/write support */
  entry=SetMagickInfo("SVG");
  entry->decoder=(DecodeImageHandler *) ReadSVGImage;
  entry->encoder=(EncodeImageHandler *) WriteSVGImage;
  entry->blob_support=MagickFalse;
  entry->seekable_stream=MagickFalse;
#if defined(MAGICKCORE_RSVG_DELEGATE)
  /* librsvg is not thread safe */
  entry->thread_support=MagickFalse;
#endif
  entry->description=ConstantString("Scalable Vector Graphics");
  entry->mime_type=ConstantString("image/svg+xml");
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->magick=(IsImageFormatHandler *) IsSVG;
  entry->magick_module=ConstantString("SVG");
  (void) RegisterMagickInfo(entry);
  /* SVGZ: gzip-compressed SVG; decoding needs the XML delegate */
  entry=SetMagickInfo("SVGZ");
#if defined(MAGICKCORE_XML_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadSVGImage;
#endif
  entry->encoder=(EncodeImageHandler *) WriteSVGImage;
  entry->blob_support=MagickFalse;
  entry->seekable_stream=MagickFalse;
#if defined(MAGICKCORE_RSVG_DELEGATE)
  entry->thread_support=MagickFalse;
#endif
  entry->description=ConstantString("Compressed Scalable Vector Graphics");
  entry->mime_type=ConstantString("image/svg+xml");
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->magick=(IsImageFormatHandler *) IsSVG;
  entry->magick_module=ConstantString("SVG");
  (void) RegisterMagickInfo(entry);
  /* MSVG: ImageMagick's own internal SVG renderer */
  entry=SetMagickInfo("MSVG");
#if defined(MAGICKCORE_XML_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadSVGImage;
#endif
  entry->encoder=(EncodeImageHandler *) WriteSVGImage;
  entry->blob_support=MagickFalse;
  entry->seekable_stream=MagickFalse;
#if defined(MAGICKCORE_RSVG_DELEGATE)
  entry->thread_support=MagickFalse;
#endif
  entry->description=ConstantString("ImageMagick's own SVG internal renderer");
  entry->magick=(IsImageFormatHandler *) IsSVG;
  entry->magick_module=ConstantString("SVG");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
"CWE-674"
] | ImageMagick6 | 6ba5e4b1673d75988d8dde38118b495a342823c8 | 249,906,090,230,400,300,000,000,000,000,000,000,000 | 70 | [FG-VD-19-136] ImageMagick Convert SVG MacOS Denial Of Service |
/*
 * Send an SCP "E" (end of directory) request and read the remote
 * status byte.
 *
 * Fix: a nonzero status byte (1 = warning, 2 = error) is followed on
 * the wire by a textual message; previously that message was neither
 * consumed nor reported.  Read it and include it in the error string
 * (cf. the CVE-2019-14889 hardening which logs server SCP messages).
 * Also fixes the broken "%ud" format specifier (printed a stray 'd').
 *
 * Returns SSH_OK on success, SSH_ERROR otherwise (scp->state is set
 * to SSH_SCP_ERROR on any failure).
 */
int ssh_scp_leave_directory(ssh_scp scp)
{
    char buffer[] = "E\n";
    int rc;
    uint8_t code;

    if (scp == NULL) {
        return SSH_ERROR;
    }

    if (scp->state != SSH_SCP_WRITE_INITED) {
        ssh_set_error(scp->session, SSH_FATAL,
                      "ssh_scp_leave_directory called under invalid state");
        return SSH_ERROR;
    }

    rc = ssh_channel_write(scp->channel, buffer, strlen(buffer));
    if (rc == SSH_ERROR) {
        scp->state = SSH_SCP_ERROR;
        return SSH_ERROR;
    }

    rc = ssh_channel_read(scp->channel, &code, 1, 0);
    if (rc <= 0) {
        ssh_set_error(scp->session, SSH_FATAL, "Error reading status code: %s",
                      ssh_get_error(scp->session));
        scp->state = SSH_SCP_ERROR;
        return SSH_ERROR;
    }

    if (code != 0) {
        char msg[128] = {0};

        /* Warning (1) and error (2) codes are followed by a textual,
         * newline-terminated message from the server; consume it so it
         * can be reported instead of being left on the channel. */
        rc = ssh_channel_read(scp->channel, msg, sizeof(msg) - 1, 0);
        if (rc > 0) {
            msg[rc] = '\0';
        }
        ssh_set_error(scp->session, SSH_FATAL,
                      "scp status code %u not valid: %s", code, msg);
        scp->state = SSH_SCP_ERROR;
        return SSH_ERROR;
    }

    return SSH_OK;
}
Fixes T181
Previously, warnings received from the server were ignored. With this
change the warning message sent by the server will be logged.
Signed-off-by: Anderson Toshiyuki Sasaki <ansasaki@redhat.com>
Reviewed-by: Andreas Schneider <asn@cryptomilk.org>
(cherry picked from commit c75d417d06867fd792b788e6281334621c2cd335) |
/* Returns true when register regno holds a pointer-like value that an
 * unprivileged program must not be allowed to leak; scalar register
 * types (UNKNOWN_VALUE, CONST_IMM) are always permitted, and privileged
 * callers (allow_ptr_leaks) bypass the check entirely. */
static bool is_pointer_value(struct verifier_env *env, int regno)
{
	if (env->allow_ptr_leaks)
		return false;

	return env->cur_state.regs[regno].type != UNKNOWN_VALUE &&
	       env->cur_state.regs[regno].type != CONST_IMM;
}
"CWE-200"
] | linux | a1b14d27ed0965838350f1377ff97c93ee383492 | 77,984,607,317,126,090,000,000,000,000,000,000,000 | 13 | bpf: fix branch offset adjustment on backjumps after patching ctx expansion
When ctx access is used, the kernel often needs to expand/rewrite
instructions, so after that patching, branch offsets have to be
adjusted for both forward and backward jumps in the new eBPF program,
but for backward jumps it fails to account the delta. Meaning, for
example, if the expansion happens exactly on the insn that sits at
the jump target, it doesn't fix up the back jump offset.
Analysis on what the check in adjust_branches() is currently doing:
/* adjust offset of jmps if necessary */
if (i < pos && i + insn->off + 1 > pos)
insn->off += delta;
else if (i > pos && i + insn->off + 1 < pos)
insn->off -= delta;
First condition (forward jumps):
Before: After:
insns[0] insns[0]
insns[1] <--- i/insn insns[1] <--- i/insn
insns[2] <--- pos insns[P] <--- pos
insns[3] insns[P] `------| delta
insns[4] <--- target_X insns[P] `-----|
insns[5] insns[3]
insns[4] <--- target_X
insns[5]
First case is if we cross pos-boundary and the jump instruction was
before pos. This is handeled correctly. I.e. if i == pos, then this
would mean our jump that we currently check was the patchlet itself
that we just injected. Since such patchlets are self-contained and
have no awareness of any insns before or after the patched one, the
delta is correctly not adjusted. Also, for the second condition in
case of i + insn->off + 1 == pos, means we jump to that newly patched
instruction, so no offset adjustment are needed. That part is correct.
Second condition (backward jumps):
Before: After:
insns[0] insns[0]
insns[1] <--- target_X insns[1] <--- target_X
insns[2] <--- pos <-- target_Y insns[P] <--- pos <-- target_Y
insns[3] insns[P] `------| delta
insns[4] <--- i/insn insns[P] `-----|
insns[5] insns[3]
insns[4] <--- i/insn
insns[5]
Second interesting case is where we cross pos-boundary and the jump
instruction was after pos. Backward jump with i == pos would be
impossible and pose a bug somewhere in the patchlet, so the first
condition checking i > pos is okay only by itself. However, i +
insn->off + 1 < pos does not always work as intended to trigger the
adjustment. It works when jump targets would be far off where the
delta wouldn't matter. But, for example, where the fixed insn->off
before pointed to pos (target_Y), it now points to pos + delta, so
that additional room needs to be taken into account for the check.
This means that i) both tests here need to be adjusted into pos + delta,
and ii) for the second condition, the test needs to be <= as pos
itself can be a target in the backjump, too.
Fixes: 9bac3d6d548e ("bpf: allow extended BPF programs access skb fields")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net> |
// Verifies that when a computed $project on the metaField is followed by a
// $sort, optimization pushes the $project down before the unpack stage (as
// $addFields on the bucket-level 'meta' field) and internalizes the include
// projection, but leaves the $sort after unpacking because the field rename
// blocks the sort pushdown.
TEST_F(OptimizePipeline, ComputedProjectThenSortPushedDown) {
    auto pipeline = Pipeline::parse(
        makeVector(fromjson("{$_internalUnpackBucket: { exclude: [], timeField: 'time', metaField: "
                            "'myMeta', bucketMaxSpanSeconds: 3600}}"),
                   fromjson("{$project: {myMeta: '$myMeta.a'}}"),
                   fromjson("{$sort: {myMeta: 1}}")),
        getExpCtx());
    ASSERT_EQ(3u, pipeline->getSources().size());

    pipeline->optimizePipeline();

    // We should push down the $project and internalize the remaining project, but we can't do the
    // sort pushdown due to the renaming.
    auto serialized = pipeline->serializeToBson();
    ASSERT_EQ(3u, serialized.size());
    ASSERT_BSONOBJ_EQ(fromjson("{$addFields: {myMeta: '$meta.a'}}"), serialized[0]);
    ASSERT_BSONOBJ_EQ(
        fromjson(
            "{$_internalUnpackBucket: { include: ['_id', 'myMeta'], timeField: 'time', metaField: "
            "'myMeta', bucketMaxSpanSeconds: 3600, computedMetaProjFields: ['myMeta']}}"),
        serialized[1]);
    ASSERT_BSONOBJ_EQ(fromjson("{$sort: {myMeta: 1}}"), serialized[2]);
}
(cherry picked from commit 4db5eceda2cff697f35c84cd08232bac8c33beec) |
/*
 * Look up a named resource of the given type on a platform device.
 * Anonymous resources are skipped.  Returns the matching resource,
 * or NULL when none is found.
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	u32 idx;

	for (idx = 0; idx < dev->num_resources; idx++) {
		struct resource *res = &dev->resource[idx];

		if (unlikely(!res->name))
			continue;

		if (resource_type(res) == type && strcmp(res->name, name) == 0)
			return res;
	}

	return NULL;
}
"CWE-787"
] | linux | aa838896d87af561a33ecefea1caa4c15a68bc47 | 185,285,337,908,855,730,000,000,000,000,000,000,000 | 17 | drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <joe@perches.com>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
/*
 * Detect the MIME type of an archive from the first bytes of its content.
 *
 * With libmagic support the shared magic database is used (opened lazily
 * and kept for the process lifetime); otherwise a small built-in table of
 * archive signatures is consulted.  Returns a static MIME-type string,
 * or NULL when the content is not recognized.
 */
_g_mime_type_get_from_content (char *buffer,
			       gsize  buffer_size)
{
#if ENABLE_MAGIC
	/* lazily-initialized, never closed: shared for the whole process */
	static magic_t magic = NULL;

	if (magic == NULL) {
		magic = magic_open (MAGIC_MIME_TYPE);
		if (magic != NULL)
			magic_load (magic, NULL);
		else
			g_warning ("unable to open magic database");
	}

	if (magic != NULL) {
		const char * mime_type;

		mime_type = magic_buffer (magic, buffer, buffer_size);
		if (mime_type)
			return mime_type;

		g_warning ("unable to detect filetype from magic: %s", magic_error (magic));
	}
#else
	/* signature table: byte offset, signature length, signature bytes,
	 * and the MIME type to report on a match */
	static const struct magic {
		const unsigned int off;
		const unsigned int len;
		const char * const id;
		const char * const mime_type;
	}
	magic_ids [] = {
		/* magic ids taken from magic/Magdir/archive from the file-4.21 tarball */
		{ 0,  6, "7z\274\257\047\034",                   "application/x-7z-compressed" },
		{ 7,  7, "**ACE**",                              "application/x-ace"           },
		{ 0,  2, "\x60\xea",                             "application/x-arj"           },
		{ 0,  3, "BZh",                                  "application/x-bzip2"         },
		{ 0,  2, "\037\213",                             "application/x-gzip"          },
		{ 0,  4, "LZIP",                                 "application/x-lzip"          },
		{ 0,  9, "\x89\x4c\x5a\x4f\x00\x0d\x0a\x1a\x0a", "application/x-lzop",         },
		{ 0,  4, "Rar!",                                 "application/x-rar"           },
		{ 0,  4, "RZIP",                                 "application/x-rzip"          },
		{ 0,  6, "\3757zXZ\000",                         "application/x-xz"            },
		{ 20, 4, "\xdc\xa7\xc4\xfd",                     "application/x-zoo",          },
		{ 0,  4, "PK\003\004",                           "application/zip"             },
		{ 0,  8, "PK00PK\003\004",                       "application/zip"             },
		{ 0,  4, "LRZI",                                 "application/x-lrzip"         },
	};

	int  i;

	for (i = 0; i < G_N_ELEMENTS (magic_ids); i++) {
		const struct magic * const magic = &magic_ids[i];

		/* never compare past the end of the supplied buffer */
		if ((magic->off + magic->len) > buffer_size)
			g_warning ("buffer underrun for mime-type '%s' magic", magic->mime_type);
		else if (! memcmp (buffer + magic->off, magic->id, magic->len))
			return magic->mime_type;
	}
#endif

	return NULL;
}
"CWE-22"
] | file-roller | b147281293a8307808475e102a14857055f81631 | 55,563,981,224,574,240,000,000,000,000,000,000,000 | 66 | libarchive: sanitize filenames before extracting |
// Verifies that when a listener filter stops iteration and its timeout
// fires, the handler records the pre-connection timeout but still continues
// to attempt connection creation (observed via the filter-chain lookup and
// the no_filter_chain_match counter) rather than abandoning the socket.
TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) {
  InSequence s;

  // Listener with a 15s listener-filter timeout and continue-on-timeout set.
  TestListener* test_listener =
      addListener(1, true, false, "test_listener", Network::Address::SocketType::Stream,
                  std::chrono::milliseconds(15000), true);
  Network::MockListener* listener = new Network::MockListener();
  Network::ListenerCallbacks* listener_callbacks;
  EXPECT_CALL(dispatcher_, createListener_(_, _, _))
      .WillOnce(
          Invoke([&](Network::Socket&, Network::ListenerCallbacks& cb, bool) -> Network::Listener* {
            listener_callbacks = &cb;
            return listener;
          }));
  EXPECT_CALL(test_listener->socket_, localAddress());
  handler_->addListener(*test_listener);

  // The filter stops iteration so the accept stays pending until the timer.
  Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();
  EXPECT_CALL(factory_, createListenerFilterChain(_))
      .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {
        manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter});
        return true;
      }));
  EXPECT_CALL(*test_filter, onAccept(_))
      .WillOnce(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus {
        return Network::FilterStatus::StopIteration;
      }));
  Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();
  Network::IoSocketHandleImpl io_handle{42};
  EXPECT_CALL(*accepted_socket, ioHandle()).WillRepeatedly(ReturnRef(io_handle));
  Event::MockTimer* timeout = new Event::MockTimer(&dispatcher_);
  EXPECT_CALL(*timeout, enableTimer(std::chrono::milliseconds(15000), _));
  listener_callbacks->onAccept(Network::ConnectionSocketPtr{accepted_socket});
  Stats::Gauge& downstream_pre_cx_active =
      stats_store_.gauge("downstream_pre_cx_active", Stats::Gauge::ImportMode::Accumulate);
  EXPECT_EQ(1UL, downstream_pre_cx_active.value());

  // Firing the timeout must proceed to filter-chain lookup, not drop the socket.
  EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr));
  EXPECT_CALL(*timeout, disableTimer());
  timeout->invokeCallback();
  dispatcher_.clearDeferredDeleteList();
  EXPECT_EQ(0UL, downstream_pre_cx_active.value());
  EXPECT_EQ(1UL, stats_store_.counter("downstream_pre_cx_timeout").value());

  // Make sure we continued to try create connection.
  EXPECT_EQ(1UL, stats_store_.counter("no_filter_chain_match").value());

  EXPECT_CALL(*listener, onDestroy());
}
"CWE-835"
] | envoy | c8de199e2971f79cbcbc6b5eadc8c566b28705d1 | 292,475,617,864,940,530,000,000,000,000,000,000,000 | 49 | listener: clean up accept filter before creating connection (#8922)
Signed-off-by: Yuchen Dai <silentdai@gmail.com> |
/*
 * Convert a CIE L*u*v* color (D65 white point) to CIE XYZ tristimulus
 * values.  X, Y and Z are required output pointers.
 */
static inline void ConvertLuvToXYZ(const double L,const double u,const double v,
  double *X,double *Y,double *Z)
{
  double
    gamma;

  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  /* invert the L* companding: cube branch above the CIE epsilon knee */
  if (L > (CIEK*CIEEpsilon))
    *Y=(double) pow((L+16.0)/116.0,3.0);
  else
    *Y=L/CIEK;
  /* recover X and Z from the u, v chromaticities relative to the D65
     white point; PerceptibleReciprocal guards against division by zero */
  gamma=PerceptibleReciprocal((((52.0*L/(u+13.0*L*(4.0*D65X/(D65X+15.0*D65Y+
    3.0*D65Z))))-1.0)/3.0)-(-1.0/3.0));
  *X=gamma*((*Y*((39.0*L/(v+13.0*L*(9.0*D65Y/(D65X+15.0*D65Y+3.0*D65Z))))-5.0))+
    5.0*(*Y));
  *Z=(*X*(((52.0*L/(u+13.0*L*(4.0*D65X/(D65X+15.0*D65Y+3.0*D65Z))))-1.0)/3.0))-
    5.0*(*Y);
}
/*
 * Sign a metrics buffer with HMAC-SHA256 and send it.
 *
 * Wire layout: [signature part header | 32-byte HMAC] [username] [payload].
 * The HMAC (keyed with the configured password) covers username + payload.
 *
 * Fix: the early return on an over-long username leaked the open
 * gcry_md handle; it is now closed on that path.
 */
static void network_send_buffer_signed(sockent_t *se, /* {{{ */
                                       const char *in_buffer,
                                       size_t in_buffer_size) {
  char buffer[BUFF_SIG_SIZE + in_buffer_size];
  size_t buffer_offset;
  size_t username_len;

  gcry_md_hd_t hd;
  gcry_error_t err;
  unsigned char *hash;

  hd = NULL;
  err = gcry_md_open(&hd, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC);
  if (err != 0) {
    ERROR("network plugin: Creating HMAC object failed: %s",
          gcry_strerror(err));
    return;
  }

  err = gcry_md_setkey(hd, se->data.client.password,
                       strlen(se->data.client.password));
  if (err != 0) {
    ERROR("network plugin: gcry_md_setkey failed: %s", gcry_strerror(err));
    gcry_md_close(hd);
    return;
  }

  username_len = strlen(se->data.client.username);
  if (username_len > (BUFF_SIG_SIZE - PART_SIGNATURE_SHA256_SIZE)) {
    ERROR("network plugin: Username too long: %s", se->data.client.username);
    /* fix: release the HMAC handle on this error path too */
    gcry_md_close(hd);
    return;
  }

  /* Assemble [header placeholder][username][payload] before hashing. */
  memcpy(buffer + PART_SIGNATURE_SHA256_SIZE, se->data.client.username,
         username_len);
  memcpy(buffer + PART_SIGNATURE_SHA256_SIZE + username_len, in_buffer,
         in_buffer_size);

  /* Initialize the `ps' structure. */
  part_signature_sha256_t ps = {
      .head.type = htons(TYPE_SIGN_SHA256),
      .head.length = htons(PART_SIGNATURE_SHA256_SIZE + username_len)};

  /* Calculate the hash value over username + payload. */
  gcry_md_write(hd, buffer + PART_SIGNATURE_SHA256_SIZE,
                username_len + in_buffer_size);
  hash = gcry_md_read(hd, GCRY_MD_SHA256);
  if (hash == NULL) {
    ERROR("network plugin: gcry_md_read failed.");
    gcry_md_close(hd);
    return;
  }
  memcpy(ps.hash, hash, sizeof(ps.hash));

  /* Add the header */
  buffer_offset = 0;
  BUFFER_ADD(&ps.head.type, sizeof(ps.head.type));
  BUFFER_ADD(&ps.head.length, sizeof(ps.head.length));
  BUFFER_ADD(ps.hash, sizeof(ps.hash));
  assert(buffer_offset == PART_SIGNATURE_SHA256_SIZE);

  gcry_md_close(hd);
  hd = NULL;

  buffer_offset = PART_SIGNATURE_SHA256_SIZE + username_len + in_buffer_size;
  network_send_buffer_plain(se, buffer, buffer_offset);
} /* }}} void network_send_buffer_signed */
"CWE-835"
] | collectd | f6be4f9b49b949b379326c3d7002476e6ce4f211 | 102,521,483,034,557,880,000,000,000,000,000,000,000 | 69 | network plugin: Fix endless loop DOS in parse_packet()
When correct 'Signature part' is received by Collectd, configured without
AuthFile option, condition for endless loop occurs due to missing increase
of pointer to next unprocessed part.
This is a forward-port of #2233.
Fixes: CVE-2017-7401
Closes: #2174
Signed-off-by: Florian Forster <octo@collectd.org> |
  // Return the next byte from the read-ahead buffer, refilling via
  // fillBuf() only when the buffer is exhausted; yields EOF when the
  // refill produces no more data.
  virtual int getChar()
    { return (bufIdx >= bufSize && !fillBuf()) ? EOF : buf[bufIdx++]; }
Fixes bug 13518 |
/*
 * Print information extracted from an ELF core file's auxiliary vector
 * (NT_AUXV) note: exec filename, platform string, and real/effective
 * uid/gid.  Only runs for recognized core files whose core style was
 * already determined; processing is capped at 50 auxv entries as a
 * denial-of-service guard.
 *
 * Returns 1 when the note was handled (or an error was reported),
 * 0 when the note is not an applicable auxv note or ELFCORE support
 * is compiled out.
 */
do_auxv_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
    int swap, uint32_t namesz __attribute__((__unused__)),
    uint32_t descsz __attribute__((__unused__)),
    size_t noff __attribute__((__unused__)), size_t doff,
    int *flags, size_t size __attribute__((__unused__)), int clazz,
    int fd, off_t ph_off, int ph_num, off_t fsize)
{
#ifdef ELFCORE
	Aux32Info auxv32;
	Aux64Info auxv64;
	size_t elsize = xauxv_sizeof;
	const char *tag;
	int is_string;
	size_t nval;

	/* only proceed once the file is known to be a core and its
	 * style (SVR4/NetBSD/FreeBSD) has been determined */
	if ((*flags & (FLAGS_IS_CORE|FLAGS_DID_CORE_STYLE)) !=
	    (FLAGS_IS_CORE|FLAGS_DID_CORE_STYLE))
		return 0;

	switch (*flags & FLAGS_CORE_STYLE) {
	case OS_STYLE_SVR4:
		if (type != NT_AUXV)
			return 0;
		break;
#ifdef notyet
	case OS_STYLE_NETBSD:
		if (type != NT_NETBSD_CORE_AUXV)
			return 0;
		break;
	case OS_STYLE_FREEBSD:
		if (type != NT_FREEBSD_PROCSTAT_AUXV)
			return 0;
		break;
#endif
	default:
		return 0;
	}

	*flags |= FLAGS_DID_AUXV;
	nval = 0;
	/* walk the (type, value) auxv pairs in the note's descriptor */
	for (size_t off = 0; off + elsize <= descsz; off += elsize) {
		memcpy(xauxv_addr, &nbuf[doff + off], xauxv_sizeof);
		/* Limit processing to 50 vector entries to prevent DoS */
		if (nval++ >= 50) {
			file_error(ms, 0, "Too many ELF Auxv elements");
			return 1;
		}

		switch(xauxv_type) {
		case AT_LINUX_EXECFN:
			is_string = 1;
			tag = "execfn";
			break;
		case AT_LINUX_PLATFORM:
			is_string = 1;
			tag = "platform";
			break;
		case AT_LINUX_UID:
			is_string = 0;
			tag = "real uid";
			break;
		case AT_LINUX_GID:
			is_string = 0;
			tag = "real gid";
			break;
		case AT_LINUX_EUID:
			is_string = 0;
			tag = "effective uid";
			break;
		case AT_LINUX_EGID:
			is_string = 0;
			tag = "effective gid";
			break;
		default:
			is_string = 0;
			tag = NULL;
			break;
		}

		if (tag == NULL)
			continue;

		if (is_string) {
			char buf[256];
			ssize_t buflen;
			/* string entries carry a virtual address that must
			 * be resolved through the program headers */
			buflen = get_string_on_virtaddr(ms, swap, clazz, fd,
			    ph_off, ph_num, fsize, xauxv_val, buf, sizeof(buf));
			if (buflen == 0)
				continue;

			if (file_printf(ms, ", %s: '%s'", tag, buf) == -1)
				return 0;
		} else {
			if (file_printf(ms, ", %s: %d", tag, (int) xauxv_val)
			    == -1)
				return 0;
		}
	}
	return 1;
#else
	return 0;
#endif
}
"CWE-787"
] | file | d65781527c8134a1202b2649695d48d5701ac60b | 285,961,543,139,075,930,000,000,000,000,000,000,000 | 105 | PR/62: spinpx: limit size of file_printable. |
/*
 * Module initialization for ext/curl.
 *
 * Registers the three resource-list destructors (curl, curl_multi,
 * curl_share), the extension's INI entries, and every CURL* constant that
 * the libcurl version PHP was built against provides — each group is gated
 * on LIBCURL_VERSION_NUM, mirroring libcurl's symbols-in-versions table
 * referenced below. Finally installs thread-safety callbacks for the SSL
 * backend where required and calls curl_global_init().
 *
 * Returns SUCCESS, or FAILURE when the OpenSSL mutex table cannot be
 * allocated or curl_global_init() fails.
 */
PHP_MINIT_FUNCTION(curl)
{
	le_curl = zend_register_list_destructors_ex(_php_curl_close, NULL, "curl", module_number);
	le_curl_multi_handle = zend_register_list_destructors_ex(_php_curl_multi_close, NULL, "curl_multi", module_number);
	le_curl_share_handle = zend_register_list_destructors_ex(_php_curl_share_close, NULL, "curl_share", module_number);

	REGISTER_INI_ENTRIES();

	/* See http://curl.haxx.se/lxr/source/docs/libcurl/symbols-in-versions
	   or curl src/docs/libcurl/symbols-in-versions for a (almost) complete list
	   of options and which version they were introduced */

	/* Constants for curl_setopt() */
	REGISTER_CURL_CONSTANT(CURLOPT_AUTOREFERER);
	REGISTER_CURL_CONSTANT(CURLOPT_BINARYTRANSFER);
	REGISTER_CURL_CONSTANT(CURLOPT_BUFFERSIZE);
	REGISTER_CURL_CONSTANT(CURLOPT_CAINFO);
	REGISTER_CURL_CONSTANT(CURLOPT_CAPATH);
	REGISTER_CURL_CONSTANT(CURLOPT_CONNECTTIMEOUT);
	REGISTER_CURL_CONSTANT(CURLOPT_COOKIE);
	REGISTER_CURL_CONSTANT(CURLOPT_COOKIEFILE);
	REGISTER_CURL_CONSTANT(CURLOPT_COOKIEJAR);
	REGISTER_CURL_CONSTANT(CURLOPT_COOKIESESSION);
	REGISTER_CURL_CONSTANT(CURLOPT_CRLF);
	REGISTER_CURL_CONSTANT(CURLOPT_CUSTOMREQUEST);
	REGISTER_CURL_CONSTANT(CURLOPT_DNS_CACHE_TIMEOUT);
	REGISTER_CURL_CONSTANT(CURLOPT_DNS_USE_GLOBAL_CACHE);
	REGISTER_CURL_CONSTANT(CURLOPT_EGDSOCKET);
	REGISTER_CURL_CONSTANT(CURLOPT_ENCODING);
	REGISTER_CURL_CONSTANT(CURLOPT_FAILONERROR);
	REGISTER_CURL_CONSTANT(CURLOPT_FILE);
	REGISTER_CURL_CONSTANT(CURLOPT_FILETIME);
	REGISTER_CURL_CONSTANT(CURLOPT_FOLLOWLOCATION);
	REGISTER_CURL_CONSTANT(CURLOPT_FORBID_REUSE);
	REGISTER_CURL_CONSTANT(CURLOPT_FRESH_CONNECT);
	REGISTER_CURL_CONSTANT(CURLOPT_FTPAPPEND);
	REGISTER_CURL_CONSTANT(CURLOPT_FTPLISTONLY);
	REGISTER_CURL_CONSTANT(CURLOPT_FTPPORT);
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_EPRT);
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_EPSV);
	REGISTER_CURL_CONSTANT(CURLOPT_HEADER);
	REGISTER_CURL_CONSTANT(CURLOPT_HEADERFUNCTION);
	REGISTER_CURL_CONSTANT(CURLOPT_HTTP200ALIASES);
	REGISTER_CURL_CONSTANT(CURLOPT_HTTPGET);
	REGISTER_CURL_CONSTANT(CURLOPT_HTTPHEADER);
	REGISTER_CURL_CONSTANT(CURLOPT_HTTPPROXYTUNNEL);
	REGISTER_CURL_CONSTANT(CURLOPT_HTTP_VERSION);
	REGISTER_CURL_CONSTANT(CURLOPT_INFILE);
	REGISTER_CURL_CONSTANT(CURLOPT_INFILESIZE);
	REGISTER_CURL_CONSTANT(CURLOPT_INTERFACE);
	REGISTER_CURL_CONSTANT(CURLOPT_KRB4LEVEL);
	REGISTER_CURL_CONSTANT(CURLOPT_LOW_SPEED_LIMIT);
	REGISTER_CURL_CONSTANT(CURLOPT_LOW_SPEED_TIME);
	REGISTER_CURL_CONSTANT(CURLOPT_MAXCONNECTS);
	REGISTER_CURL_CONSTANT(CURLOPT_MAXREDIRS);
	REGISTER_CURL_CONSTANT(CURLOPT_NETRC);
	REGISTER_CURL_CONSTANT(CURLOPT_NOBODY);
	REGISTER_CURL_CONSTANT(CURLOPT_NOPROGRESS);
	REGISTER_CURL_CONSTANT(CURLOPT_NOSIGNAL);
	REGISTER_CURL_CONSTANT(CURLOPT_PORT);
	REGISTER_CURL_CONSTANT(CURLOPT_POST);
	REGISTER_CURL_CONSTANT(CURLOPT_POSTFIELDS);
	REGISTER_CURL_CONSTANT(CURLOPT_POSTQUOTE);
	REGISTER_CURL_CONSTANT(CURLOPT_PREQUOTE);
	REGISTER_CURL_CONSTANT(CURLOPT_PRIVATE);
	REGISTER_CURL_CONSTANT(CURLOPT_PROGRESSFUNCTION);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXY);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXYPORT);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXYTYPE);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXYUSERPWD);
	REGISTER_CURL_CONSTANT(CURLOPT_PUT);
	REGISTER_CURL_CONSTANT(CURLOPT_QUOTE);
	REGISTER_CURL_CONSTANT(CURLOPT_RANDOM_FILE);
	REGISTER_CURL_CONSTANT(CURLOPT_RANGE);
	REGISTER_CURL_CONSTANT(CURLOPT_READDATA);
	REGISTER_CURL_CONSTANT(CURLOPT_READFUNCTION);
	REGISTER_CURL_CONSTANT(CURLOPT_REFERER);
	REGISTER_CURL_CONSTANT(CURLOPT_RESUME_FROM);
	REGISTER_CURL_CONSTANT(CURLOPT_RETURNTRANSFER);
	REGISTER_CURL_CONSTANT(CURLOPT_SHARE);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLCERT);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLCERTPASSWD);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLCERTTYPE);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLENGINE);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLENGINE_DEFAULT);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLKEY);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLKEYPASSWD);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLKEYTYPE);
	REGISTER_CURL_CONSTANT(CURLOPT_SSLVERSION);
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_CIPHER_LIST);
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYHOST);
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYPEER);
	REGISTER_CURL_CONSTANT(CURLOPT_STDERR);
	REGISTER_CURL_CONSTANT(CURLOPT_TELNETOPTIONS);
	REGISTER_CURL_CONSTANT(CURLOPT_TIMECONDITION);
	REGISTER_CURL_CONSTANT(CURLOPT_TIMEOUT);
	REGISTER_CURL_CONSTANT(CURLOPT_TIMEVALUE);
	REGISTER_CURL_CONSTANT(CURLOPT_TRANSFERTEXT);
	REGISTER_CURL_CONSTANT(CURLOPT_UNRESTRICTED_AUTH);
	REGISTER_CURL_CONSTANT(CURLOPT_UPLOAD);
	REGISTER_CURL_CONSTANT(CURLOPT_URL);
	REGISTER_CURL_CONSTANT(CURLOPT_USERAGENT);
	REGISTER_CURL_CONSTANT(CURLOPT_USERPWD);
	REGISTER_CURL_CONSTANT(CURLOPT_VERBOSE);
	REGISTER_CURL_CONSTANT(CURLOPT_WRITEFUNCTION);
	REGISTER_CURL_CONSTANT(CURLOPT_WRITEHEADER);

	/* Error codes returned by curl_errno() */
	REGISTER_CURL_CONSTANT(CURLE_ABORTED_BY_CALLBACK);
	REGISTER_CURL_CONSTANT(CURLE_BAD_CALLING_ORDER);
	REGISTER_CURL_CONSTANT(CURLE_BAD_CONTENT_ENCODING);
	REGISTER_CURL_CONSTANT(CURLE_BAD_DOWNLOAD_RESUME);
	REGISTER_CURL_CONSTANT(CURLE_BAD_FUNCTION_ARGUMENT);
	REGISTER_CURL_CONSTANT(CURLE_BAD_PASSWORD_ENTERED);
	REGISTER_CURL_CONSTANT(CURLE_COULDNT_CONNECT);
	REGISTER_CURL_CONSTANT(CURLE_COULDNT_RESOLVE_HOST);
	REGISTER_CURL_CONSTANT(CURLE_COULDNT_RESOLVE_PROXY);
	REGISTER_CURL_CONSTANT(CURLE_FAILED_INIT);
	REGISTER_CURL_CONSTANT(CURLE_FILE_COULDNT_READ_FILE);
	REGISTER_CURL_CONSTANT(CURLE_FTP_ACCESS_DENIED);
	REGISTER_CURL_CONSTANT(CURLE_FTP_BAD_DOWNLOAD_RESUME);
	REGISTER_CURL_CONSTANT(CURLE_FTP_CANT_GET_HOST);
	REGISTER_CURL_CONSTANT(CURLE_FTP_CANT_RECONNECT);
	REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_GET_SIZE);
	REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_RETR_FILE);
	REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_SET_ASCII);
	REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_SET_BINARY);
	REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_STOR_FILE);
	REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_USE_REST);
	REGISTER_CURL_CONSTANT(CURLE_FTP_PARTIAL_FILE);
	REGISTER_CURL_CONSTANT(CURLE_FTP_PORT_FAILED);
	REGISTER_CURL_CONSTANT(CURLE_FTP_QUOTE_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_FTP_USER_PASSWORD_INCORRECT);
	REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_227_FORMAT);
	REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_PASS_REPLY);
	REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_PASV_REPLY);
	REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_SERVER_REPLY);
	REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_USER_REPLY);
	REGISTER_CURL_CONSTANT(CURLE_FTP_WRITE_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_FUNCTION_NOT_FOUND);
	REGISTER_CURL_CONSTANT(CURLE_GOT_NOTHING);
	REGISTER_CURL_CONSTANT(CURLE_HTTP_NOT_FOUND);
	REGISTER_CURL_CONSTANT(CURLE_HTTP_PORT_FAILED);
	REGISTER_CURL_CONSTANT(CURLE_HTTP_POST_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_HTTP_RANGE_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_HTTP_RETURNED_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_LDAP_CANNOT_BIND);
	REGISTER_CURL_CONSTANT(CURLE_LDAP_SEARCH_FAILED);
	REGISTER_CURL_CONSTANT(CURLE_LIBRARY_NOT_FOUND);
	REGISTER_CURL_CONSTANT(CURLE_MALFORMAT_USER);
	REGISTER_CURL_CONSTANT(CURLE_OBSOLETE);
	REGISTER_CURL_CONSTANT(CURLE_OK);
	REGISTER_CURL_CONSTANT(CURLE_OPERATION_TIMEDOUT);
	REGISTER_CURL_CONSTANT(CURLE_OPERATION_TIMEOUTED);
	REGISTER_CURL_CONSTANT(CURLE_OUT_OF_MEMORY);
	REGISTER_CURL_CONSTANT(CURLE_PARTIAL_FILE);
	REGISTER_CURL_CONSTANT(CURLE_READ_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_RECV_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_SEND_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_SHARE_IN_USE);
	REGISTER_CURL_CONSTANT(CURLE_SSL_CACERT);
	REGISTER_CURL_CONSTANT(CURLE_SSL_CERTPROBLEM);
	REGISTER_CURL_CONSTANT(CURLE_SSL_CIPHER);
	REGISTER_CURL_CONSTANT(CURLE_SSL_CONNECT_ERROR);
	REGISTER_CURL_CONSTANT(CURLE_SSL_ENGINE_NOTFOUND);
	REGISTER_CURL_CONSTANT(CURLE_SSL_ENGINE_SETFAILED);
	REGISTER_CURL_CONSTANT(CURLE_SSL_PEER_CERTIFICATE);
	REGISTER_CURL_CONSTANT(CURLE_TELNET_OPTION_SYNTAX);
	REGISTER_CURL_CONSTANT(CURLE_TOO_MANY_REDIRECTS);
	REGISTER_CURL_CONSTANT(CURLE_UNKNOWN_TELNET_OPTION);
	REGISTER_CURL_CONSTANT(CURLE_UNSUPPORTED_PROTOCOL);
	REGISTER_CURL_CONSTANT(CURLE_URL_MALFORMAT);
	REGISTER_CURL_CONSTANT(CURLE_URL_MALFORMAT_USER);
	REGISTER_CURL_CONSTANT(CURLE_WRITE_ERROR);

	/* cURL info constants */
	REGISTER_CURL_CONSTANT(CURLINFO_CONNECT_TIME);
	REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_LENGTH_DOWNLOAD);
	REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_LENGTH_UPLOAD);
	REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_TYPE);
	REGISTER_CURL_CONSTANT(CURLINFO_EFFECTIVE_URL);
	REGISTER_CURL_CONSTANT(CURLINFO_FILETIME);
	REGISTER_CURL_CONSTANT(CURLINFO_HEADER_OUT);
	REGISTER_CURL_CONSTANT(CURLINFO_HEADER_SIZE);
	REGISTER_CURL_CONSTANT(CURLINFO_HTTP_CODE);
	REGISTER_CURL_CONSTANT(CURLINFO_LASTONE);
	REGISTER_CURL_CONSTANT(CURLINFO_NAMELOOKUP_TIME);
	REGISTER_CURL_CONSTANT(CURLINFO_PRETRANSFER_TIME);
	REGISTER_CURL_CONSTANT(CURLINFO_PRIVATE);
	REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_COUNT);
	REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_TIME);
	REGISTER_CURL_CONSTANT(CURLINFO_REQUEST_SIZE);
	REGISTER_CURL_CONSTANT(CURLINFO_SIZE_DOWNLOAD);
	REGISTER_CURL_CONSTANT(CURLINFO_SIZE_UPLOAD);
	REGISTER_CURL_CONSTANT(CURLINFO_SPEED_DOWNLOAD);
	REGISTER_CURL_CONSTANT(CURLINFO_SPEED_UPLOAD);
	REGISTER_CURL_CONSTANT(CURLINFO_SSL_VERIFYRESULT);
	REGISTER_CURL_CONSTANT(CURLINFO_STARTTRANSFER_TIME);
	REGISTER_CURL_CONSTANT(CURLINFO_TOTAL_TIME);

	/* Other */
	REGISTER_CURL_CONSTANT(CURLMSG_DONE);
	REGISTER_CURL_CONSTANT(CURLVERSION_NOW);

	/* Curl Multi Constants */
	REGISTER_CURL_CONSTANT(CURLM_BAD_EASY_HANDLE);
	REGISTER_CURL_CONSTANT(CURLM_BAD_HANDLE);
	REGISTER_CURL_CONSTANT(CURLM_CALL_MULTI_PERFORM);
	REGISTER_CURL_CONSTANT(CURLM_INTERNAL_ERROR);
	REGISTER_CURL_CONSTANT(CURLM_OK);
	REGISTER_CURL_CONSTANT(CURLM_OUT_OF_MEMORY);
#if LIBCURL_VERSION_NUM >= 0x072001 /* Available since 7.32.1 */
	REGISTER_CURL_CONSTANT(CURLM_ADDED_ALREADY);
#endif

	/* Curl proxy constants */
	REGISTER_CURL_CONSTANT(CURLPROXY_HTTP);
	REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS4);
	REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS5);

	/* Curl Share constants */
	REGISTER_CURL_CONSTANT(CURLSHOPT_NONE);
	REGISTER_CURL_CONSTANT(CURLSHOPT_SHARE);
	REGISTER_CURL_CONSTANT(CURLSHOPT_UNSHARE);

	/* Curl Http Version constants (CURLOPT_HTTP_VERSION) */
	REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_1_0);
	REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_1_1);
	REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_NONE);

	/* Curl Lock constants */
	REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_COOKIE);
	REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_DNS);
	REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_SSL_SESSION);

	/* Curl NETRC constants (CURLOPT_NETRC) */
	REGISTER_CURL_CONSTANT(CURL_NETRC_IGNORED);
	REGISTER_CURL_CONSTANT(CURL_NETRC_OPTIONAL);
	REGISTER_CURL_CONSTANT(CURL_NETRC_REQUIRED);

	/* Curl SSL Version constants (CURLOPT_SSLVERSION) */
	REGISTER_CURL_CONSTANT(CURL_SSLVERSION_DEFAULT);
	REGISTER_CURL_CONSTANT(CURL_SSLVERSION_SSLv2);
	REGISTER_CURL_CONSTANT(CURL_SSLVERSION_SSLv3);
	REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1);

	/* Curl TIMECOND constants (CURLOPT_TIMECONDITION) */
	REGISTER_CURL_CONSTANT(CURL_TIMECOND_IFMODSINCE);
	REGISTER_CURL_CONSTANT(CURL_TIMECOND_IFUNMODSINCE);
	REGISTER_CURL_CONSTANT(CURL_TIMECOND_LASTMOD);
	REGISTER_CURL_CONSTANT(CURL_TIMECOND_NONE);

	/* Curl version constants */
	REGISTER_CURL_CONSTANT(CURL_VERSION_IPV6);
	REGISTER_CURL_CONSTANT(CURL_VERSION_KERBEROS4);
	REGISTER_CURL_CONSTANT(CURL_VERSION_LIBZ);
	REGISTER_CURL_CONSTANT(CURL_VERSION_SSL);

	/* Version-gated constants below: each block mirrors the libcurl release
	   that introduced the corresponding symbols. */
#if LIBCURL_VERSION_NUM >= 0x070a06 /* Available since 7.10.6 */
	REGISTER_CURL_CONSTANT(CURLOPT_HTTPAUTH);
	/* http authentication options */
	REGISTER_CURL_CONSTANT(CURLAUTH_ANY);
	REGISTER_CURL_CONSTANT(CURLAUTH_ANYSAFE);
	REGISTER_CURL_CONSTANT(CURLAUTH_BASIC);
	REGISTER_CURL_CONSTANT(CURLAUTH_DIGEST);
	REGISTER_CURL_CONSTANT(CURLAUTH_GSSNEGOTIATE);
	REGISTER_CURL_CONSTANT(CURLAUTH_NONE);
	REGISTER_CURL_CONSTANT(CURLAUTH_NTLM);
#endif

#if LIBCURL_VERSION_NUM >= 0x070a07 /* Available since 7.10.7 */
	REGISTER_CURL_CONSTANT(CURLINFO_HTTP_CONNECTCODE);
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_CREATE_MISSING_DIRS);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXYAUTH);
#endif

#if LIBCURL_VERSION_NUM >= 0x070a08 /* Available since 7.10.8 */
	REGISTER_CURL_CONSTANT(CURLE_FILESIZE_EXCEEDED);
	REGISTER_CURL_CONSTANT(CURLE_LDAP_INVALID_URL);
	REGISTER_CURL_CONSTANT(CURLINFO_HTTPAUTH_AVAIL);
	REGISTER_CURL_CONSTANT(CURLINFO_RESPONSE_CODE);
	REGISTER_CURL_CONSTANT(CURLINFO_PROXYAUTH_AVAIL);
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_RESPONSE_TIMEOUT);
	REGISTER_CURL_CONSTANT(CURLOPT_IPRESOLVE);
	REGISTER_CURL_CONSTANT(CURLOPT_MAXFILESIZE);
	REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_V4);
	REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_V6);
	REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_WHATEVER);
#endif

#if LIBCURL_VERSION_NUM >= 0x070b00 /* Available since 7.11.0 */
	REGISTER_CURL_CONSTANT(CURLE_FTP_SSL_FAILED);
	REGISTER_CURL_CONSTANT(CURLFTPSSL_ALL);
	REGISTER_CURL_CONSTANT(CURLFTPSSL_CONTROL);
	REGISTER_CURL_CONSTANT(CURLFTPSSL_NONE);
	REGISTER_CURL_CONSTANT(CURLFTPSSL_TRY);
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_SSL);
	REGISTER_CURL_CONSTANT(CURLOPT_NETRC_FILE);
#endif

#if LIBCURL_VERSION_NUM >= 0x070c02 /* Available since 7.12.2 */
	REGISTER_CURL_CONSTANT(CURLFTPAUTH_DEFAULT);
	REGISTER_CURL_CONSTANT(CURLFTPAUTH_SSL);
	REGISTER_CURL_CONSTANT(CURLFTPAUTH_TLS);
	REGISTER_CURL_CONSTANT(CURLOPT_FTPSSLAUTH);
#endif

#if LIBCURL_VERSION_NUM >= 0x070d00 /* Available since 7.13.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_ACCOUNT);
#endif

#if LIBCURL_VERSION_NUM >= 0x070b02 /* Available since 7.11.2 */
	REGISTER_CURL_CONSTANT(CURLOPT_TCP_NODELAY);
#endif

#if LIBCURL_VERSION_NUM >= 0x070c02 /* Available since 7.12.2 */
	REGISTER_CURL_CONSTANT(CURLINFO_OS_ERRNO);
#endif

#if LIBCURL_VERSION_NUM >= 0x070c03 /* Available since 7.12.3 */
	REGISTER_CURL_CONSTANT(CURLINFO_NUM_CONNECTS);
	REGISTER_CURL_CONSTANT(CURLINFO_SSL_ENGINES);
#endif

#if LIBCURL_VERSION_NUM >= 0x070e01 /* Available since 7.14.1 */
	REGISTER_CURL_CONSTANT(CURLINFO_COOKIELIST);
	REGISTER_CURL_CONSTANT(CURLOPT_COOKIELIST);
	REGISTER_CURL_CONSTANT(CURLOPT_IGNORE_CONTENT_LENGTH);
#endif

#if LIBCURL_VERSION_NUM >= 0x070f00 /* Available since 7.15.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_SKIP_PASV_IP);
#endif

#if LIBCURL_VERSION_NUM >= 0x070f01 /* Available since 7.15.1 */
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_FILEMETHOD);
#endif

#if LIBCURL_VERSION_NUM >= 0x070f02 /* Available since 7.15.2 */
	REGISTER_CURL_CONSTANT(CURLOPT_CONNECT_ONLY);
	REGISTER_CURL_CONSTANT(CURLOPT_LOCALPORT);
	REGISTER_CURL_CONSTANT(CURLOPT_LOCALPORTRANGE);
#endif

#if LIBCURL_VERSION_NUM >= 0x070f03 /* Available since 7.15.3 */
	REGISTER_CURL_CONSTANT(CURLFTPMETHOD_MULTICWD);
	REGISTER_CURL_CONSTANT(CURLFTPMETHOD_NOCWD);
	REGISTER_CURL_CONSTANT(CURLFTPMETHOD_SINGLECWD);
#endif

#if LIBCURL_VERSION_NUM >= 0x070f04 /* Available since 7.15.4 */
	REGISTER_CURL_CONSTANT(CURLINFO_FTP_ENTRY_PATH);
#endif

#if LIBCURL_VERSION_NUM >= 0x070f05 /* Available since 7.15.5 */
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_ALTERNATIVE_TO_USER);
	REGISTER_CURL_CONSTANT(CURLOPT_MAX_RECV_SPEED_LARGE);
	REGISTER_CURL_CONSTANT(CURLOPT_MAX_SEND_SPEED_LARGE);
#endif

#if LIBCURL_VERSION_NUM >= 0x071000 /* Available since 7.16.0 */
	REGISTER_CURL_CONSTANT(CURLE_SSL_CACERT_BADFILE);
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_SESSIONID_CACHE);
	REGISTER_CURL_CONSTANT(CURLMOPT_PIPELINING);
#endif

#if LIBCURL_VERSION_NUM >= 0x071001 /* Available since 7.16.1 */
	REGISTER_CURL_CONSTANT(CURLE_SSH);
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_SSL_CCC);
	REGISTER_CURL_CONSTANT(CURLOPT_SSH_AUTH_TYPES);
	REGISTER_CURL_CONSTANT(CURLOPT_SSH_PRIVATE_KEYFILE);
	REGISTER_CURL_CONSTANT(CURLOPT_SSH_PUBLIC_KEYFILE);
	REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_ACTIVE);
	REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_NONE);
	REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_PASSIVE);
#endif

#if LIBCURL_VERSION_NUM >= 0x071002 /* Available since 7.16.2 */
	REGISTER_CURL_CONSTANT(CURLOPT_CONNECTTIMEOUT_MS);
	REGISTER_CURL_CONSTANT(CURLOPT_HTTP_CONTENT_DECODING);
	REGISTER_CURL_CONSTANT(CURLOPT_HTTP_TRANSFER_DECODING);
	REGISTER_CURL_CONSTANT(CURLOPT_TIMEOUT_MS);
#endif

#if LIBCURL_VERSION_NUM >= 0x071003 /* Available since 7.16.3 */
	REGISTER_CURL_CONSTANT(CURLMOPT_MAXCONNECTS);
#endif

#if LIBCURL_VERSION_NUM >= 0x071004 /* Available since 7.16.4 */
	REGISTER_CURL_CONSTANT(CURLOPT_KRBLEVEL);
	REGISTER_CURL_CONSTANT(CURLOPT_NEW_DIRECTORY_PERMS);
	REGISTER_CURL_CONSTANT(CURLOPT_NEW_FILE_PERMS);
#endif

#if LIBCURL_VERSION_NUM >= 0x071100 /* Available since 7.17.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_APPEND);
	REGISTER_CURL_CONSTANT(CURLOPT_DIRLISTONLY);
	REGISTER_CURL_CONSTANT(CURLOPT_USE_SSL);
	/* Curl SSL Constants */
	REGISTER_CURL_CONSTANT(CURLUSESSL_ALL);
	REGISTER_CURL_CONSTANT(CURLUSESSL_CONTROL);
	REGISTER_CURL_CONSTANT(CURLUSESSL_NONE);
	REGISTER_CURL_CONSTANT(CURLUSESSL_TRY);
#endif

#if LIBCURL_VERSION_NUM >= 0x071101 /* Available since 7.17.1 */
	REGISTER_CURL_CONSTANT(CURLOPT_SSH_HOST_PUBLIC_KEY_MD5);
#endif

#if LIBCURL_VERSION_NUM >= 0x071200 /* Available since 7.18.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_PROXY_TRANSFER_MODE);
	REGISTER_CURL_CONSTANT(CURLPAUSE_ALL);
	REGISTER_CURL_CONSTANT(CURLPAUSE_CONT);
	REGISTER_CURL_CONSTANT(CURLPAUSE_RECV);
	REGISTER_CURL_CONSTANT(CURLPAUSE_RECV_CONT);
	REGISTER_CURL_CONSTANT(CURLPAUSE_SEND);
	REGISTER_CURL_CONSTANT(CURLPAUSE_SEND_CONT);
	REGISTER_CURL_CONSTANT(CURL_READFUNC_PAUSE);
	REGISTER_CURL_CONSTANT(CURL_WRITEFUNC_PAUSE);
	REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS4A);
	REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS5_HOSTNAME);
#endif

#if LIBCURL_VERSION_NUM >= 0x071202 /* Available since 7.18.2 */
	REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_URL);
#endif

#if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */
	REGISTER_CURL_CONSTANT(CURLINFO_APPCONNECT_TIME);
	REGISTER_CURL_CONSTANT(CURLINFO_PRIMARY_IP);
	REGISTER_CURL_CONSTANT(CURLOPT_ADDRESS_SCOPE);
	REGISTER_CURL_CONSTANT(CURLOPT_CRLFILE);
	REGISTER_CURL_CONSTANT(CURLOPT_ISSUERCERT);
	REGISTER_CURL_CONSTANT(CURLOPT_KEYPASSWD);
	REGISTER_CURL_CONSTANT(CURLSSH_AUTH_ANY);
	REGISTER_CURL_CONSTANT(CURLSSH_AUTH_DEFAULT);
	REGISTER_CURL_CONSTANT(CURLSSH_AUTH_HOST);
	REGISTER_CURL_CONSTANT(CURLSSH_AUTH_KEYBOARD);
	REGISTER_CURL_CONSTANT(CURLSSH_AUTH_NONE);
	REGISTER_CURL_CONSTANT(CURLSSH_AUTH_PASSWORD);
	REGISTER_CURL_CONSTANT(CURLSSH_AUTH_PUBLICKEY);
#endif

#if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */
	REGISTER_CURL_CONSTANT(CURLINFO_CERTINFO);
	REGISTER_CURL_CONSTANT(CURLOPT_CERTINFO);
	REGISTER_CURL_CONSTANT(CURLOPT_PASSWORD);
	REGISTER_CURL_CONSTANT(CURLOPT_POSTREDIR);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXYPASSWORD);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXYUSERNAME);
	REGISTER_CURL_CONSTANT(CURLOPT_USERNAME);
	REGISTER_CURL_CONSTANT(CURL_REDIR_POST_301);
	REGISTER_CURL_CONSTANT(CURL_REDIR_POST_302);
	REGISTER_CURL_CONSTANT(CURL_REDIR_POST_ALL);
#endif

#if LIBCURL_VERSION_NUM >= 0x071303 /* Available since 7.19.3 */
	REGISTER_CURL_CONSTANT(CURLAUTH_DIGEST_IE);
#endif

#if LIBCURL_VERSION_NUM >= 0x071304 /* Available since 7.19.4 */
	REGISTER_CURL_CONSTANT(CURLINFO_CONDITION_UNMET);
	REGISTER_CURL_CONSTANT(CURLOPT_NOPROXY);
	REGISTER_CURL_CONSTANT(CURLOPT_PROTOCOLS);
	REGISTER_CURL_CONSTANT(CURLOPT_REDIR_PROTOCOLS);
	REGISTER_CURL_CONSTANT(CURLOPT_SOCKS5_GSSAPI_NEC);
	REGISTER_CURL_CONSTANT(CURLOPT_SOCKS5_GSSAPI_SERVICE);
	REGISTER_CURL_CONSTANT(CURLOPT_TFTP_BLKSIZE);
	REGISTER_CURL_CONSTANT(CURLPROTO_ALL);
	REGISTER_CURL_CONSTANT(CURLPROTO_DICT);
	REGISTER_CURL_CONSTANT(CURLPROTO_FILE);
	REGISTER_CURL_CONSTANT(CURLPROTO_FTP);
	REGISTER_CURL_CONSTANT(CURLPROTO_FTPS);
	REGISTER_CURL_CONSTANT(CURLPROTO_HTTP);
	REGISTER_CURL_CONSTANT(CURLPROTO_HTTPS);
	REGISTER_CURL_CONSTANT(CURLPROTO_LDAP);
	REGISTER_CURL_CONSTANT(CURLPROTO_LDAPS);
	REGISTER_CURL_CONSTANT(CURLPROTO_SCP);
	REGISTER_CURL_CONSTANT(CURLPROTO_SFTP);
	REGISTER_CURL_CONSTANT(CURLPROTO_TELNET);
	REGISTER_CURL_CONSTANT(CURLPROTO_TFTP);
	REGISTER_CURL_CONSTANT(CURLPROXY_HTTP_1_0);
	REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR);
	REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR_NONE);
	REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR_RETRY);
#endif

#if LIBCURL_VERSION_NUM >= 0x071306 /* Available since 7.19.6 */
	REGISTER_CURL_CONSTANT(CURLOPT_SSH_KNOWNHOSTS);
#endif

#if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */
	REGISTER_CURL_CONSTANT(CURLINFO_RTSP_CLIENT_CSEQ);
	REGISTER_CURL_CONSTANT(CURLINFO_RTSP_CSEQ_RECV);
	REGISTER_CURL_CONSTANT(CURLINFO_RTSP_SERVER_CSEQ);
	REGISTER_CURL_CONSTANT(CURLINFO_RTSP_SESSION_ID);
	REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_PRET);
	REGISTER_CURL_CONSTANT(CURLOPT_MAIL_FROM);
	REGISTER_CURL_CONSTANT(CURLOPT_MAIL_RCPT);
	REGISTER_CURL_CONSTANT(CURLOPT_RTSP_CLIENT_CSEQ);
	REGISTER_CURL_CONSTANT(CURLOPT_RTSP_REQUEST);
	REGISTER_CURL_CONSTANT(CURLOPT_RTSP_SERVER_CSEQ);
	REGISTER_CURL_CONSTANT(CURLOPT_RTSP_SESSION_ID);
	REGISTER_CURL_CONSTANT(CURLOPT_RTSP_STREAM_URI);
	REGISTER_CURL_CONSTANT(CURLOPT_RTSP_TRANSPORT);
	REGISTER_CURL_CONSTANT(CURLPROTO_IMAP);
	REGISTER_CURL_CONSTANT(CURLPROTO_IMAPS);
	REGISTER_CURL_CONSTANT(CURLPROTO_POP3);
	REGISTER_CURL_CONSTANT(CURLPROTO_POP3S);
	REGISTER_CURL_CONSTANT(CURLPROTO_RTSP);
	REGISTER_CURL_CONSTANT(CURLPROTO_SMTP);
	REGISTER_CURL_CONSTANT(CURLPROTO_SMTPS);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_ANNOUNCE);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_DESCRIBE);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_GET_PARAMETER);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_OPTIONS);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_PAUSE);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_PLAY);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_RECEIVE);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_RECORD);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_SET_PARAMETER);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_SETUP);
	REGISTER_CURL_CONSTANT(CURL_RTSPREQ_TEARDOWN);
#endif

#if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */
	REGISTER_CURL_CONSTANT(CURLINFO_LOCAL_IP);
	REGISTER_CURL_CONSTANT(CURLINFO_LOCAL_PORT);
	REGISTER_CURL_CONSTANT(CURLINFO_PRIMARY_PORT);
	REGISTER_CURL_CONSTANT(CURLOPT_FNMATCH_FUNCTION);
	REGISTER_CURL_CONSTANT(CURLOPT_WILDCARDMATCH);
	REGISTER_CURL_CONSTANT(CURLPROTO_RTMP);
	REGISTER_CURL_CONSTANT(CURLPROTO_RTMPE);
	REGISTER_CURL_CONSTANT(CURLPROTO_RTMPS);
	REGISTER_CURL_CONSTANT(CURLPROTO_RTMPT);
	REGISTER_CURL_CONSTANT(CURLPROTO_RTMPTE);
	REGISTER_CURL_CONSTANT(CURLPROTO_RTMPTS);
	REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_FAIL);
	REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_MATCH);
	REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_NOMATCH);
#endif

#if LIBCURL_VERSION_NUM >= 0x071502 /* Available since 7.21.2 */
	REGISTER_CURL_CONSTANT(CURLPROTO_GOPHER);
#endif

#if LIBCURL_VERSION_NUM >= 0x071503 /* Available since 7.21.3 */
	REGISTER_CURL_CONSTANT(CURLAUTH_ONLY);
	REGISTER_CURL_CONSTANT(CURLOPT_RESOLVE);
#endif

#if LIBCURL_VERSION_NUM >= 0x071504 /* Available since 7.21.4 */
	REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_PASSWORD);
	REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_TYPE);
	REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_USERNAME);
	REGISTER_CURL_CONSTANT(CURL_TLSAUTH_SRP);
#endif

#if LIBCURL_VERSION_NUM >= 0x071506 /* Available since 7.21.6 */
	REGISTER_CURL_CONSTANT(CURLOPT_ACCEPT_ENCODING);
	REGISTER_CURL_CONSTANT(CURLOPT_TRANSFER_ENCODING);
#endif

#if LIBCURL_VERSION_NUM >= 0x071600 /* Available since 7.22.0 */
	REGISTER_CURL_CONSTANT(CURLAUTH_NTLM_WB);
	REGISTER_CURL_CONSTANT(CURLGSSAPI_DELEGATION_FLAG);
	REGISTER_CURL_CONSTANT(CURLGSSAPI_DELEGATION_POLICY_FLAG);
	REGISTER_CURL_CONSTANT(CURLOPT_GSSAPI_DELEGATION);
#endif

#if LIBCURL_VERSION_NUM >= 0x071800 /* Available since 7.24.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_ACCEPTTIMEOUT_MS);
	REGISTER_CURL_CONSTANT(CURLOPT_DNS_SERVERS);
#endif

#if LIBCURL_VERSION_NUM >= 0x071900 /* Available since 7.25.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_MAIL_AUTH);
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_OPTIONS);
	REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPALIVE);
	REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPIDLE);
	REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPINTVL);
	REGISTER_CURL_CONSTANT(CURLSSLOPT_ALLOW_BEAST);
#endif

#if LIBCURL_VERSION_NUM >= 0x071901 /* Available since 7.25.1 */
	REGISTER_CURL_CONSTANT(CURL_REDIR_POST_303);
#endif

#if LIBCURL_VERSION_NUM >= 0x071c00 /* Available since 7.28.0 */
	REGISTER_CURL_CONSTANT(CURLSSH_AUTH_AGENT);
#endif

#if LIBCURL_VERSION_NUM >= 0x071e00 /* Available since 7.30.0 */
	REGISTER_CURL_CONSTANT(CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE);
	REGISTER_CURL_CONSTANT(CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE);
	REGISTER_CURL_CONSTANT(CURLMOPT_MAX_HOST_CONNECTIONS);
	REGISTER_CURL_CONSTANT(CURLMOPT_MAX_PIPELINE_LENGTH);
	REGISTER_CURL_CONSTANT(CURLMOPT_MAX_TOTAL_CONNECTIONS);
#endif

#if LIBCURL_VERSION_NUM >= 0x071f00 /* Available since 7.31.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_SASL_IR);
#endif

#if LIBCURL_VERSION_NUM >= 0x072100 /* Available since 7.33.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_DNS_INTERFACE);
	REGISTER_CURL_CONSTANT(CURLOPT_DNS_LOCAL_IP4);
	REGISTER_CURL_CONSTANT(CURLOPT_DNS_LOCAL_IP6);
	REGISTER_CURL_CONSTANT(CURLOPT_XOAUTH2_BEARER);

	REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2_0);
	REGISTER_CURL_CONSTANT(CURL_VERSION_HTTP2);
#endif

#if LIBCURL_VERSION_NUM >= 0x072200 /* Available since 7.34.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_LOGIN_OPTIONS);

	REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_0);
	REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_1);
	REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_2);
#endif

#if LIBCURL_VERSION_NUM >= 0x072400 /* Available since 7.36.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_EXPECT_100_TIMEOUT_MS);
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_ENABLE_ALPN);
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_ENABLE_NPN);
#endif

#if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */
	REGISTER_CURL_CONSTANT(CURLHEADER_SEPARATE);
	REGISTER_CURL_CONSTANT(CURLHEADER_UNIFIED);
	REGISTER_CURL_CONSTANT(CURLOPT_HEADEROPT);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXYHEADER);
#endif

#if LIBCURL_VERSION_NUM >= 0x072600 /* Available since 7.38.0 */
	REGISTER_CURL_CONSTANT(CURLAUTH_NEGOTIATE);
#endif

#if LIBCURL_VERSION_NUM >= 0x072700 /* Available since 7.39.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_PINNEDPUBLICKEY);
#endif

#if LIBCURL_VERSION_NUM >= 0x072800 /* Available since 7.40.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_UNIX_SOCKET_PATH);
	REGISTER_CURL_CONSTANT(CURLPROTO_SMB);
	REGISTER_CURL_CONSTANT(CURLPROTO_SMBS);
#endif

#if LIBCURL_VERSION_NUM >= 0x072900 /* Available since 7.41.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYSTATUS);
#endif

#if LIBCURL_VERSION_NUM >= 0x072a00 /* Available since 7.42.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_PATH_AS_IS);
	REGISTER_CURL_CONSTANT(CURLOPT_SSL_FALSESTART);
#endif

#if LIBCURL_VERSION_NUM >= 0x072b00 /* Available since 7.43.0 */
	REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2);
	REGISTER_CURL_CONSTANT(CURLOPT_PIPEWAIT);
	REGISTER_CURL_CONSTANT(CURLOPT_PROXY_SERVICE_NAME);
	REGISTER_CURL_CONSTANT(CURLOPT_SERVICE_NAME);
	REGISTER_CURL_CONSTANT(CURLPIPE_NOTHING);
	REGISTER_CURL_CONSTANT(CURLPIPE_HTTP1);
	REGISTER_CURL_CONSTANT(CURLPIPE_MULTIPLEX);
#endif

#if LIBCURL_VERSION_NUM >= 0x072c00 /* Available since 7.44.0 */
	REGISTER_CURL_CONSTANT(CURLSSLOPT_NO_REVOKE);
#endif

#if LIBCURL_VERSION_NUM >= 0x072d00 /* Available since 7.45.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_DEFAULT_PROTOCOL);
#endif

#if LIBCURL_VERSION_NUM >= 0x072e00 /* Available since 7.46.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_STREAM_WEIGHT);
#endif

#if LIBCURL_VERSION_NUM >= 0x072f00 /* Available since 7.47.0 */
	REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2TLS);
#endif

#if LIBCURL_VERSION_NUM >= 0x073000 /* Available since 7.48.0 */
	REGISTER_CURL_CONSTANT(CURLOPT_TFTP_NO_OPTIONS);
#endif

#if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */
	REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE);
	REGISTER_CURL_CONSTANT(CURLOPT_CONNECT_TO);
	REGISTER_CURL_CONSTANT(CURLOPT_TCP_FASTOPEN);
#endif

	/* Deprecated/removed options: these names are enums in modern libcurl,
	   so the #if sees an undefined identifier (== 0) and skips them. */
#if CURLOPT_FTPASCII != 0
	REGISTER_CURL_CONSTANT(CURLOPT_FTPASCII);
#endif
#if CURLOPT_MUTE != 0
	REGISTER_CURL_CONSTANT(CURLOPT_MUTE);
#endif
#if CURLOPT_PASSWDFUNCTION != 0
	REGISTER_CURL_CONSTANT(CURLOPT_PASSWDFUNCTION);
#endif
	REGISTER_CURL_CONSTANT(CURLOPT_SAFE_UPLOAD);

#ifdef PHP_CURL_NEED_OPENSSL_TSL
	/* Install OpenSSL id/locking callbacks once (only if none are set yet)
	   so libcurl's SSL usage is safe under ZTS. */
	if (!CRYPTO_get_id_callback()) {
		int i, c = CRYPTO_num_locks();

		php_curl_openssl_tsl = malloc(c * sizeof(MUTEX_T));
		if (!php_curl_openssl_tsl) {
			return FAILURE;
		}

		for (i = 0; i < c; ++i) {
			php_curl_openssl_tsl[i] = tsrm_mutex_alloc();
		}

		CRYPTO_set_id_callback(php_curl_ssl_id);
		CRYPTO_set_locking_callback(php_curl_ssl_lock);
	}
#endif
#ifdef PHP_CURL_NEED_GNUTLS_TSL
	gcry_control(GCRYCTL_SET_THREAD_CBS, &php_curl_gnutls_tsl);
#endif

	if (curl_global_init(CURL_GLOBAL_DEFAULT) != CURLE_OK) {
		return FAILURE;
	}

	curlfile_register_class();

	return SUCCESS;
} | 0 | [
"CWE-119",
"CWE-787"
] | php-src | 72dbb7f416160f490c4e9987040989a10ad431c7 | 93,128,066,816,918,160,000,000,000,000,000,000,000 | 744 | Fix bug #72674 - check both curl_escape and curl_unescape |
/*
 * (Re)create the ESIL VM on core->anal->esil using the current esil.*
 * configuration values, and seed the PC register with the binary's first
 * entry point (falling back to core->offset when no entries exist).
 *
 * Returns the address written to PC, or UT64_MAX when the ESIL VM cannot
 * be allocated. Fixes vs. previous version:
 *  - the previously attached ESIL instance is released before being
 *    replaced (it was silently leaked, leaving stale pointers around);
 *  - r_bin_get_info() may return NULL (e.g. raw files); it is now checked
 *    before dereferencing info->has_va.
 */
static ut64 initializeEsil(RCore *core) {
	int romem = r_config_get_i (core->config, "esil.romem");
	int stats = r_config_get_i (core->config, "esil.stats");
	int iotrap = r_config_get_i (core->config, "esil.iotrap");
	int exectrap = r_config_get_i (core->config, "esil.exectrap");
	int stacksize = r_config_get_i (core->config, "esil.stack.depth");
	int noNULL = r_config_get_i (core->config, "esil.noNULL");
	unsigned int addrsize = r_config_get_i (core->config, "esil.addr.size");
	/* Drop any previously attached VM before replacing it, otherwise the
	 * old instance leaks and dangling references to it may be used. */
	r_anal_esil_free (core->anal->esil);
	core->anal->esil = NULL;
	RAnalEsil *esil = r_anal_esil_new (stacksize, iotrap, addrsize);
	if (!esil) {
		return UT64_MAX;
	}
	core->anal->esil = esil;
	r_anal_esil_setup (esil, core->anal, romem, stats, noNULL); // setup io
	esil->exectrap = exectrap;
	ut64 addr = core->offset;
	RList *entries = r_bin_get_entries (core->bin);
	if (entries && !r_list_empty (entries)) {
		RBinAddr *entry = (RBinAddr *)r_list_pop (entries);
		RBinInfo *info = r_bin_get_info (core->bin);
		/* info can be NULL for raw/unparsed binaries; without VA info
		 * fall back to the physical address. */
		addr = (info && info->has_va)? entry->vaddr: entry->paddr;
		r_list_push (entries, entry);
	}
	const char *name = r_reg_get_name (core->anal->reg, R_REG_NAME_PC);
	r_reg_setv (core->anal->reg, name, addr);
	// set memory read only
	return addr;
} | 0 | [
"CWE-416",
"CWE-908"
] | radare2 | 9d348bcc2c4bbd3805e7eec97b594be9febbdf9a | 309,690,041,950,588,070,000,000,000,000,000,000,000 | 32 | Fix #9943 - Invalid free on RAnal.avr |
__zzip_parse_root_directory(int fd,
                            struct _disk_trailer *trailer,
                            struct zzip_dir_hdr **hdr_return,
                            zzip_plugin_io_t io)
{
    /* Walk the zip central directory described by the trailer and build the
     * in-memory zzip_dir_hdr record list in a single malloc'ed buffer.
     * Uses mmap when available, falling back to seek/read otherwise.
     * On success *hdr_return receives the buffer (caller owns/frees it). */
    auto struct zzip_disk_entry dirent;
    struct zzip_dir_hdr *hdr;
    struct zzip_dir_hdr *hdr0;
    uint16_t *p_reclen = 0;
    zzip_off64_t entries;
    zzip_off64_t zz_offset; /* offset from start of root directory */
    char *fd_map = 0;
    zzip_off64_t zz_fd_gap = 0;
    zzip_off64_t zz_entries = _disk_trailer_localentries(trailer);
    zzip_off64_t zz_rootsize = _disk_trailer_rootsize(trailer);
    zzip_off64_t zz_rootseek = _disk_trailer_rootseek(trailer);
    __correct_rootseek(zz_rootseek, zz_rootsize, trailer);

    /* BUGFIX: the original condition tested zz_rootseek twice and never
     * validated zz_rootsize, so a corrupted (negative) directory size
     * could reach the malloc below. */
    if (zz_entries < 0 || zz_rootseek < 0 || zz_rootsize < 0)
        return ZZIP_CORRUPTED;

    hdr0 = (struct zzip_dir_hdr *) malloc(zz_rootsize);
    if (! hdr0)
        return ZZIP_DIRSIZE;
    hdr = hdr0;
    __debug_dir_hdr(hdr);

    if (USE_MMAP && io->fd.sys)
    {
        zz_fd_gap = zz_rootseek & (_zzip_getpagesize(io->fd.sys) - 1);
        HINT4(" fd_gap=%ld, mapseek=0x%lx, maplen=%ld", (long) (zz_fd_gap),
              (long) (zz_rootseek - zz_fd_gap),
              (long) (zz_rootsize + zz_fd_gap));
        fd_map =
            _zzip_mmap(io->fd.sys, fd, zz_rootseek - zz_fd_gap,
                       zz_rootsize + zz_fd_gap);
        /* if mmap failed we will fallback to seek/read mode */
        if (fd_map == MAP_FAILED)
        {
            NOTE2("map failed: %s", strerror(errno));
            fd_map = 0;
        } else
        {
            HINT3("mapped *%p len=%li", fd_map,
                  (long) (zz_rootsize + zz_fd_gap));
        }
    }

    for (entries=0, zz_offset=0; ; entries++)
    {
        register struct zzip_disk_entry *d;
        uint16_t u_extras, u_comment, u_namlen;

#  ifndef ZZIP_ALLOW_MODULO_ENTRIES
        if (entries >= zz_entries) {
            if (zz_offset + 256 < zz_rootsize) {
                FAIL4("%li's entry is long before the end of directory - enable modulo_entries? (O:%li R:%li)",
                      (long) entries, (long) (zz_offset), (long) zz_rootsize);
            }
            break;
        }
#  endif

        if (fd_map)
        {
            d = (void*)(fd_map+zz_fd_gap+zz_offset); /* fd_map+fd_gap==u_rootseek */
        } else
        {
            /* BUGFIX: these error returns used to leak hdr0 (fd_map is
             * always 0 in this branch, so no munmap is needed here). */
            if (io->fd.seeks(fd, zz_rootseek + zz_offset, SEEK_SET) < 0)
            {
                free(hdr0);
                return ZZIP_DIR_SEEK;
            }
            if (io->fd.read(fd, &dirent, sizeof(dirent)) < __sizeof(dirent))
            {
                free(hdr0);
                return ZZIP_DIR_READ;
            }
            d = &dirent;
        }

        if ((zzip_off64_t) (zz_offset + sizeof(*d)) > zz_rootsize ||
            (zzip_off64_t) (zz_offset + sizeof(*d)) < 0)
        {
            FAIL4("%li's entry stretches beyond root directory (O:%li R:%li)",
                  (long) entries, (long) (zz_offset), (long) zz_rootsize);
            break;
        }

        if (! zzip_disk_entry_check_magic(d)) {
#  ifndef ZZIP_ALLOW_MODULO_ENTRIES
            FAIL4("%li's entry has no disk_entry magic indicator (O:%li R:%li)",
                  (long) entries, (long) (zz_offset), (long) zz_rootsize);
#  endif
            break;
        }
#  if 0 && defined DEBUG
        zzip_debug_xbuf((unsigned char *) d, sizeof(*d) + 8);
#  endif

        u_extras = zzip_disk_entry_get_extras(d);
        u_comment = zzip_disk_entry_get_comment(d);
        u_namlen = zzip_disk_entry_get_namlen(d);
        HINT5("offset=0x%lx, size %ld, dirent *%p, hdr %p\n",
              (long) (zz_offset + zz_rootseek), (long) zz_rootsize, d, hdr);

        /* The entry is rewritten in place: the target zzip_dir_hdr record
         * is assumed to be no larger than the on-disk record it replaces,
         * so the fields must be copied in an order that does not clobber
         * data still to be read.  NOTE(review): verify that sizeof(*hdr)
         * plus alignment padding never exceeds the consumed on-disk record
         * size, otherwise hdr can outrun the zz_rootsize buffer. */
        hdr->d_crc32 = zzip_disk_entry_get_crc32(d);
        hdr->d_csize = zzip_disk_entry_get_csize(d);
        hdr->d_usize = zzip_disk_entry_get_usize(d);
        hdr->d_off = zzip_disk_entry_get_offset(d);
        hdr->d_compr = zzip_disk_entry_get_compr(d);
        if (hdr->d_compr > _255)
            hdr->d_compr = 255;

        if ((zzip_off64_t) (zz_offset + sizeof(*d) + u_namlen) > zz_rootsize ||
            (zzip_off64_t) (zz_offset + sizeof(*d) + u_namlen) < 0)
        {
            FAIL4("%li's name stretches beyond root directory (O:%li N:%li)",
                  (long) entries, (long) (zz_offset), (long) (u_namlen));
            break;
        }

        if (fd_map)
        { memcpy(hdr->d_name, fd_map+zz_fd_gap + zz_offset+sizeof(*d), u_namlen); }
        else
        { io->fd.read(fd, hdr->d_name, u_namlen); }
        hdr->d_name[u_namlen] = '\0';
        hdr->d_namlen = u_namlen;

        /* update offset by the total length of this entry -> next entry */
        zz_offset += sizeof(*d) + u_namlen + u_extras + u_comment;

        if (zz_offset > zz_rootsize)
        {
            FAIL3("%li's entry stretches beyond root directory (O:%li)",
                  (long) entries, (long) (zz_offset));
            entries ++;
            break;
        }

        HINT5("file %ld { compr=%d crc32=$%x offset=%d",
              (long) entries, hdr->d_compr, hdr->d_crc32, hdr->d_off);
        HINT5("csize=%d usize=%d namlen=%d extras=%d",
              hdr->d_csize, hdr->d_usize, u_namlen, u_extras);
        HINT5("comment=%d name='%s' %s <sizeof %d> } ",
              u_comment, hdr->d_name, "", (int) sizeof(*d));
        p_reclen = &hdr->d_reclen;

        {
            /* advance to the next 4-aligned record slot */
            register char *p = (char *) hdr;
            register char *q = aligned4(p + sizeof(*hdr) + u_namlen + 1);
            *p_reclen = (uint16_t) (q - p);
            hdr = (struct zzip_dir_hdr *) q;
        }
    } /*for */

    if (USE_MMAP && fd_map)
    {
        HINT3("unmap *%p len=%li", fd_map, (long) (zz_rootsize + zz_fd_gap));
        _zzip_munmap(io->fd.sys, fd_map, zz_rootsize + zz_fd_gap);
    }

    if (p_reclen)
    {
        *p_reclen = 0;          /* mark end of list */
        if (hdr_return)
            *hdr_return = hdr0;
    } else
    {                           /* zero (sane) entries */
        free(hdr0);             /* BUGFIX: was leaked when nothing parsed */
    }
#  ifndef ZZIP_ALLOW_MODULO_ENTRIES
    return (entries != zz_entries ? ZZIP_CORRUPTED : 0);
#  else
    return ((entries & (unsigned)0xFFFF) != zz_entries ? ZZIP_CORRUPTED : 0);
#  endif
} | 1 | [
"CWE-119"
] | zziplib | feae4da1a5c92100c44ebfcbaaa895959cc0829b | 31,532,253,133,466,990,000,000,000,000,000,000,000 | 178 | fix for zz_rootsize #41 |
const struct GUID *samdb_domain_guid(struct ldb_context *ldb)
{
	/* Look up the objectGUID of the default base DN, caching the result
	 * as an ldb opaque ("cache.domain_guid") so the search runs at most
	 * once per ldb context.  Returns NULL on any failure. */
	const char *attrs[] = { "objectGUID", NULL };
	struct GUID *guid;
	struct ldb_result *result = NULL;
	TALLOC_CTX *mem_ctx = NULL;

	/* Fast path: a previous call already stored the GUID. */
	guid = (struct GUID *)ldb_get_opaque(ldb, "cache.domain_guid");
	if (guid != NULL) {
		return guid;
	}

	mem_ctx = talloc_new(ldb);
	if (mem_ctx == NULL) {
		goto failed;
	}

	if (ldb_search(ldb, mem_ctx, &result, ldb_get_default_basedn(ldb),
		       LDB_SCOPE_BASE, attrs, "objectGUID=*") != LDB_SUCCESS) {
		goto failed;
	}
	if (result->count != 1) {
		goto failed;
	}

	guid = talloc(mem_ctx, struct GUID);
	if (guid == NULL) {
		goto failed;
	}
	*guid = samdb_result_guid(result->msgs[0], "objectGUID");

	/* Cache for later lookups; ownership then moves to the ldb context
	 * so the GUID survives the temporary context below. */
	if (ldb_set_opaque(ldb, "cache.domain_guid", guid) != LDB_SUCCESS) {
		goto failed;
	}
	talloc_steal(ldb, guid);
	talloc_free(mem_ctx);
	return guid;

failed:
	talloc_free(mem_ctx);
	return NULL;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 98,193,120,215,313,640,000,000,000,000,000,000,000 | 51 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <josephsutton@catalyst.net.nz> |
void vsock_remove_sock(struct vsock_sock *vsk)
{
	/* Unlink the socket from the global lookup tables: first the bound
	 * table, then the connected table. */
	vsock_remove_bound(vsk);
	vsock_remove_connected(vsk);
} | 0 | [
"CWE-667"
] | linux | c518adafa39f37858697ac9309c6cf1805581446 | 243,599,215,230,230,930,000,000,000,000,000,000,000 | 5 | vsock: fix the race conditions in multi-transport support
There are multiple similar bugs implicitly introduced by the
commit c0cfa2d8a788fcf4 ("vsock: add multi-transports support") and
commit 6a2c0962105ae8ce ("vsock: prevent transport modules unloading").
The bug pattern:
[1] vsock_sock.transport pointer is copied to a local variable,
[2] lock_sock() is called,
[3] the local variable is used.
VSOCK multi-transport support introduced the race condition:
vsock_sock.transport value may change between [1] and [2].
Let's copy vsock_sock.transport pointer to local variables after
the lock_sock() call.
Fixes: c0cfa2d8a788fcf4 ("vsock: add multi-transports support")
Signed-off-by: Alexander Popov <alex.popov@linux.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Link: https://lore.kernel.org/r/20210201084719.2257066-1-alex.popov@linux.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org> |
static int jas_strtoopenmode(const char *s)
{
	/* Translate an fopen-style mode string into a JAS_STREAM_* flag
	   word.  Characters without a mapping are silently ignored. */
	int flags = 0;
	const char *cp;

	for (cp = s; *cp != '\0'; ++cp) {
		switch (*cp) {
		case 'r':
			flags |= JAS_STREAM_READ;
			break;
		case 'w':
			flags |= JAS_STREAM_WRITE | JAS_STREAM_CREATE;
			break;
		case 'b':
			flags |= JAS_STREAM_BINARY;
			break;
		case 'a':
			flags |= JAS_STREAM_APPEND;
			break;
		case '+':
			flags |= JAS_STREAM_READ | JAS_STREAM_WRITE;
			break;
		default:
			/* unknown mode character: ignore */
			break;
		}
	}
	return flags;
} | 0 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 68,390,022,846,829,020,000,000,000,000,000,000,000 | 27 | At many places in the code, jas_malloc or jas_realloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
qmerge_start (struct qmerge **qm, const char servers[64],
              int flagrecursive, const char *q,
              const char qtype[2], const char localip[4], const char *control)
{
  /* Start a DNS query, merging with an identical in-flight query when one
   * exists so duplicate client requests share a single outgoing transmit.
   * On success *qm points at the (shared) slot and 0 is returned; -1 on
   * failure. */
  int i = 0, r = 0;
  struct qmerge_key k;

  qmerge_free (qm);

  byte_zero (&k, sizeof (k));
  if (!qmerge_key_init (&k, q, qtype, control))
    return -1;

  /* Piggy-back on an already-active identical query, if any. */
  for (i = 0; i < QMERGE_MAX; i++)
    {
      if (!inprogress[i].active)
        continue;
      if (!qmerge_key_equal (&k, &inprogress[i].key))
        continue;

      log_tx_piggyback (q, qtype, control);
      inprogress[i].active++;
      *qm = &inprogress[i];
      qmerge_key_free (&k);
      return 0;
    }

  /* Find a free slot for a new outgoing query. */
  for (i = 0; i < QMERGE_MAX; i++)
    if (!inprogress[i].active)
      break;
  if (i == QMERGE_MAX)
    {
      /* BUGFIX: k was leaked on this all-slots-busy path; every other
       * exit path releases it. */
      qmerge_key_free (&k);
      return -1;
    }

  log_tx (q, qtype, control, servers, 0);
  r = dns_transmit_start (&inprogress[i].dt, servers,
                          flagrecursive, q, qtype, localip);
  if (r == -1)
    {
      qmerge_key_free (&k);
      return -1;
    }

  inprogress[i].active++;
  inprogress[i].state = 0;

  /* The slot takes over k's contents via byte_copy, so k itself must not
   * be freed here. */
  qmerge_key_free (&inprogress[i].key);
  byte_copy (&inprogress[i].key, sizeof (k), &k);
  *qm = &inprogress[i];
  return 0;
} | 0 | [
"CWE-362"
] | ndjbdns | 847523271f3966cf4618c5689b905703c41dec1c | 37,356,202,804,316,594,000,000,000,000,000,000,000 | 51 | Merge identical outgoing requests.
This patch fixes dnscache to combine *same* client queries into one
single outgoing request, thus securing the server from possible cache
poisoning attacks. This fixes one of the cache poisoning vulnerability
reported by Mr Mark Johnson
-> https://bugzilla.redhat.com/show_bug.cgi?id=838965.
Nonetheless the original patch for this issue was created by
Mr Jeff King -> http://www.your.org/dnscache/
Sincere thanks to Mr Mark for reporting this issue and Mr Jeff for
creating the patch and releasing it into the public domain. |