idx | project | commit_id | project_url | commit_url | commit_message | target | func | func_hash | file_name | file_hash | cwe | cve | cve_desc | nvd_url |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1,129 | linux | e15ca9a0ef9a86f0477530b0f44a725d67f889ee | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/e15ca9a0ef9a86f0477530b0f44a725d67f889ee | Bluetooth: HCI - Fix info leak in getsockopt(HCI_FILTER)
The HCI code fails to initialize the two padding bytes of struct
hci_ufilter before copying it to userland, thereby leaking two
bytes of kernel stack. Add an explicit memset(0) before filling the
structure to avoid the info leak.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Cc: Marcel Holtmann <marcel@holtmann.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Johan Hedberg <johan.hedberg@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;
int len, opt, err = 0;
BT_DBG("sk %p, opt %d", sk, optname);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
err = -EINVAL;
goto done;
}
switch (optname) {
case HCI_DATA_DIR:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
opt = 1;
else
opt = 0;
if (put_user(opt, optval))
err = -EFAULT;
break;
case HCI_TIME_STAMP:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
opt = 1;
else
opt = 0;
if (put_user(opt, optval))
err = -EFAULT;
break;
case HCI_FILTER:
{
struct hci_filter *f = &hci_pi(sk)->filter;
uf.type_mask = f->type_mask;
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
uf.event_mask[1] = *((u32 *) f->event_mask + 1);
}
len = min_t(unsigned int, len, sizeof(uf));
if (copy_to_user(optval, &uf, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
done:
release_sock(sk);
return err;
}
| 274,243,474,172,140,940,000,000,000,000,000,000,000 | hci_sock.c | 222,649,962,349,762,240,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2012-6544 | The Bluetooth protocol stack in the Linux kernel before 3.6 does not properly initialize certain structures, which allows local users to obtain sensitive information from kernel stack memory via a crafted application that targets the (1) L2CAP or (2) HCI implementation. | https://nvd.nist.gov/vuln/detail/CVE-2012-6544 |
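A minimal sketch of the fix described in the commit message above (not the verbatim upstream diff): zero the on-stack struct hci_ufilter before filling it in the HCI_FILTER branch of the function shown above, so its two padding bytes no longer carry old kernel stack contents when copy_to_user() runs.

```c
	case HCI_FILTER:
	{
		struct hci_filter *f = &hci_pi(sk)->filter;

		memset(&uf, 0, sizeof(uf));	/* clear padding bytes before use */
		uf.type_mask = f->type_mask;
		uf.opcode = f->opcode;
		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
		uf.event_mask[1] = *((u32 *) f->event_mask + 1);
	}
```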
1,130 | linux | 04d4fbca1017c11381e7d82acea21dd741e748bc | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/04d4fbca1017c11381e7d82acea21dd741e748bc | l2tp: fix info leak via getsockname()
The L2TP code for IPv6 fails to initialize the l2tp_unused member of
struct sockaddr_l2tpip6, thereby leaking two bytes of kernel stack via
the getsockname() syscall. Initialize l2tp_unused with 0 to avoid the
info leak.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Cc: James Chapman <jchapman@katalix.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
struct sock *sk = sock->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
lsa->l2tp_family = AF_INET6;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
if (peer) {
if (!lsk->peer_conn_id)
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
lsa->l2tp_addr = np->daddr;
if (np->sndflow)
lsa->l2tp_flowinfo = np->flow_label;
} else {
if (ipv6_addr_any(&np->rcv_saddr))
lsa->l2tp_addr = np->saddr;
else
lsa->l2tp_addr = np->rcv_saddr;
lsa->l2tp_conn_id = lsk->conn_id;
}
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = sk->sk_bound_dev_if;
*uaddr_len = sizeof(*lsa);
return 0;
}
| 85,210,193,838,679,960,000,000,000,000,000,000,000 | l2tp_ip6.c | 37,086,785,037,095,373,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2012-6543 | The l2tp_ip6_getname function in net/l2tp/l2tp_ip6.c in the Linux kernel before 3.6 does not initialize a certain structure member, which allows local users to obtain sensitive information from kernel stack memory via a crafted application. | https://nvd.nist.gov/vuln/detail/CVE-2012-6543 |
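A sketch of the fix outlined above, slotting into the l2tp_ip6_getname() function shown in this row: explicitly zero l2tp_unused alongside the other fields so getsockname() no longer returns two uninitialized stack bytes.

```c
	lsa->l2tp_family = AF_INET6;
	lsa->l2tp_flowinfo = 0;
	lsa->l2tp_scope_id = 0;
	lsa->l2tp_unused = 0;	/* previously left uninitialized, leaking 2 bytes */
```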
1,131 | linux | 3592aaeb80290bda0f2cf0b5456c97bfc638b192 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/3592aaeb80290bda0f2cf0b5456c97bfc638b192 | llc: fix info leak via getsockname()
The LLC code wrongly returns 0, i.e. "success", when the socket is
zapped. Together with the uninitialized uaddrlen pointer argument from
sys_getsockname this leads to an arbitrary memory leak of up to 128
bytes of kernel stack via the getsockname() syscall.
Return an error instead when the socket is zapped to prevent the info
leak. Also remove the unnecessary memset(0). We don't directly write to
the memory pointed by uaddr but memcpy() a local structure at the end of
the function that is properly initialized.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddrlen, int peer)
{
struct sockaddr_llc sllc;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
int rc = 0;
memset(&sllc, 0, sizeof(sllc));
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
*uaddrlen = sizeof(sllc);
memset(uaddr, 0, *uaddrlen);
if (peer) {
rc = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if(llc->dev)
sllc.sllc_arphrd = llc->dev->type;
sllc.sllc_sap = llc->daddr.lsap;
memcpy(&sllc.sllc_mac, &llc->daddr.mac, IFHWADDRLEN);
} else {
rc = -EINVAL;
if (!llc->sap)
goto out;
sllc.sllc_sap = llc->sap->laddr.lsap;
if (llc->dev) {
sllc.sllc_arphrd = llc->dev->type;
memcpy(&sllc.sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
}
}
rc = 0;
sllc.sllc_family = AF_LLC;
memcpy(uaddr, &sllc, sizeof(sllc));
out:
release_sock(sk);
return rc;
}
| 39,906,440,321,854,090,000,000,000,000,000,000,000 | af_llc.c | 121,753,272,392,455,930,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2012-6542 | The llc_ui_getname function in net/llc/af_llc.c in the Linux kernel before 3.6 has an incorrect return value in certain circumstances, which allows local users to obtain sensitive information from kernel stack memory via a crafted application that leverages an uninitialized pointer argument. | https://nvd.nist.gov/vuln/detail/CVE-2012-6542 |
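A sketch of the change described above, applied to the top of the llc_ui_getname() function shown in this row: initialize rc to an error so the zapped-socket path no longer reports success, and drop the memset() of the user buffer since only the fully initialized local sllc is copied out at the end. The exact errno is an assumption; the commit message only says an error is returned.

```c
	struct sockaddr_llc sllc;
	struct sock *sk = sock->sk;
	struct llc_sock *llc = llc_sk(sk);
	int rc = -EBADF;	/* assumed errno; "return an error when zapped" */

	memset(&sllc, 0, sizeof(sllc));
	lock_sock(sk);
	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;	/* now propagates an error instead of 0 */
	*uaddrlen = sizeof(sllc);
	/* memset(uaddr, 0, *uaddrlen) removed: uaddr is only written via the
	 * memcpy() of the fully initialized sllc at the end of the function */
```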
1,132 | linux | 7b07f8eb75aa3097cdfd4f6eac3da49db787381d | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/7b07f8eb75aa3097cdfd4f6eac3da49db787381d | dccp: fix info leak via getsockopt(DCCP_SOCKOPT_CCID_TX_INFO)
The CCID3 code fails to initialize the trailing padding bytes of struct
tfrc_tx_info added for alignment on 64-bit architectures. This
potentially leaks four bytes of kernel stack via the getsockopt() syscall.
Add an explicit memset(0) before filling the structure to avoid the
info leak.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Cc: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
u32 __user *optval, int __user *optlen)
{
const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
struct tfrc_tx_info tfrc;
const void *val;
switch (optname) {
case DCCP_SOCKOPT_CCID_TX_INFO:
if (len < sizeof(tfrc))
return -EINVAL;
tfrc.tfrctx_x = hc->tx_x;
tfrc.tfrctx_x_recv = hc->tx_x_recv;
tfrc.tfrctx_x_calc = hc->tx_x_calc;
tfrc.tfrctx_rtt = hc->tx_rtt;
tfrc.tfrctx_p = hc->tx_p;
tfrc.tfrctx_rto = hc->tx_t_rto;
tfrc.tfrctx_ipi = hc->tx_t_ipi;
len = sizeof(tfrc);
val = &tfrc;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen) || copy_to_user(optval, val, len))
return -EFAULT;
return 0;
}
| 8,536,137,024,975,475,000,000,000,000,000,000,000 | ccid3.c | 160,351,560,622,096,350,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2012-6541 | The ccid3_hc_tx_getsockopt function in net/dccp/ccids/ccid3.c in the Linux kernel before 3.6 does not initialize a certain structure, which allows local users to obtain sensitive information from kernel stack memory via a crafted application. | https://nvd.nist.gov/vuln/detail/CVE-2012-6541 |
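A minimal sketch of the fix described above, inside the DCCP_SOCKOPT_CCID_TX_INFO branch of the function shown in this row: clear the whole on-stack struct tfrc_tx_info before filling it, so the alignment padding copied by copy_to_user() is zeroed.

```c
	case DCCP_SOCKOPT_CCID_TX_INFO:
		if (len < sizeof(tfrc))
			return -EINVAL;
		memset(&tfrc, 0, sizeof(tfrc));	/* zero trailing padding bytes */
		tfrc.tfrctx_x      = hc->tx_x;
		tfrc.tfrctx_x_recv = hc->tx_x_recv;
		/* ... remaining fields copied as in the function above ... */
```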
1,133 | linux | 2d8a041b7bfe1097af21441cb77d6af95f4f4680 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/2d8a041b7bfe1097af21441cb77d6af95f4f4680 | ipvs: fix info leak in getsockopt(IP_VS_SO_GET_TIMEOUT)
If at least one of CONFIG_IP_VS_PROTO_TCP or CONFIG_IP_VS_PROTO_UDP is
not set, __ip_vs_get_timeouts() does not fully initialize the structure
that gets copied to userland, thereby leaking up to 12 bytes of kernel
stack. Add an explicit memset(0) before passing the structure to
__ip_vs_get_timeouts() to avoid the info leak.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Cc: Wensong Zhang <wensong@linux-vs.org>
Cc: Simon Horman <horms@verge.net.au>
Cc: Julian Anastasov <ja@ssi.bg>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
unsigned int copylen;
struct net *net = sock_net(sk);
struct netns_ipvs *ipvs = net_ipvs(net);
BUG_ON(!net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
return -EINVAL;
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
copylen = get_arglen[GET_CMDID(cmd)];
if (copylen > 128)
return -EINVAL;
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
/*
* Handle daemons first since it has its own locking
*/
if (cmd == IP_VS_SO_GET_DAEMON) {
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
if (mutex_lock_interruptible(&ipvs->sync_mutex))
return -ERESTARTSYS;
if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
sizeof(d[0].mcast_ifn));
d[0].syncid = ipvs->master_syncid;
}
if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
sizeof(d[1].mcast_ifn));
d[1].syncid = ipvs->backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
mutex_unlock(&ipvs->sync_mutex);
return ret;
}
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = ip_vs_conn_tab_size;
info.num_services = ipvs->num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(net, get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
union nf_inet_addr addr;
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
else
svc = __ip_vs_service_find(net, AF_INET,
entry->protocol, &addr,
entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(net, get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
| 267,941,661,112,975,060,000,000,000,000,000,000,000 | ip_vs_ctl.c | 289,258,931,771,187,420,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2012-6540 | The do_ip_vs_get_ctl function in net/netfilter/ipvs/ip_vs_ctl.c in the Linux kernel before 3.6 does not initialize a certain structure for IP_VS_SO_GET_TIMEOUT commands, which allows local users to obtain sensitive information from kernel stack memory via a crafted application. | https://nvd.nist.gov/vuln/detail/CVE-2012-6540 |
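A sketch of the fix for the IP_VS_SO_GET_TIMEOUT branch of do_ip_vs_get_ctl() shown above, as described in the commit message: zero the structure before handing it to __ip_vs_get_timeouts(), so configurations without TCP/UDP protocol support no longer copy uninitialized stack bytes to userland.

```c
	case IP_VS_SO_GET_TIMEOUT:
	{
		struct ip_vs_timeout_user t;

		memset(&t, 0, sizeof(t));	/* fields may be skipped below */
		__ip_vs_get_timeouts(net, &t);
		if (copy_to_user(user, &t, sizeof(t)) != 0)
			ret = -EFAULT;
	}
	break;
```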
1,134 | linux | f778a636713a435d3a922c60b1622a91136560c1 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/f778a636713a435d3a922c60b1622a91136560c1 | xfrm_user: fix info leak in copy_to_user_state()
The memory reserved to dump the xfrm state includes the padding bytes of
struct xfrm_usersa_info added by the compiler for alignment (7 for
amd64, 3 for i386). Add an explicit memset(0) before filling the buffer
to avoid the info leak.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
memcpy(&p->id, &x->id, sizeof(p->id));
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
memcpy(&p->stats, &x->stats, sizeof(p->stats));
memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
p->mode = x->props.mode;
p->replay_window = x->props.replay_window;
p->reqid = x->props.reqid;
p->family = x->props.family;
p->flags = x->props.flags;
p->seq = x->km.seq;
}
| 144,112,786,017,541,910,000,000,000,000,000,000,000 | xfrm_user.c | 38,362,585,315,635,697,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2012-6537 | net/xfrm/xfrm_user.c in the Linux kernel before 3.6 does not initialize certain structures, which allows local users to obtain sensitive information from kernel memory by leveraging the CAP_NET_ADMIN capability. | https://nvd.nist.gov/vuln/detail/CVE-2012-6537 |
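A minimal sketch of the fix described above for copy_to_user_state(): clear the destination xfrm_usersa_info before filling it so the compiler-inserted padding in the netlink dump is zeroed.

```c
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memset(p, 0, sizeof(*p));	/* zero padding before filling fields */
	memcpy(&p->id, &x->id, sizeof(p->id));
	/* ... remaining fields copied as in the function above ... */
}
```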
1,142 | linux | b66c5984017533316fd1951770302649baf1aa33 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b66c5984017533316fd1951770302649baf1aa33 | exec: do not leave bprm->interp on stack
If a series of scripts are executed, each triggering module loading via
unprintable bytes in the script header, kernel stack contents can leak
into the command line.
Normally execution of binfmt_script and binfmt_misc happens recursively.
However, when modules are enabled, and unprintable bytes exist in the
bprm->buf, execution will restart after attempting to load matching
binfmt modules. Unfortunately, the logic in binfmt_script and
binfmt_misc does not expect to get restarted. They leave bprm->interp
pointing to their local stack. This means on restart bprm->interp is
left pointing into unused stack memory which can then be copied into the
userspace argv areas.
After additional study, it seems that both recursion and restart remains
the desirable way to handle exec with scripts, misc, and modules. As
such, we need to protect the changes to interp.
This changes the logic to require allocation for any changes to the
bprm->interp. To avoid adding a new kmalloc to every exec, the default
value is left as-is. Only when passing through binfmt_script or
binfmt_misc does an allocation take place.
For a proof of concept, see DoTest.sh from:
http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: halfdog <me@halfdog.net>
Cc: P J P <ppandit@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | 1 | static int load_script(struct linux_binprm *bprm)
{
const char *i_arg, *i_name;
char *cp;
struct file *file;
char interp[BINPRM_BUF_SIZE];
int retval;
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
/*
* This section does the #! interpretation.
* Sorta complicated, but hopefully it will work. -TYT
*/
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
if ((cp = strchr(bprm->buf, '\n')) == NULL)
cp = bprm->buf+BINPRM_BUF_SIZE-1;
*cp = '\0';
while (cp > bprm->buf) {
cp--;
if ((*cp == ' ') || (*cp == '\t'))
*cp = '\0';
else
break;
}
for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++);
if (*cp == '\0')
return -ENOEXEC; /* No interpreter name found */
i_name = cp;
i_arg = NULL;
for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++)
/* nothing */ ;
while ((*cp == ' ') || (*cp == '\t'))
*cp++ = '\0';
if (*cp)
i_arg = cp;
strcpy (interp, i_name);
/*
* OK, we've parsed out the interpreter name and
* (optional) argument.
* Splice in (1) the interpreter's name for argv[0]
* (2) (optional) argument to interpreter
* (3) filename of shell script (replace argv[0])
*
* This is done in reverse order, because of how the
* user environment and arguments are stored.
*/
retval = remove_arg_zero(bprm);
if (retval)
return retval;
retval = copy_strings_kernel(1, &bprm->interp, bprm);
if (retval < 0) return retval;
bprm->argc++;
if (i_arg) {
retval = copy_strings_kernel(1, &i_arg, bprm);
if (retval < 0) return retval;
bprm->argc++;
}
retval = copy_strings_kernel(1, &i_name, bprm);
if (retval) return retval;
bprm->argc++;
bprm->interp = interp;
/*
* OK, now restart the process with the interpreter's dentry.
*/
file = open_exec(interp);
if (IS_ERR(file))
return PTR_ERR(file);
bprm->file = file;
retval = prepare_binprm(bprm);
if (retval < 0)
return retval;
return search_binary_handler(bprm);
}
| 328,383,916,190,717,060,000,000,000,000,000,000,000 | None | null | [
"CWE-200"
] | CVE-2012-4530 | The load_script function in fs/binfmt_script.c in the Linux kernel before 3.7.2 does not properly handle recursion, which allows local users to obtain sensitive information from kernel stack memory via a crafted application. | https://nvd.nist.gov/vuln/detail/CVE-2012-4530 |
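A hedged sketch of the idea behind the fix for load_script() shown above (simplified, not the verbatim upstream patch): give bprm->interp heap-allocated storage instead of pointing it at the local interp[] stack buffer, so the value stays valid if search_binary_handler() restarts the exec for module loading. The commit message only requires that any change to interp use allocated memory; error handling and cleanup are omitted here.

```c
	/* Instead of:  bprm->interp = interp;   (pointer into this stack frame) */
	{
		char *dup = kstrdup(interp, GFP_KERNEL);	/* simplified sketch; the
								 * allocation must be freed
								 * when the bprm is released */
		if (!dup)
			return -ENOMEM;
		bprm->interp = dup;
	}
```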
1,150 | krb5 | db64ca25d661a47b996b4e2645998b5d7f0eb52c | https://github.com/krb5/krb5 | https://github.com/krb5/krb5/commit/db64ca25d661a47b996b4e2645998b5d7f0eb52c | PKINIT (draft9) null ptr deref [CVE-2012-1016]
Don't check for an agility KDF identifier in the non-draft9 reply
structure when we're building a draft9 reply, because it'll be NULL.
The KDC plugin for PKINIT can dereference a null pointer when handling
a draft9 request, leading to a crash of the KDC process. An attacker
would need to have a valid PKINIT certificate, or an unauthenticated
attacker could execute the attack if anonymous PKINIT is enabled.
CVSSv2 vector: AV:N/AC:M/Au:N/C:N/I:N/A:P/E:P/RL:O/RC:C
[tlyu@mit.edu: reformat comment and edit log message]
(back ported from commit cd5ff932c9d1439c961b0cf9ccff979356686aff)
ticket: 7527 (new)
version_fixed: 1.10.4
status: resolved | 1 | pkinit_server_return_padata(krb5_context context,
krb5_pa_data * padata,
krb5_data *req_pkt,
krb5_kdc_req * request,
krb5_kdc_rep * reply,
krb5_keyblock * encrypting_key,
krb5_pa_data ** send_pa,
krb5_kdcpreauth_callbacks cb,
krb5_kdcpreauth_rock rock,
krb5_kdcpreauth_moddata moddata,
krb5_kdcpreauth_modreq modreq)
{
krb5_error_code retval = 0;
krb5_data scratch = {0, 0, NULL};
krb5_pa_pk_as_req *reqp = NULL;
krb5_pa_pk_as_req_draft9 *reqp9 = NULL;
int i = 0;
unsigned char *subjectPublicKey = NULL;
unsigned char *dh_pubkey = NULL, *server_key = NULL;
unsigned int subjectPublicKey_len = 0;
unsigned int server_key_len = 0, dh_pubkey_len = 0;
krb5_kdc_dh_key_info dhkey_info;
krb5_data *encoded_dhkey_info = NULL;
krb5_pa_pk_as_rep *rep = NULL;
krb5_pa_pk_as_rep_draft9 *rep9 = NULL;
krb5_data *out_data = NULL;
krb5_octet_data secret;
krb5_enctype enctype = -1;
krb5_reply_key_pack *key_pack = NULL;
krb5_reply_key_pack_draft9 *key_pack9 = NULL;
krb5_data *encoded_key_pack = NULL;
pkinit_kdc_context plgctx;
pkinit_kdc_req_context reqctx;
int fixed_keypack = 0;
*send_pa = NULL;
if (padata->pa_type == KRB5_PADATA_PKINIT_KX) {
return return_pkinit_kx(context, request, reply,
encrypting_key, send_pa);
}
if (padata->length <= 0 || padata->contents == NULL)
return 0;
if (modreq == NULL) {
pkiDebug("missing request context \n");
return EINVAL;
}
plgctx = pkinit_find_realm_context(context, moddata, request->server);
if (plgctx == NULL) {
pkiDebug("Unable to locate correct realm context\n");
return ENOENT;
}
pkiDebug("pkinit_return_padata: entered!\n");
reqctx = (pkinit_kdc_req_context)modreq;
if (encrypting_key->contents) {
free(encrypting_key->contents);
encrypting_key->length = 0;
encrypting_key->contents = NULL;
}
for(i = 0; i < request->nktypes; i++) {
enctype = request->ktype[i];
if (!krb5_c_valid_enctype(enctype))
continue;
else {
pkiDebug("KDC picked etype = %d\n", enctype);
break;
}
}
if (i == request->nktypes) {
retval = KRB5KDC_ERR_ETYPE_NOSUPP;
goto cleanup;
}
switch((int)reqctx->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
init_krb5_pa_pk_as_rep(&rep);
if (rep == NULL) {
retval = ENOMEM;
goto cleanup;
}
/* let's assume it's RSA. we'll reset it to DH if needed */
rep->choice = choice_pa_pk_as_rep_encKeyPack;
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
init_krb5_pa_pk_as_rep_draft9(&rep9);
if (rep9 == NULL) {
retval = ENOMEM;
goto cleanup;
}
rep9->choice = choice_pa_pk_as_rep_draft9_encKeyPack;
break;
default:
retval = KRB5KDC_ERR_PREAUTH_FAILED;
goto cleanup;
}
if (reqctx->rcv_auth_pack != NULL &&
reqctx->rcv_auth_pack->clientPublicValue != NULL) {
subjectPublicKey =
reqctx->rcv_auth_pack->clientPublicValue->subjectPublicKey.data;
subjectPublicKey_len =
reqctx->rcv_auth_pack->clientPublicValue->subjectPublicKey.length;
rep->choice = choice_pa_pk_as_rep_dhInfo;
} else if (reqctx->rcv_auth_pack9 != NULL &&
reqctx->rcv_auth_pack9->clientPublicValue != NULL) {
subjectPublicKey =
reqctx->rcv_auth_pack9->clientPublicValue->subjectPublicKey.data;
subjectPublicKey_len =
reqctx->rcv_auth_pack9->clientPublicValue->subjectPublicKey.length;
rep9->choice = choice_pa_pk_as_rep_draft9_dhSignedData;
}
/* if this DH, then process finish computing DH key */
if (rep != NULL && (rep->choice == choice_pa_pk_as_rep_dhInfo ||
rep->choice == choice_pa_pk_as_rep_draft9_dhSignedData)) {
pkiDebug("received DH key delivery AS REQ\n");
retval = server_process_dh(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, subjectPublicKey,
subjectPublicKey_len, &dh_pubkey, &dh_pubkey_len,
&server_key, &server_key_len);
if (retval) {
pkiDebug("failed to process/create dh paramters\n");
goto cleanup;
}
}
if ((rep9 != NULL &&
rep9->choice == choice_pa_pk_as_rep_draft9_dhSignedData) ||
(rep != NULL && rep->choice == choice_pa_pk_as_rep_dhInfo)) {
/*
* This is DH, so don't generate the key until after we
* encode the reply, because the encoded reply is needed
* to generate the key in some cases.
*/
dhkey_info.subjectPublicKey.length = dh_pubkey_len;
dhkey_info.subjectPublicKey.data = dh_pubkey;
dhkey_info.nonce = request->nonce;
dhkey_info.dhKeyExpiration = 0;
retval = k5int_encode_krb5_kdc_dh_key_info(&dhkey_info,
&encoded_dhkey_info);
if (retval) {
pkiDebug("encode_krb5_kdc_dh_key_info failed\n");
goto cleanup;
}
#ifdef DEBUG_ASN1
print_buffer_bin((unsigned char *)encoded_dhkey_info->data,
encoded_dhkey_info->length,
"/tmp/kdc_dh_key_info");
#endif
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
retval = cms_signeddata_create(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_SERVER, 1,
(unsigned char *)encoded_dhkey_info->data,
encoded_dhkey_info->length,
&rep->u.dh_Info.dhSignedData.data,
&rep->u.dh_Info.dhSignedData.length);
if (retval) {
pkiDebug("failed to create pkcs7 signed data\n");
goto cleanup;
}
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
retval = cms_signeddata_create(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_DRAFT9, 1,
(unsigned char *)encoded_dhkey_info->data,
encoded_dhkey_info->length,
&rep9->u.dhSignedData.data,
&rep9->u.dhSignedData.length);
if (retval) {
pkiDebug("failed to create pkcs7 signed data\n");
goto cleanup;
}
break;
}
} else {
pkiDebug("received RSA key delivery AS REQ\n");
retval = krb5_c_make_random_key(context, enctype, encrypting_key);
if (retval) {
pkiDebug("unable to make a session key\n");
goto cleanup;
}
/* check if PA_TYPE of 132 is present which means the client is
* requesting that a checksum is send back instead of the nonce
*/
for (i = 0; request->padata[i] != NULL; i++) {
pkiDebug("%s: Checking pa_type 0x%08x\n",
__FUNCTION__, request->padata[i]->pa_type);
if (request->padata[i]->pa_type == 132)
fixed_keypack = 1;
}
pkiDebug("%s: return checksum instead of nonce = %d\n",
__FUNCTION__, fixed_keypack);
/* if this is an RFC reply or draft9 client requested a checksum
* in the reply instead of the nonce, create an RFC-style keypack
*/
if ((int)padata->pa_type == KRB5_PADATA_PK_AS_REQ || fixed_keypack) {
init_krb5_reply_key_pack(&key_pack);
if (key_pack == NULL) {
retval = ENOMEM;
goto cleanup;
}
retval = krb5_c_make_checksum(context, 0,
encrypting_key, KRB5_KEYUSAGE_TGS_REQ_AUTH_CKSUM,
req_pkt, &key_pack->asChecksum);
if (retval) {
pkiDebug("unable to calculate AS REQ checksum\n");
goto cleanup;
}
#ifdef DEBUG_CKSUM
pkiDebug("calculating checksum on buf size = %d\n", req_pkt->length);
print_buffer(req_pkt->data, req_pkt->length);
pkiDebug("checksum size = %d\n", key_pack->asChecksum.length);
print_buffer(key_pack->asChecksum.contents,
key_pack->asChecksum.length);
pkiDebug("encrypting key (%d)\n", encrypting_key->length);
print_buffer(encrypting_key->contents, encrypting_key->length);
#endif
krb5_copy_keyblock_contents(context, encrypting_key,
&key_pack->replyKey);
retval = k5int_encode_krb5_reply_key_pack(key_pack,
&encoded_key_pack);
if (retval) {
pkiDebug("failed to encode reply_key_pack\n");
goto cleanup;
}
}
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
rep->choice = choice_pa_pk_as_rep_encKeyPack;
retval = cms_envelopeddata_create(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, padata->pa_type, 1,
(unsigned char *)encoded_key_pack->data,
encoded_key_pack->length,
&rep->u.encKeyPack.data, &rep->u.encKeyPack.length);
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
/* if the request is from the broken draft9 client that
* expects back a nonce, create it now
*/
if (!fixed_keypack) {
init_krb5_reply_key_pack_draft9(&key_pack9);
if (key_pack9 == NULL) {
retval = ENOMEM;
goto cleanup;
}
key_pack9->nonce = reqctx->rcv_auth_pack9->pkAuthenticator.nonce;
krb5_copy_keyblock_contents(context, encrypting_key,
&key_pack9->replyKey);
retval = k5int_encode_krb5_reply_key_pack_draft9(key_pack9,
&encoded_key_pack);
if (retval) {
pkiDebug("failed to encode reply_key_pack\n");
goto cleanup;
}
}
rep9->choice = choice_pa_pk_as_rep_draft9_encKeyPack;
retval = cms_envelopeddata_create(context, plgctx->cryptoctx,
reqctx->cryptoctx, plgctx->idctx, padata->pa_type, 1,
(unsigned char *)encoded_key_pack->data,
encoded_key_pack->length,
&rep9->u.encKeyPack.data, &rep9->u.encKeyPack.length);
break;
}
if (retval) {
pkiDebug("failed to create pkcs7 enveloped data: %s\n",
error_message(retval));
goto cleanup;
}
#ifdef DEBUG_ASN1
print_buffer_bin((unsigned char *)encoded_key_pack->data,
encoded_key_pack->length,
"/tmp/kdc_key_pack");
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
print_buffer_bin(rep->u.encKeyPack.data,
rep->u.encKeyPack.length,
"/tmp/kdc_enc_key_pack");
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
print_buffer_bin(rep9->u.encKeyPack.data,
rep9->u.encKeyPack.length,
"/tmp/kdc_enc_key_pack");
break;
}
#endif
}
if ((rep != NULL && rep->choice == choice_pa_pk_as_rep_dhInfo) &&
((reqctx->rcv_auth_pack != NULL &&
reqctx->rcv_auth_pack->supportedKDFs != NULL))) {
/* If using the alg-agility KDF, put the algorithm in the reply
* before encoding it.
*/
if (reqctx->rcv_auth_pack != NULL &&
reqctx->rcv_auth_pack->supportedKDFs != NULL) {
retval = pkinit_pick_kdf_alg(context, reqctx->rcv_auth_pack->supportedKDFs,
&(rep->u.dh_Info.kdfID));
if (retval) {
pkiDebug("pkinit_pick_kdf_alg failed: %s\n",
error_message(retval));
goto cleanup;
}
}
}
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
retval = k5int_encode_krb5_pa_pk_as_rep(rep, &out_data);
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
retval = k5int_encode_krb5_pa_pk_as_rep_draft9(rep9, &out_data);
break;
}
if (retval) {
pkiDebug("failed to encode AS_REP\n");
goto cleanup;
}
#ifdef DEBUG_ASN1
if (out_data != NULL)
print_buffer_bin((unsigned char *)out_data->data, out_data->length,
"/tmp/kdc_as_rep");
#endif
/* If this is DH, we haven't computed the key yet, so do it now. */
if ((rep9 != NULL &&
rep9->choice == choice_pa_pk_as_rep_draft9_dhSignedData) ||
(rep != NULL && rep->choice == choice_pa_pk_as_rep_dhInfo)) {
/* If mutually supported KDFs were found, use the alg agility KDF */
if (rep->u.dh_Info.kdfID) {
secret.data = server_key;
secret.length = server_key_len;
retval = pkinit_alg_agility_kdf(context, &secret,
rep->u.dh_Info.kdfID,
request->client, request->server,
enctype,
(krb5_octet_data *)req_pkt,
(krb5_octet_data *)out_data,
encrypting_key);
if (retval) {
pkiDebug("pkinit_alg_agility_kdf failed: %s\n",
error_message(retval));
goto cleanup;
}
/* Otherwise, use the older octetstring2key() function */
} else {
retval = pkinit_octetstring2key(context, enctype, server_key,
server_key_len, encrypting_key);
if (retval) {
pkiDebug("pkinit_octetstring2key failed: %s\n",
error_message(retval));
goto cleanup;
}
}
}
*send_pa = malloc(sizeof(krb5_pa_data));
if (*send_pa == NULL) {
retval = ENOMEM;
free(out_data->data);
free(out_data);
out_data = NULL;
goto cleanup;
}
(*send_pa)->magic = KV5M_PA_DATA;
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
(*send_pa)->pa_type = KRB5_PADATA_PK_AS_REP;
break;
case KRB5_PADATA_PK_AS_REQ_OLD:
case KRB5_PADATA_PK_AS_REP_OLD:
(*send_pa)->pa_type = KRB5_PADATA_PK_AS_REP_OLD;
break;
}
(*send_pa)->length = out_data->length;
(*send_pa)->contents = (krb5_octet *) out_data->data;
cleanup:
pkinit_fini_kdc_req_context(context, reqctx);
free(scratch.data);
free(out_data);
if (encoded_dhkey_info != NULL)
krb5_free_data(context, encoded_dhkey_info);
if (encoded_key_pack != NULL)
krb5_free_data(context, encoded_key_pack);
free(dh_pubkey);
free(server_key);
switch ((int)padata->pa_type) {
case KRB5_PADATA_PK_AS_REQ:
free_krb5_pa_pk_as_req(&reqp);
free_krb5_pa_pk_as_rep(&rep);
free_krb5_reply_key_pack(&key_pack);
break;
case KRB5_PADATA_PK_AS_REP_OLD:
case KRB5_PADATA_PK_AS_REQ_OLD:
free_krb5_pa_pk_as_req_draft9(&reqp9);
free_krb5_pa_pk_as_rep_draft9(&rep9);
if (!fixed_keypack)
free_krb5_reply_key_pack_draft9(&key_pack9);
else
free_krb5_reply_key_pack(&key_pack);
break;
}
if (retval)
pkiDebug("pkinit_verify_padata failure");
return retval;
}
| 76,503,992,088,312,030,000,000,000,000,000,000,000 | None | null | [
"CWE-476"
] | CVE-2012-1016 | The pkinit_server_return_padata function in plugins/preauth/pkinit/pkinit_srv.c in the PKINIT implementation in the Key Distribution Center (KDC) in MIT Kerberos 5 (aka krb5) before 1.10.4 attempts to find an agility KDF identifier in inappropriate circumstances, which allows remote attackers to cause a denial of service (NULL pointer dereference and daemon crash) via a crafted Draft 9 request. | https://nvd.nist.gov/vuln/detail/CVE-2012-1016 |
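A sketch of the one-condition change described in the commit message for pkinit_server_return_padata() above: only look at the algorithm-agility kdfID when a non-draft9 (RFC) reply structure exists, since rep is NULL for draft9 requests.

```c
	/* If this is DH, we haven't computed the key yet, so do it now. */
	if ((rep9 != NULL &&
	     rep9->choice == choice_pa_pk_as_rep_draft9_dhSignedData) ||
	    (rep != NULL && rep->choice == choice_pa_pk_as_rep_dhInfo)) {

		/* Don't dereference rep when building a draft9 reply. */
		if (rep != NULL && rep->u.dh_Info.kdfID) {
			secret.data = server_key;
			secret.length = server_key_len;
			/* ... alg-agility KDF path as in the function above ... */
		} else {
			/* ... older pkinit_octetstring2key() path ... */
		}
	}
```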
1,151 | linux | b5a1eeef04cc7859f34dec9b72ea1b28e4aba07c | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b5a1eeef04cc7859f34dec9b72ea1b28e4aba07c | batman-adv: Only write requested number of byte to user buffer
Don't write more than the requested number of bytes of a batman-adv icmp
packet to the userspace buffer. Otherwise unrelated userspace memory might get
overwritten by the kernel.
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de> | 1 | static ssize_t bat_socket_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct socket_client *socket_client = file->private_data;
struct socket_packet *socket_packet;
size_t packet_len;
int error;
if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
return -EAGAIN;
if ((!buf) || (count < sizeof(struct icmp_packet)))
return -EINVAL;
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
error = wait_event_interruptible(socket_client->queue_wait,
socket_client->queue_len);
if (error)
return error;
spin_lock_bh(&socket_client->lock);
socket_packet = list_first_entry(&socket_client->queue_list,
struct socket_packet, list);
list_del(&socket_packet->list);
socket_client->queue_len--;
spin_unlock_bh(&socket_client->lock);
error = copy_to_user(buf, &socket_packet->icmp_packet,
socket_packet->icmp_len);
packet_len = socket_packet->icmp_len;
kfree(socket_packet);
if (error)
return -EFAULT;
return packet_len;
}
| 85,869,577,016,758,350,000,000,000,000,000,000,000 | icmp_socket.c | 220,335,110,825,043,840,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2011-4604 | The bat_socket_read function in net/batman-adv/icmp_socket.c in the Linux kernel before 3.3 allows remote attackers to cause a denial of service (memory corruption) or possibly have unspecified other impact via a crafted batman-adv ICMP packet. | https://nvd.nist.gov/vuln/detail/CVE-2011-4604 |
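A minimal sketch of the fix described above for bat_socket_read(): bound the copy by the caller-supplied count so no more than the requested number of bytes is written to the user buffer.

```c
	packet_len = min(count, socket_packet->icmp_len);
	error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);

	kfree(socket_packet);

	if (error)
		return -EFAULT;

	return packet_len;
```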
1,152 | linux | ae53b5bd77719fed58086c5be60ce4f22bffe1c6 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/ae53b5bd77719fed58086c5be60ce4f22bffe1c6 | sctp: Fix another socket race during accept/peeloff
There is a race between sctp_rcv() and sctp_accept() where we
have moved the association from the listening socket to the
accepted socket, but sctp_rcv() processing cached the old
socket and continues to use it.
The easy solution is to check for the socket mismatch once we've
grabbed the socket lock. If we hit a mismatch, that means
that we are currently holding the lock on the listening socket,
but the association is referencing a newly accepted socket. We need
to drop the lock on the old socket and grab the lock on the new one.
A more proper solution might be to create accepted sockets when
the new association is established, similar to TCP. That would
eliminate the race for 1-to-1 style sockets, but it would still
exist for 1-to-many sockets where a user wishes to peel off an
association. For now, we'll live with this easy solution as
it addresses the problem.
Reported-by: Michal Hocko <mhocko@suse.cz>
Reported-by: Karsten Keil <kkeil@suse.de>
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | int sctp_rcv(struct sk_buff *skb)
{
struct sock *sk;
struct sctp_association *asoc;
struct sctp_endpoint *ep = NULL;
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
struct sctp_af *af;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
if (skb_linearize(skb))
goto discard_it;
sh = sctp_hdr(skb);
/* Pull up the IP and SCTP headers. */
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
goto discard_it;
if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0)
goto discard_it;
skb_pull(skb, sizeof(struct sctphdr));
/* Make sure we at least have chunk headers worth of data left. */
if (skb->len < sizeof(struct sctp_chunkhdr))
goto discard_it;
family = ipver2af(ip_hdr(skb)->version);
af = sctp_get_af_specific(family);
if (unlikely(!af))
goto discard_it;
/* Initialize local addresses for lookups. */
af->from_skb(&src, skb, 1);
af->from_skb(&dest, skb, 0);
/* If the packet is to or from a non-unicast address,
* silently discard the packet.
*
* This is not clearly defined in the RFC except in section
* 8.4 - OOTB handling. However, based on the book "Stream Control
* Transmission Protocol" 2.1, "It is important to note that the
* IP address of an SCTP transport address must be a routable
* unicast address. In other words, IP multicast addresses and
* IP broadcast addresses cannot be used in an SCTP transport
* address."
*/
if (!af->addr_valid(&src, NULL, skb) ||
!af->addr_valid(&dest, NULL, skb))
goto discard_it;
asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
if (!asoc)
ep = __sctp_rcv_lookup_endpoint(&dest);
/* Retrieve the common input handling substructure. */
rcvr = asoc ? &asoc->base : &ep->base;
sk = rcvr->sk;
/*
* If a frame arrives on an interface and the receiving socket is
* bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
{
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
} else {
sctp_endpoint_put(ep);
ep = NULL;
}
sk = sctp_get_ctl_sock();
ep = sctp_sk(sk)->ep;
sctp_endpoint_hold(ep);
rcvr = &ep->base;
}
/*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets.
* An SCTP packet is called an "out of the blue" (OOTB)
* packet if it is correctly formed, i.e., passed the
* receiver's checksum check, but the receiver is not
* able to identify the association to which this
* packet belongs.
*/
if (!asoc) {
if (sctp_rcv_ootb(skb)) {
SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
goto discard_release;
}
}
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
goto discard_release;
nf_reset(skb);
if (sk_filter(sk, skb))
goto discard_release;
/* Create an SCTP packet structure. */
chunk = sctp_chunkify(skb, asoc, sk);
if (!chunk)
goto discard_release;
SCTP_INPUT_CB(skb)->chunk = chunk;
/* Remember what endpoint is to handle this packet. */
chunk->rcvr = rcvr;
/* Remember the SCTP header. */
chunk->sctp_hdr = sh;
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
/* Remember where we came from. */
chunk->transport = transport;
/* Acquire access to the sock lock. Note: We are safe from other
* bottom halves on this lock, but a user may be in the lock too,
* so check if it is busy.
*/
sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
sctp_add_backlog(sk, skb);
} else {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
sctp_inq_push(&chunk->rcvr->inqueue, chunk);
}
sctp_bh_unlock_sock(sk);
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
return 0;
discard_it:
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
kfree_skb(skb);
return 0;
discard_release:
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
goto discard_it;
}
| 173,020,335,186,799,700,000,000,000,000,000,000,000 | input.c | 44,064,972,847,145,270,000,000,000,000,000,000,000 | [
"CWE-362"
] | CVE-2011-4348 | Race condition in the sctp_rcv function in net/sctp/input.c in the Linux kernel before 2.6.29 allows remote attackers to cause a denial of service (system hang) via SCTP packets. NOTE: in some environments, this issue exists because of an incomplete fix for CVE-2011-2482. | https://nvd.nist.gov/vuln/detail/CVE-2011-4348 |
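A hedged sketch of the "easy solution" described in the commit message, slotting in right after sctp_bh_lock_sock(sk) in the sctp_rcv() function shown above: re-check that the association still points at the socket we locked; if accept()/peeloff moved it, drop the old lock and take the lock on the new socket before queueing the chunk.

```c
	sctp_bh_lock_sock(sk);

	if (sk != rcvr->sk) {
		/* The association was migrated to a newly accepted or
		 * peeled-off socket while we waited for the lock; switch
		 * to the new socket before touching its queues. */
		sctp_bh_unlock_sock(sk);
		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);
	}

	/* ... backlog/inqueue handling continues as in the function above ... */
```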
1,153 | linux | c4e7f9022e506c6635a5037713c37118e23193e4 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/c4e7f9022e506c6635a5037713c37118e23193e4 | None | 1 | static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
{
int r = 0, idx;
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;
if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
return -EINVAL;
mutex_lock(&kvm->lock);
idx = srcu_read_lock(&kvm->srcu);
match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
assigned_dev->assigned_dev_id);
if (match) {
/* device already assigned */
r = -EEXIST;
goto out;
}
match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
if (match == NULL) {
printk(KERN_INFO "%s: Couldn't allocate memory\n",
__func__);
r = -ENOMEM;
goto out;
}
dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
assigned_dev->busnr,
assigned_dev->devfn);
if (!dev) {
printk(KERN_INFO "%s: host device not found\n", __func__);
r = -EINVAL;
goto out_free;
}
if (pci_enable_device(dev)) {
printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
r = -EBUSY;
goto out_put;
}
r = pci_request_regions(dev, "kvm_assigned_device");
if (r) {
printk(KERN_INFO "%s: Could not get access to device regions\n",
__func__);
goto out_disable;
}
pci_reset_function(dev);
pci_save_state(dev);
match->pci_saved_state = pci_store_saved_state(dev);
if (!match->pci_saved_state)
printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
__func__, dev_name(&dev->dev));
match->assigned_dev_id = assigned_dev->assigned_dev_id;
match->host_segnr = assigned_dev->segnr;
match->host_busnr = assigned_dev->busnr;
match->host_devfn = assigned_dev->devfn;
match->flags = assigned_dev->flags;
match->dev = dev;
spin_lock_init(&match->intx_lock);
match->irq_source_id = -1;
match->kvm = kvm;
match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
list_add(&match->list, &kvm->arch.assigned_dev_head);
if (!kvm->arch.iommu_domain) {
r = kvm_iommu_map_guest(kvm);
if (r)
goto out_list_del;
}
r = kvm_assign_device(kvm, match);
if (r)
goto out_list_del;
out:
srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
out_list_del:
if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
__func__, dev_name(&dev->dev));
list_del(&match->list);
pci_release_regions(dev);
out_disable:
pci_disable_device(dev);
out_put:
pci_dev_put(dev);
out_free:
kfree(match);
srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
}
| 272,600,557,570,379,850,000,000,000,000,000,000,000 | None | null | [
"CWE-264"
] | CVE-2011-4347 | The kvm_vm_ioctl_assign_device function in virt/kvm/assigned-dev.c in the KVM subsystem in the Linux kernel before 3.1.10 does not verify permission to access PCI configuration space and BAR resources, which allows host OS users to assign PCI devices and cause a denial of service (host OS crash) via a KVM_ASSIGN_PCI_DEVICE operation. | https://nvd.nist.gov/vuln/detail/CVE-2011-4347 |
1,160 | linux | f8e9881c2aef1e982e5abc25c046820cd0b7cf64 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/f8e9881c2aef1e982e5abc25c046820cd0b7cf64 | bridge: reset IPCB in br_parse_ip_options
Commit 462fb2af9788a82 ("bridge: Sanitize skb before it enters the IP
stack") missed one IPCB init before calling ip_options_compile().
Thanks to Scot Doyle for his tests and bug reports.
Reported-by: Scot Doyle <lkml@scotdoyle.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
Acked-by: Bandan Das <bandan.das@stratus.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Cc: Jan Lübbe <jluebbe@debian.org>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int br_parse_ip_options(struct sk_buff *skb)
{
struct ip_options *opt;
struct iphdr *iph;
struct net_device *dev = skb->dev;
u32 len;
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);
/* Basic sanity checks */
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto inhdr_error;
len = ntohs(iph->tot_len);
if (skb->len < len) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
} else if (len < (iph->ihl*4))
goto inhdr_error;
if (pskb_trim_rcsum(skb, len)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
/* Zero out the CB buffer if no options present */
if (iph->ihl == 5) {
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
return 0;
}
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
goto inhdr_error;
/* Check correct handling of SRR option */
if (unlikely(opt->srr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
goto drop;
if (ip_options_rcv_srr(skb))
goto drop;
}
return 0;
inhdr_error:
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
return -1;
}
| 126,955,592,485,195,450,000,000,000,000,000,000,000 | br_netfilter.c | 318,634,080,555,349,820,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2011-4087 | The br_parse_ip_options function in net/bridge/br_netfilter.c in the Linux kernel before 2.6.39 does not properly initialize a certain data structure, which allows remote attackers to cause a denial of service by leveraging connectivity to a network interface that uses an Ethernet bridge device. | https://nvd.nist.gov/vuln/detail/CVE-2011-4087 |
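A sketch of the fix described above for br_parse_ip_options(): initialize the IPCB control block for every bridged IPv4 packet, not only in the no-options fast path, so ip_options_compile() never sees stale cb data.

```c
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));	/* always reset cb */
	if (iph->ihl == 5)
		return 0;

	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
	if (ip_options_compile(dev_net(dev), opt, skb))
		goto inhdr_error;
```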
1,163 | linux | a5b2c5b2ad5853591a6cac6134cd0f599a720865 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/a5b2c5b2ad5853591a6cac6134cd0f599a720865 | AppArmor: fix oops in apparmor_setprocattr
When invalid parameters are passed to apparmor_setprocattr a NULL deref
oops occurs when it tries to record an audit message. This is because
it is passing NULL for the profile parameter for aa_audit. But aa_audit
now requires that the profile passed is not NULL.
Fix this by passing the current profile on the task that is trying to
setprocattr.
Signed-off-by: Kees Cook <kees@ubuntu.com>
Signed-off-by: John Johansen <john.johansen@canonical.com>
Cc: stable@kernel.org
Signed-off-by: James Morris <jmorris@namei.org> | 1 | static int apparmor_setprocattr(struct task_struct *task, char *name,
void *value, size_t size)
{
char *command, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
/* args points to a PAGE_SIZE buffer, AppArmor requires that
* the buffer must be null terminated or have size <= PAGE_SIZE -1
* so that AppArmor can null terminate them
*/
if (args[size - 1] != '\0') {
if (size == PAGE_SIZE)
return -EINVAL;
args[size] = '\0';
}
/* task can only write its own attributes */
if (current != task)
return -EACCES;
args = value;
args = strim(args);
command = strsep(&args, " ");
if (!args)
return -EINVAL;
args = skip_spaces(args);
if (!*args)
return -EINVAL;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
!AA_DO_TEST);
} else if (strcmp(command, "permhat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_DO_TEST);
} else if (strcmp(command, "changeprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
!AA_DO_TEST);
} else if (strcmp(command, "permprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
AA_DO_TEST);
} else if (strcmp(command, "permipc") == 0) {
error = aa_setprocattr_permipc(args);
} else {
struct common_audit_data sa;
COMMON_AUDIT_DATA_INIT(&sa, NONE);
sa.aad.op = OP_SETPROCATTR;
sa.aad.info = name;
sa.aad.error = -EINVAL;
return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL,
&sa, NULL);
}
} else if (strcmp(name, "exec") == 0) {
error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
!AA_DO_TEST);
} else {
/* only support the "current" and "exec" process attributes */
return -EINVAL;
}
if (!error)
error = size;
return error;
}
| 198,478,101,125,261,500,000,000,000,000,000,000,000 | lsm.c | 86,780,604,940,296,660,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2011-3619 | The apparmor_setprocattr function in security/apparmor/lsm.c in the Linux kernel before 3.0 does not properly handle invalid parameters, which allows local users to cause a denial of service (NULL pointer dereference and OOPS) or possibly have unspecified other impact by writing to a /proc/#####/attr/current file. | https://nvd.nist.gov/vuln/detail/CVE-2011-3619 |
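A sketch of the fix described in the commit message for apparmor_setprocattr() above: pass the calling task's profile to aa_audit() instead of NULL when rejecting an unknown command. The profile-lookup helper name is an assumption for illustration; the commit message only says the current profile is passed.

```c
		} else {
			struct common_audit_data sa;
			COMMON_AUDIT_DATA_INIT(&sa, NONE);
			sa.aad.op = OP_SETPROCATTR;
			sa.aad.info = name;
			sa.aad.error = -EINVAL;
			/* profile of the calling task, never NULL (helper name assumed) */
			return aa_audit(AUDIT_APPARMOR_DENIED,
					__aa_current_profile(), GFP_KERNEL,
					&sa, NULL);
		}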
1,164 | linux | aba8d056078e47350d85b06a9cabd5afcc4b72ea | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/aba8d056078e47350d85b06a9cabd5afcc4b72ea | perf tools: do not look at ./config for configuration
In addition to /etc/perfconfig and $HOME/.perfconfig, perf looks for
configuration in the file ./config, imitating git which looks at
$GIT_DIR/config. If ./config is not a perf configuration file, it
fails, or worse, treats it as a configuration file and changes behavior
in some unexpected way.
"config" is not an unusual name for a file to be lying around and perf
does not have a private directory dedicated for its own use, so let's
just stop looking for configuration in the cwd. Callers needing
context-sensitive configuration can use the PERF_CONFIG environment
variable.
Requested-by: Christian Ohm <chr.ohm@gmx.net>
Cc: 632923@bugs.debian.org
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Christian Ohm <chr.ohm@gmx.net>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110805165838.GA7237@elie.gateway.2wire.net
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com> | 1 | int perf_config(config_fn_t fn, void *data)
{
int ret = 0, found = 0;
char *repo_config = NULL;
const char *home = NULL;
/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
if (config_exclusive_filename)
return perf_config_from_file(fn, config_exclusive_filename, data);
if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
ret += perf_config_from_file(fn, perf_etc_perfconfig(),
data);
found += 1;
}
home = getenv("HOME");
if (perf_config_global() && home) {
char *user_config = strdup(mkpath("%s/.perfconfig", home));
if (!access(user_config, R_OK)) {
ret += perf_config_from_file(fn, user_config, data);
found += 1;
}
free(user_config);
}
repo_config = perf_pathdup("config");
if (!access(repo_config, R_OK)) {
ret += perf_config_from_file(fn, repo_config, data);
found += 1;
}
free(repo_config);
if (found == 0)
return -1;
return ret;
}
| 291,567,066,678,415,750,000,000,000,000,000,000,000 | config.c | 300,265,789,152,003,100,000,000,000,000,000,000,000 | [
"CWE-94"
] | CVE-2011-2905 | Untrusted search path vulnerability in the perf_config function in tools/perf/util/config.c in perf, as distributed in the Linux kernel before 3.1, allows local users to overwrite arbitrary files via a crafted config file in the current working directory. | https://nvd.nist.gov/vuln/detail/CVE-2011-2905 |
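A sketch of perf_config() after the change described above: the $PERF_CONFIG override, the system file and $HOME/.perfconfig are still consulted, while the ./config lookup in the current working directory is removed.

```c
	home = getenv("HOME");
	if (perf_config_global() && home) {
		char *user_config = strdup(mkpath("%s/.perfconfig", home));

		if (!access(user_config, R_OK)) {
			ret += perf_config_from_file(fn, user_config, data);
			found += 1;
		}
		free(user_config);
	}

	/* The perf_pathdup("config") block that read ./config is dropped. */

	if (found == 0)
		return -1;
	return ret;
```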
1,167 | linux | ea2bc483ff5caada7c4aa0d5fbf87d3a6590273d | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/ea2bc483ff5caada7c4aa0d5fbf87d3a6590273d | [SCTP]: Fix assertion (!atomic_read(&sk->sk_rmem_alloc)) failed message
In the current implementation, LKSCTP does receive buffer accounting for
data in sctp_receive_queue and pd_lobby. However, LKSCTP doesn't do
accounting for data in frag_list when data is fragmented. In addition,
LKSCTP doesn't do accounting for data in the reasm and lobby queues in
structure sctp_ulpq.
When there is data in these queues, an assertion-failed message is printed
in inet_sock_destruct because sk_rmem_alloc of oldsk does not become 0
when socket is destroyed.
Signed-off-by: Tsutomu Fujii <t-fujii@nb.jp.nec.com>
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
struct sctp_association *assoc,
sctp_socket_type_t type)
{
struct sctp_sock *oldsp = sctp_sk(oldsk);
struct sctp_sock *newsp = sctp_sk(newsk);
struct sctp_bind_bucket *pp; /* hash list port iterator */
struct sctp_endpoint *newep = newsp->ep;
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
int flags = 0;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
*/
newsk->sk_sndbuf = oldsk->sk_sndbuf;
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
/* Brute force copy old sctp opt. */
inet_sk_copy_descendant(newsk, oldsk);
/* Restore the ep value that was overwritten with the above structure
* copy.
*/
newsp->ep = newep;
newsp->hmac = NULL;
/* Hook this new socket in to the bind_hash list. */
pp = sctp_sk(oldsk)->bind_hash;
sk_add_bind_node(newsk, &pp->owner);
sctp_sk(newsk)->bind_hash = pp;
inet_sk(newsk)->num = inet_sk(oldsk)->num;
/* Copy the bind_addr list from the original endpoint to the new
* endpoint so that we can handle restarts properly
*/
if (PF_INET6 == assoc->base.sk->sk_family)
flags = SCTP_ADDR6_ALLOWED;
if (assoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (assoc->peer.ipv6_address)
flags |= SCTP_ADDR6_PEERSUPP;
sctp_bind_addr_copy(&newsp->ep->base.bind_addr,
&oldsp->ep->base.bind_addr,
SCTP_SCOPE_GLOBAL, GFP_KERNEL, flags);
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
*/
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
sctp_sock_rfree(skb);
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
sctp_skb_set_owner_r(skb, newsk);
}
}
/* Clean up any messages pending delivery due to partial
* delivery. Three cases:
* 1) No partial deliver; no work.
* 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
* 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
*/
skb_queue_head_init(&newsp->pd_lobby);
sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode;
if (sctp_sk(oldsk)->pd_mode) {
struct sk_buff_head *queue;
/* Decide which queue to move pd_lobby skbs to. */
if (assoc->ulpq.pd_mode) {
queue = &newsp->pd_lobby;
} else
queue = &newsk->sk_receive_queue;
/* Walk through the pd_lobby, looking for skbs that
* need moved to the new socket.
*/
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
sctp_sock_rfree(skb);
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
sctp_skb_set_owner_r(skb, newsk);
}
}
/* Clear up any skbs waiting for the partial
* delivery to finish.
*/
if (assoc->ulpq.pd_mode)
sctp_clear_pd(oldsk);
}
/* Set the type of socket to indicate that it is peeled off from the
* original UDP-style socket or created with the accept() call on a
* TCP-style socket..
*/
newsp->type = type;
/* Mark the new socket "in-use" by the user so that any packets
* that may arrive on the association after we've moved it are
* queued to the backlog. This prevents a potential race between
* backlog processing on the old socket and new-packet processing
* on the new socket.
*/
sctp_lock_sock(newsk);
sctp_assoc_migrate(assoc, newsk);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
*/
if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP))
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
sctp_release_sock(newsk);
}
| 245,295,386,150,735,570,000,000,000,000,000,000,000 | socket.c | 237,855,478,219,647,640,000,000,000,000,000,000,000 | [
"CWE-476"
] | CVE-2011-2482 | A certain Red Hat patch to the sctp_sock_migrate function in net/sctp/socket.c in the Linux kernel before 2.6.21, as used in Red Hat Enterprise Linux (RHEL) 5, allows remote attackers to cause a denial of service (NULL pointer dereference and OOPS) via a crafted SCTP packet. | https://nvd.nist.gov/vuln/detail/CVE-2011-2482 |
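A hedged sketch of the accounting idea described in the commit message: when an event's skb carries fragments, charge every skb on its frag_list to the new socket as well, instead of only the head skb, wherever sctp_skb_set_owner_r() is called in sctp_sock_migrate() above. The helper is illustrative; the upstream patch also applies the same accounting to the reasm and lobby queues.

```c
/* Charge an skb and all skbs on its frag_list to the new owner socket. */
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
	struct sk_buff *frag;

	for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
		sctp_skb_set_owner_r_frag(frag, sk);

	sctp_skb_set_owner_r(skb, sk);
}
```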
1,172 | linux | 4ff67b720c02c36e54d55b88c2931879b7db1cd2 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4ff67b720c02c36e54d55b88c2931879b7db1cd2 | cifs: clean up cifs_find_smb_ses (try #2)
This patch replaces the earlier patch by the same name. The only
difference is that MAX_PASSWORD_SIZE has been increased to attempt to
match the limits that windows enforces.
Do a better job of matching sessions by authtype. Matching by username
for a Kerberos session is incorrect, and anonymous sessions need special
handling.
Also, in the case where we do match by username, we also need to match
by password. That ensures that someone else doesn't "borrow" an existing
session without needing to know the password.
Finally, passwords can be longer than 16 bytes. Bump MAX_PASSWORD_SIZE
to 512 to match the size that the userspace mount helper allows.
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com> | 1 | cifs_find_smb_ses(struct TCP_Server_Info *server, char *username)
{
struct list_head *tmp;
struct cifsSesInfo *ses;
write_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
if (strncmp(ses->userName, username, MAX_USERNAME_SIZE))
continue;
++ses->ses_count;
write_unlock(&cifs_tcp_ses_lock);
return ses;
}
write_unlock(&cifs_tcp_ses_lock);
return NULL;
}
| 174,415,870,851,944,500,000,000,000,000,000,000,000 | connect.c | 326,028,216,122,812,540,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2011-1585 | The cifs_find_smb_ses function in fs/cifs/connect.c in the Linux kernel before 2.6.36 does not properly determine the associations between users and sessions, which allows local users to bypass CIFS share authentication by leveraging a mount of a share by a different user. | https://nvd.nist.gov/vuln/detail/CVE-2011-1585 |
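The commit message in the record above says a cached session must be matched by password as well as username, otherwise another local user can reuse ("borrow") it without knowing the password. Below is a minimal, compilable user-space sketch of that matching rule; the struct, the size limits and ses_matches() are illustrative stand-ins, not the real cifs structures or the actual patch.

```c
#include <stdio.h>
#include <string.h>

#define MAX_USERNAME_SIZE 256   /* illustrative limits, not the kernel's exact values */
#define MAX_PASSWORD_SIZE 512

struct ses {                    /* toy stand-in for struct cifsSesInfo */
	char username[MAX_USERNAME_SIZE];
	char password[MAX_PASSWORD_SIZE];
};

/* Match a cached session only when BOTH the username and the password
 * agree, which is the rule the fix introduces for password-based
 * sessions so nobody can "borrow" another user's session. */
static int ses_matches(const struct ses *s, const char *user, const char *pass)
{
	if (strncmp(s->username, user, MAX_USERNAME_SIZE) != 0)
		return 0;
	if (strncmp(s->password, pass, MAX_PASSWORD_SIZE) != 0)
		return 0;
	return 1;
}

int main(void)
{
	struct ses cached = { "alice", "secret" };

	printf("same user, wrong password -> %d\n", ses_matches(&cached, "alice", "guess"));
	printf("same user, right password -> %d\n", ses_matches(&cached, "alice", "secret"));
	return 0;
}
```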
1,176 | linux | d370af0ef7951188daeb15bae75db7ba57c67846 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/d370af0ef7951188daeb15bae75db7ba57c67846 | irda: validate peer name and attribute lengths
Length fields provided by a peer for names and attributes may be longer
than the destination array sizes. Validate lengths to prevent stack
buffer overflows.
Signed-off-by: Dan Rosenberg <drosenberg@vsecurity.com>
Cc: stable@kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
struct sk_buff *skb)
{
struct ias_object *obj;
struct ias_attrib *attrib;
int name_len;
int attr_len;
char name[IAS_MAX_CLASSNAME + 1]; /* 60 bytes */
char attr[IAS_MAX_ATTRIBNAME + 1]; /* 60 bytes */
__u8 *fp;
int n;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
fp = skb->data;
n = 1;
name_len = fp[n++];
memcpy(name, fp+n, name_len); n+=name_len;
name[name_len] = '\0';
attr_len = fp[n++];
memcpy(attr, fp+n, attr_len); n+=attr_len;
attr[attr_len] = '\0';
IRDA_DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr);
obj = irias_find_object(name);
if (obj == NULL) {
IRDA_DEBUG(2, "LM-IAS: Object %s not found\n", name);
iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN,
&irias_missing);
return;
}
IRDA_DEBUG(4, "LM-IAS: found %s, id=%d\n", obj->name, obj->id);
attrib = irias_find_attrib(obj, attr);
if (attrib == NULL) {
IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr);
iriap_getvaluebyclass_response(self, obj->id,
IAS_ATTRIB_UNKNOWN,
&irias_missing);
return;
}
/* We have a match; send the value. */
iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS,
attrib->value);
}
| 289,039,370,717,523,300,000,000,000,000,000,000,000 | iriap.c | 294,293,270,431,801,740,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2011-1180 | Multiple stack-based buffer overflows in the iriap_getvaluebyclass_indication function in net/irda/iriap.c in the Linux kernel before 2.6.39 allow remote attackers to cause a denial of service (memory corruption) or possibly have unspecified other impact by leveraging connectivity to an IrDA infrared network and sending a large integer value for a (1) name length or (2) attribute length. | https://nvd.nist.gov/vuln/detail/CVE-2011-1180 |
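The vulnerable function above copies attacker-controlled name_len/attr_len bytes into 61-byte stack buffers with no check. The following self-contained sketch shows the bounds check the commit message asks for; only IAS_MAX_CLASSNAME mirrors a constant from the code above, the helper itself is illustrative rather than the real patch.

```c
#include <stdio.h>
#include <string.h>

#define IAS_MAX_CLASSNAME 60    /* same limit as the stack buffers in the code above */

/* Copy a length-prefixed field from the frame into a fixed-size buffer,
 * rejecting frames whose declared length would overflow it.  This is the
 * bounds check the vulnerable function is missing. */
static int copy_bounded(char dst[IAS_MAX_CLASSNAME + 1],
			const unsigned char *src, unsigned int len)
{
	if (len > IAS_MAX_CLASSNAME)
		return -1;
	memcpy(dst, src, len);
	dst[len] = '\0';
	return 0;
}

int main(void)
{
	unsigned char field[] = { 'D', 'e', 'v', 'i', 'c', 'e' };
	char name[IAS_MAX_CLASSNAME + 1];

	if (copy_bounded(name, field, sizeof(field)) == 0)
		printf("accepted: %s\n", name);
	if (copy_bounded(name, field, 200) != 0)
		printf("rejected: declared length 200 exceeds the buffer\n");
	return 0;
}
```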
1,177 | linux | 8909c9ad8ff03611c9c96c9a92656213e4bb495b | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/8909c9ad8ff03611c9c96c9a92656213e4bb495b | net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules
Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with
CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean
that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are
limited to /lib/modules/**. However, CAP_NET_ADMIN capability shouldn't
allow anybody to load any module not related to networking.
This patch restricts an ability of autoloading modules to netdev modules
with explicit aliases. This fixes CVE-2011-1019.
Arnd Bergmann suggested to leave untouched the old pre-v2.6.32 behavior
of loading netdev modules by name (without any prefix) for processes
with CAP_SYS_MODULE to maintain the compatibility with network scripts
that use autoloading netdev modules by aliases like "eth0", "wlan0".
Currently there are only three users of the feature in the upstream
kernel: ipip, ip_gre and sit.
root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) --
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: fffffff800001000
CapEff: fffffff800001000
CapBnd: fffffff800001000
root@albatros:~# modprobe xfs
FATAL: Error inserting xfs
(/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted
root@albatros:~# lsmod | grep xfs
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit
sit: error fetching interface information: Device not found
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit0
sit0 Link encap:IPv6-in-IPv4
NOARP MTU:1480 Metric:1
root@albatros:~# lsmod | grep sit
sit 10457 0
tunnel4 2957 1 sit
For CAP_SYS_MODULE module loading is still relaxed:
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: ffffffffffffffff
CapEff: ffffffffffffffff
CapBnd: ffffffffffffffff
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
xfs 745319 0
Reference: https://lkml.org/lkml/2011/2/24/203
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Kees Cook <kees.cook@canonical.com>
Signed-off-by: James Morris <jmorris@namei.org> | 1 | void dev_load(struct net *net, const char *name)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
rcu_read_unlock();
if (!dev && capable(CAP_NET_ADMIN))
request_module("%s", name);
}
| 265,834,847,794,592,050,000,000,000,000,000,000,000 | dev.c | 199,475,088,564,555,730,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2011-1019 | The dev_load function in net/core/dev.c in the Linux kernel before 2.6.38 allows local users to bypass an intended CAP_SYS_MODULE capability requirement and load arbitrary modules by leveraging the CAP_NET_ADMIN capability. | https://nvd.nist.gov/vuln/detail/CVE-2011-1019 |
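The fix described in the commit message above restricts CAP_NET_ADMIN to modules that declare an explicit "netdev-<name>" alias while leaving plain-name loading to CAP_SYS_MODULE. Here is a hedged user-space sketch of that decision; capable() and request_module() are stubbed out, and the exact ordering and fall-through behaviour of the checks in the real patch may differ.

```c
#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for kernel facilities, for illustration only. */
static bool has_cap_net_admin  = true;
static bool has_cap_sys_module = false;

static void request_module(const char *prefix, const char *name)
{
	printf("would try to load module \"%s%s\"\n", prefix, name);
}

/* Sketch of the policy from the commit message: a missing device only
 * triggers prefixed "netdev-<name>" aliases for CAP_NET_ADMIN, while the
 * legacy unprefixed lookup stays available to CAP_SYS_MODULE. */
static void dev_load_sketch(const char *name, bool dev_exists)
{
	if (dev_exists)
		return;
	if (has_cap_net_admin)
		request_module("netdev-", name);
	if (has_cap_sys_module)
		request_module("", name);
}

int main(void)
{
	dev_load_sketch("sit0", false);
	return 0;
}
```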
1,178 | krb5 | cf1a0c411b2668c57c41e9c4efd15ba17b6b322c | https://github.com/krb5/krb5 | https://github.com/krb5/krb5/commit/cf1a0c411b2668c57c41e9c4efd15ba17b6b322c | Fix kpasswd UDP ping-pong [CVE-2002-2443]
The kpasswd service provided by kadmind was vulnerable to a UDP
"ping-pong" attack [CVE-2002-2443]. Don't respond to packets unless
they pass some basic validation, and don't respond to our own error
packets.
Some authors use CVE-1999-0103 to refer to the kpasswd UDP ping-pong
attack or UDP ping-pong attacks in general, but there is discussion
leading toward narrowing the definition of CVE-1999-0103 to the echo,
chargen, or other similar built-in inetd services.
Thanks to Vincent Danen for alerting us to this issue.
CVSSv2: AV:N/AC:L/Au:N/C:N/I:N/A:P/E:P/RL:O/RC:C
ticket: 7637 (new)
target_version: 1.11.3
tags: pullup | 1 | process_chpw_request(krb5_context context, void *server_handle, char *realm,
krb5_keytab keytab, const krb5_fulladdr *local_faddr,
const krb5_fulladdr *remote_faddr, krb5_data *req,
krb5_data *rep)
{
krb5_error_code ret;
char *ptr;
unsigned int plen, vno;
krb5_data ap_req, ap_rep = empty_data();
krb5_data cipher = empty_data(), clear = empty_data();
krb5_auth_context auth_context = NULL;
krb5_principal changepw = NULL;
krb5_principal client, target = NULL;
krb5_ticket *ticket = NULL;
krb5_replay_data replay;
krb5_error krberror;
int numresult;
char strresult[1024];
char *clientstr = NULL, *targetstr = NULL;
const char *errmsg = NULL;
size_t clen;
char *cdots;
struct sockaddr_storage ss;
socklen_t salen;
char addrbuf[100];
krb5_address *addr = remote_faddr->address;
*rep = empty_data();
if (req->length < 4) {
/* either this, or the server is printing bad messages,
or the caller passed in garbage */
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated", sizeof(strresult));
goto chpwfail;
}
ptr = req->data;
/* verify length */
plen = (*ptr++ & 0xff);
plen = (plen<<8) | (*ptr++ & 0xff);
if (plen != req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request length was inconsistent",
sizeof(strresult));
goto chpwfail;
}
/* verify version number */
vno = (*ptr++ & 0xff) ;
vno = (vno<<8) | (*ptr++ & 0xff);
if (vno != 1 && vno != RFC3244_VERSION) {
ret = KRB5KDC_ERR_BAD_PVNO;
numresult = KRB5_KPASSWD_BAD_VERSION;
snprintf(strresult, sizeof(strresult),
"Request contained unknown protocol version number %d", vno);
goto chpwfail;
}
/* read, check ap-req length */
ap_req.length = (*ptr++ & 0xff);
ap_req.length = (ap_req.length<<8) | (*ptr++ & 0xff);
if (ptr + ap_req.length >= req->data + req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated in AP-REQ",
sizeof(strresult));
goto chpwfail;
}
/* verify ap_req */
ap_req.data = ptr;
ptr += ap_req.length;
ret = krb5_auth_con_init(context, &auth_context);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_auth_con_setflags(context, auth_context,
KRB5_AUTH_CONTEXT_DO_SEQUENCE);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_build_principal(context, &changepw, strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed building kadmin/changepw principal",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_rd_req(context, &auth_context, &ap_req, changepw, keytab,
NULL, &ticket);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed reading application request",
sizeof(strresult));
goto chpwfail;
}
/* construct the ap-rep */
ret = krb5_mk_rep(context, auth_context, &ap_rep);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed replying to application request",
sizeof(strresult));
goto chpwfail;
}
/* decrypt the ChangePasswdData */
cipher.length = (req->data + req->length) - ptr;
cipher.data = ptr;
/*
* Don't set a remote address in auth_context before calling krb5_rd_priv,
* so that we can work against clients behind a NAT. Reflection attacks
* aren't a concern since we use sequence numbers and since our requests
* don't look anything like our responses. Also don't set a local address,
* since we don't know what interface the request was received on.
*/
ret = krb5_rd_priv(context, auth_context, &cipher, &clear, &replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed decrypting request", sizeof(strresult));
goto chpwfail;
}
client = ticket->enc_part2->client;
/* decode ChangePasswdData for setpw requests */
if (vno == RFC3244_VERSION) {
krb5_data *clear_data;
ret = decode_krb5_setpw_req(&clear, &clear_data, &target);
if (ret != 0) {
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Failed decoding ChangePasswdData",
sizeof(strresult));
goto chpwfail;
}
zapfree(clear.data, clear.length);
clear = *clear_data;
free(clear_data);
if (target != NULL) {
ret = krb5_unparse_name(context, target, &targetstr);
if (ret != 0) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing target name for log",
sizeof(strresult));
goto chpwfail;
}
}
}
ret = krb5_unparse_name(context, client, &clientstr);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing client name for log",
sizeof(strresult));
goto chpwfail;
}
/* for cpw, verify that this is an AS_REQ ticket */
if (vno == 1 &&
(ticket->enc_part2->flags & TKT_FLG_INITIAL) == 0) {
numresult = KRB5_KPASSWD_INITIAL_FLAG_NEEDED;
strlcpy(strresult, "Ticket must be derived from a password",
sizeof(strresult));
goto chpwfail;
}
/* change the password */
ptr = k5memdup0(clear.data, clear.length, &ret);
ret = schpw_util_wrapper(server_handle, client, target,
(ticket->enc_part2->flags & TKT_FLG_INITIAL) != 0,
ptr, NULL, strresult, sizeof(strresult));
if (ret)
errmsg = krb5_get_error_message(context, ret);
/* zap the password */
zapfree(clear.data, clear.length);
zapfree(ptr, clear.length);
clear = empty_data();
clen = strlen(clientstr);
trunc_name(&clen, &cdots);
switch (addr->addrtype) {
case ADDRTYPE_INET: {
struct sockaddr_in *sin = ss2sin(&ss);
sin->sin_family = AF_INET;
memcpy(&sin->sin_addr, addr->contents, addr->length);
sin->sin_port = htons(remote_faddr->port);
salen = sizeof(*sin);
break;
}
case ADDRTYPE_INET6: {
struct sockaddr_in6 *sin6 = ss2sin6(&ss);
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr, addr->contents, addr->length);
sin6->sin6_port = htons(remote_faddr->port);
salen = sizeof(*sin6);
break;
}
default: {
struct sockaddr *sa = ss2sa(&ss);
sa->sa_family = AF_UNSPEC;
salen = sizeof(*sa);
break;
}
}
if (getnameinfo(ss2sa(&ss), salen,
addrbuf, sizeof(addrbuf), NULL, 0,
NI_NUMERICHOST | NI_NUMERICSERV) != 0)
strlcpy(addrbuf, "<unprintable>", sizeof(addrbuf));
if (vno == RFC3244_VERSION) {
size_t tlen;
char *tdots;
const char *targetp;
if (target == NULL) {
tlen = clen;
tdots = cdots;
targetp = targetstr;
} else {
tlen = strlen(targetstr);
trunc_name(&tlen, &tdots);
targetp = clientstr;
}
krb5_klog_syslog(LOG_NOTICE, _("setpw request from %s by %.*s%s for "
"%.*s%s: %s"), addrbuf, (int) clen,
clientstr, cdots, (int) tlen, targetp, tdots,
errmsg ? errmsg : "success");
} else {
krb5_klog_syslog(LOG_NOTICE, _("chpw request from %s for %.*s%s: %s"),
addrbuf, (int) clen, clientstr, cdots,
errmsg ? errmsg : "success");
}
switch (ret) {
case KADM5_AUTH_CHANGEPW:
numresult = KRB5_KPASSWD_ACCESSDENIED;
break;
case KADM5_PASS_Q_TOOSHORT:
case KADM5_PASS_REUSE:
case KADM5_PASS_Q_CLASS:
case KADM5_PASS_Q_DICT:
case KADM5_PASS_Q_GENERIC:
case KADM5_PASS_TOOSOON:
numresult = KRB5_KPASSWD_SOFTERROR;
break;
case 0:
numresult = KRB5_KPASSWD_SUCCESS;
strlcpy(strresult, "", sizeof(strresult));
break;
default:
numresult = KRB5_KPASSWD_HARDERROR;
break;
}
chpwfail:
clear.length = 2 + strlen(strresult);
clear.data = (char *) malloc(clear.length);
ptr = clear.data;
*ptr++ = (numresult>>8) & 0xff;
*ptr++ = numresult & 0xff;
memcpy(ptr, strresult, strlen(strresult));
cipher = empty_data();
if (ap_rep.length) {
ret = krb5_auth_con_setaddrs(context, auth_context,
local_faddr->address, NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult,
"Failed storing client and server internet addresses",
sizeof(strresult));
} else {
ret = krb5_mk_priv(context, auth_context, &clear, &cipher,
&replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed encrypting reply",
sizeof(strresult));
}
}
}
/* if no KRB-PRIV was constructed, then we need a KRB-ERROR.
if this fails, just bail. there's nothing else we can do. */
if (cipher.length == 0) {
/* clear out ap_rep now, so that it won't be inserted in the
reply */
if (ap_rep.length) {
free(ap_rep.data);
ap_rep = empty_data();
}
krberror.ctime = 0;
krberror.cusec = 0;
krberror.susec = 0;
ret = krb5_timeofday(context, &krberror.stime);
if (ret)
goto bailout;
/* this is really icky. but it's what all the other callers
to mk_error do. */
krberror.error = ret;
krberror.error -= ERROR_TABLE_BASE_krb5;
if (krberror.error < 0 || krberror.error > 128)
krberror.error = KRB_ERR_GENERIC;
krberror.client = NULL;
ret = krb5_build_principal(context, &krberror.server,
strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret)
goto bailout;
krberror.text.length = 0;
krberror.e_data = clear;
ret = krb5_mk_error(context, &krberror, &cipher);
krb5_free_principal(context, krberror.server);
if (ret)
goto bailout;
}
/* construct the reply */
ret = alloc_data(rep, 6 + ap_rep.length + cipher.length);
if (ret)
goto bailout;
ptr = rep->data;
/* length */
*ptr++ = (rep->length>>8) & 0xff;
*ptr++ = rep->length & 0xff;
/* version == 0x0001 big-endian */
*ptr++ = 0;
*ptr++ = 1;
/* ap_rep length, big-endian */
*ptr++ = (ap_rep.length>>8) & 0xff;
*ptr++ = ap_rep.length & 0xff;
/* ap-rep data */
if (ap_rep.length) {
memcpy(ptr, ap_rep.data, ap_rep.length);
ptr += ap_rep.length;
}
/* krb-priv or krb-error */
memcpy(ptr, cipher.data, cipher.length);
bailout:
krb5_auth_con_free(context, auth_context);
krb5_free_principal(context, changepw);
krb5_free_ticket(context, ticket);
free(ap_rep.data);
free(clear.data);
free(cipher.data);
krb5_free_principal(context, target);
krb5_free_unparsed_name(context, targetstr);
krb5_free_unparsed_name(context, clientstr);
krb5_free_error_message(context, errmsg);
return ret;
}
| 151,952,841,528,932,100,000,000,000,000,000,000,000 | schpw.c | 303,446,096,086,926,720,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2002-2443 | schpw.c in the kpasswd service in kadmind in MIT Kerberos 5 (aka krb5) before 1.11.3 does not properly validate UDP packets before sending responses, which allows remote attackers to cause a denial of service (CPU and bandwidth consumption) via a forged packet that triggers a communication loop, as demonstrated by krb_pingpong.nasl, a related issue to CVE-1999-0103. | https://nvd.nist.gov/vuln/detail/CVE-2002-2443 |
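The ping-pong is possible because the handler above builds an error reply even for packets that fail the most basic sanity checks, so two kpasswd servers can bounce errors at each other indefinitely. The sketch below illustrates the "validate before replying" idea from the commit message; the field offsets mirror the parsing in the function above, but the helper is illustrative and not the exact MIT patch.

```c
#include <stdio.h>

#define RFC3244_VERSION 0xff80

/* Return 1 if the datagram looks like a genuine kpasswd request and
 * deserves a reply, 0 to drop it silently.  Dropping malformed packets
 * (including our own error replies) is what breaks the UDP ping-pong. */
static int looks_like_kpasswd_request(const unsigned char *pkt, unsigned int len)
{
	unsigned int plen, vno, ap_req_len;

	if (len < 6)
		return 0;
	plen = (pkt[0] << 8) | pkt[1];
	if (plen != len)
		return 0;
	vno = (pkt[2] << 8) | pkt[3];
	if (vno != 1 && vno != RFC3244_VERSION)
		return 0;
	ap_req_len = (pkt[4] << 8) | pkt[5];
	/* A KRB-ERROR reply carries a zero length in this position, so this
	 * check also keeps the server from answering its own error packets. */
	if (ap_req_len == 0 || 6 + ap_req_len > len)
		return 0;
	return 1;
}

int main(void)
{
	unsigned char bogus[] = { 0x00, 0x06, 0x00, 0x01, 0x00, 0x00 };

	printf("reply to zero-AP-REQ packet? %d\n",
	       looks_like_kpasswd_request(bogus, sizeof(bogus)));
	return 0;
}
```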
1,179 | linux | f54e18f1b831c92f6512d2eedb224cd63d607d3d | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/f54e18f1b831c92f6512d2eedb224cd63d607d3d | isofs: Fix infinite looping over CE entries
Rock Ridge extensions define so called Continuation Entries (CE) which
define where further space with Rock Ridge data is located. A corrupted
isofs image can contain an arbitrarily long chain of these, including one
that forms a loop, thus causing the kernel to end up in an infinite loop when
traversing these entries.
Limit the traversal to 32 entries which should be more than enough space
to store all the Rock Ridge data.
Reported-by: P J P <ppandit@redhat.com>
CC: stable@vger.kernel.org
Signed-off-by: Jan Kara <jack@suse.cz> | 1 | static int rock_continue(struct rock_state *rs)
{
int ret = 1;
int blocksize = 1 << rs->inode->i_blkbits;
const int min_de_size = offsetof(struct rock_ridge, u);
kfree(rs->buffer);
rs->buffer = NULL;
if ((unsigned)rs->cont_offset > blocksize - min_de_size ||
(unsigned)rs->cont_size > blocksize ||
(unsigned)(rs->cont_offset + rs->cont_size) > blocksize) {
printk(KERN_NOTICE "rock: corrupted directory entry. "
"extent=%d, offset=%d, size=%d\n",
rs->cont_extent, rs->cont_offset, rs->cont_size);
ret = -EIO;
goto out;
}
if (rs->cont_extent) {
struct buffer_head *bh;
rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL);
if (!rs->buffer) {
ret = -ENOMEM;
goto out;
}
ret = -EIO;
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
if (bh) {
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
rs->cont_size);
put_bh(bh);
rs->chr = rs->buffer;
rs->len = rs->cont_size;
rs->cont_extent = 0;
rs->cont_size = 0;
rs->cont_offset = 0;
return 0;
}
printk("Unable to read rock-ridge attributes\n");
}
out:
kfree(rs->buffer);
rs->buffer = NULL;
return ret;
}
| 37,972,843,024,361,270,000,000,000,000,000,000,000 | rock.c | 282,024,907,067,004,600,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-9420 | The rock_continue function in fs/isofs/rock.c in the Linux kernel through 3.18.1 does not restrict the number of Rock Ridge continuation entries, which allows local users to cause a denial of service (infinite loop, and system crash or hang) via a crafted iso9660 image. | https://nvd.nist.gov/vuln/detail/CVE-2014-9420 |
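The commit message caps the continuation-entry walk at 32 links. Below is a toy, compilable model of that guard; the struct and chain representation are invented for illustration, only the RR_MAX_CE_ENTRIES limit comes from the commit message.

```c
#include <stdio.h>

#define RR_MAX_CE_ENTRIES 32    /* limit named in the commit message */

struct ce { int next; };        /* toy continuation entry: index of next CE, -1 = end */

/* Walk a chain of continuation entries, refusing to follow more than
 * RR_MAX_CE_ENTRIES links so a crafted image whose entries form a cycle
 * is cut off instead of spinning forever. */
static int walk_ce_chain(const struct ce *entries, int start)
{
	int loops = 0, cur = start;

	while (cur != -1) {
		if (++loops > RR_MAX_CE_ENTRIES)
			return -1;      /* corrupted / looping chain */
		cur = entries[cur].next;
	}
	return loops;
}

int main(void)
{
	struct ce good[3] = { {1}, {2}, {-1} };
	struct ce loop[2] = { {1}, {0} };       /* 0 -> 1 -> 0 -> ... */

	printf("well-formed chain: %d entries\n", walk_ce_chain(good, 0));
	printf("looping chain: %d (rejected)\n", walk_ce_chain(loop, 0));
	return 0;
}
```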
1,180 | linux | f647d7c155f069c1a068030255c300663516420e | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/f647d7c155f069c1a068030255c300663516420e | x86_64, switch_to(): Load TLS descriptors before switching DS and ES
Otherwise, if buggy user code points DS or ES into the TLS
array, they would be corrupted after a context switch.
This also significantly improves the comments and documents some
gotchas in the code.
Before this patch, the both tests below failed. With this
patch, the es test passes, although the gsbase test still fails.
----- begin es test -----
/*
* Copyright (c) 2014 Andy Lutomirski
* GPL v2
*/
static unsigned short GDT3(int idx)
{
return (idx << 3) | 3;
}
static int create_tls(int idx, unsigned int base)
{
struct user_desc desc = {
.entry_number = idx,
.base_addr = base,
.limit = 0xfffff,
.seg_32bit = 1,
.contents = 0, /* Data, grow-up */
.read_exec_only = 0,
.limit_in_pages = 1,
.seg_not_present = 0,
.useable = 0,
};
if (syscall(SYS_set_thread_area, &desc) != 0)
err(1, "set_thread_area");
return desc.entry_number;
}
int main()
{
int idx = create_tls(-1, 0);
printf("Allocated GDT index %d\n", idx);
unsigned short orig_es;
asm volatile ("mov %%es,%0" : "=rm" (orig_es));
int errors = 0;
int total = 1000;
for (int i = 0; i < total; i++) {
asm volatile ("mov %0,%%es" : : "rm" (GDT3(idx)));
usleep(100);
unsigned short es;
asm volatile ("mov %%es,%0" : "=rm" (es));
asm volatile ("mov %0,%%es" : : "rm" (orig_es));
if (es != GDT3(idx)) {
if (errors == 0)
printf("[FAIL]\tES changed from 0x%hx to 0x%hx\n",
GDT3(idx), es);
errors++;
}
}
if (errors) {
printf("[FAIL]\tES was corrupted %d/%d times\n", errors, total);
return 1;
} else {
printf("[OK]\tES was preserved\n");
return 0;
}
}
----- end es test -----
----- begin gsbase test -----
/*
* gsbase.c, a gsbase test
* Copyright (c) 2014 Andy Lutomirski
* GPL v2
*/
static unsigned char *testptr, *testptr2;
static unsigned char read_gs_testvals(void)
{
unsigned char ret;
asm volatile ("movb %%gs:%1, %0" : "=r" (ret) : "m" (*testptr));
return ret;
}
int main()
{
int errors = 0;
testptr = mmap((void *)0x200000000UL, 1, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (testptr == MAP_FAILED)
err(1, "mmap");
testptr2 = mmap((void *)0x300000000UL, 1, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (testptr2 == MAP_FAILED)
err(1, "mmap");
*testptr = 0;
*testptr2 = 1;
if (syscall(SYS_arch_prctl, ARCH_SET_GS,
(unsigned long)testptr2 - (unsigned long)testptr) != 0)
err(1, "ARCH_SET_GS");
usleep(100);
if (read_gs_testvals() == 1) {
printf("[OK]\tARCH_SET_GS worked\n");
} else {
printf("[FAIL]\tARCH_SET_GS failed\n");
errors++;
}
asm volatile ("mov %0,%%gs" : : "r" (0));
if (read_gs_testvals() == 0) {
printf("[OK]\tWriting 0 to gs worked\n");
} else {
printf("[FAIL]\tWriting 0 to gs failed\n");
errors++;
}
usleep(100);
if (read_gs_testvals() == 0) {
printf("[OK]\tgsbase is still zero\n");
} else {
printf("[FAIL]\tgsbase was corrupted\n");
errors++;
}
return errors == 0 ? 0 : 1;
}
----- end gsbase test -----
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: <stable@vger.kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/509d27c9fec78217691c3dad91cec87e1006b34a.1418075657.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org> | 1 | __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread;
struct thread_struct *next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
unsigned fsindex, gsindex;
fpu_switch_t fpu;
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
/*
* Reload esp0, LDT and the page table pointer:
*/
load_sp0(tss, next);
/*
* Switch DS and ES.
* This won't pick up thread selector changes, but I guess that is ok.
*/
savesegment(es, prev->es);
if (unlikely(next->es | prev->es))
loadsegment(es, next->es);
savesegment(ds, prev->ds);
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
*
* (e.g. xen_load_tls())
*/
savesegment(fs, fsindex);
savesegment(gs, gsindex);
load_TLS(next, cpu);
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
* the GDT and LDT are properly updated, and must be
* done before math_state_restore, so the TS bit is up
* to date.
*/
arch_end_context_switch(next_p);
/*
* Switch FS and GS.
*
* Segment register != 0 always requires a reload. Also
* reload when it has changed. When prev process used 64bit
* base always reload to avoid an information leak.
*/
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
/*
* Check if the user used a selector != 0; if yes
* clear 64bit base, since overloaded base is always
* mapped to the Null selector
*/
if (fsindex)
prev->fs = 0;
}
/* when next process has a 64bit base use it */
if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
if (gsindex)
prev->gs = 0;
}
if (next->gs)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
switch_fpu_finish(next_p, fpu);
/*
* Switch the PDA and FPU contexts.
*/
prev->usersp = this_cpu_read(old_rsp);
this_cpu_write(old_rsp, next->usersp);
this_cpu_write(current_task, next_p);
/*
* If it were not for PREEMPT_ACTIVE we could guarantee that the
* preempt_count of all tasks was equal here and this would not be
* needed.
*/
task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
this_cpu_write(kernel_stack,
(unsigned long)task_stack_page(next_p) +
THREAD_SIZE - KERNEL_STACK_OFFSET);
/*
* Now maybe reload the debug registers and handle I/O bitmaps
*/
if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
return prev_p;
}
| 205,391,279,388,742,040,000,000,000,000,000,000,000 | process_64.c | 81,668,502,972,952,200,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2014-9419 | The __switch_to function in arch/x86/kernel/process_64.c in the Linux kernel through 3.18.1 does not ensure that Thread Local Storage (TLS) descriptors are loaded before proceeding with other steps, which makes it easier for local users to bypass the ASLR protection mechanism via a crafted application that reads a TLS base address. | https://nvd.nist.gov/vuln/detail/CVE-2014-9419 |
1,183 | tcpdump | 0f95d441e4b5d7512cc5c326c8668a120e048eda | https://github.com/the-tcpdump-group/tcpdump | https://github.com/the-tcpdump-group/tcpdump/commit/0f95d441e4b5d7512cc5c326c8668a120e048eda | Do bounds checking when unescaping PPP.
Clean up a const issue while we're at it. | 1 | ppp_hdlc(netdissect_options *ndo,
const u_char *p, int length)
{
u_char *b, *s, *t, c;
int i, proto;
const void *se;
if (length <= 0)
return;
b = (uint8_t *)malloc(length);
if (b == NULL)
return;
/*
* Unescape all the data into a temporary, private, buffer.
* Do this so that we dont overwrite the original packet
* contents.
*/
for (s = (u_char *)p, t = b, i = length; i > 0; i--) {
c = *s++;
if (c == 0x7d) {
if (i > 1) {
i--;
c = *s++ ^ 0x20;
} else
continue;
}
*t++ = c;
}
se = ndo->ndo_snapend;
ndo->ndo_snapend = t;
length = t - b;
/* now lets guess about the payload codepoint format */
if (length < 1)
goto trunc;
proto = *b; /* start with a one-octet codepoint guess */
switch (proto) {
case PPP_IP:
ip_print(ndo, b + 1, length - 1);
goto cleanup;
case PPP_IPV6:
ip6_print(ndo, b + 1, length - 1);
goto cleanup;
default: /* no luck - try next guess */
break;
}
if (length < 2)
goto trunc;
proto = EXTRACT_16BITS(b); /* next guess - load two octets */
switch (proto) {
case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */
if (length < 4)
goto trunc;
proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */
handle_ppp(ndo, proto, b + 4, length - 4);
break;
default: /* last guess - proto must be a PPP proto-id */
handle_ppp(ndo, proto, b + 2, length - 2);
break;
}
cleanup:
ndo->ndo_snapend = se;
free(b);
return;
trunc:
ndo->ndo_snapend = se;
free(b);
ND_PRINT((ndo, "[|ppp]"));
}
| 163,830,884,355,634,390,000,000,000,000,000,000,000 | print-ppp.c | 89,533,130,049,478,180,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-9140 | Buffer overflow in the ppp_hdlc function in print-ppp.c in tcpdump 4.6.2 and earlier allows remote attackers to cause a denial of service (crash) via a crafted PPP packet. | https://nvd.nist.gov/vuln/detail/CVE-2014-9140 |
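The unescaping loop above trusts the on-the-wire length while reading from the capture buffer, so a short snapshot lets `*s++` run past the captured data. The stand-alone sketch below shows one way to bound the read, clamping to the captured length; the function signature and the caplen clamp are an interpretation of "do bounds checking", not tcpdump's actual API or patch.

```c
#include <stdio.h>

/* Unescape PPP HDLC framing (0x7d escape, XOR 0x20) into 'out', never
 * reading more than 'caplen' captured bytes even when the claimed packet
 * length is larger.  'out' must be at least 'caplen' bytes. */
static int ppp_unescape(const unsigned char *p, unsigned int length,
			unsigned int caplen, unsigned char *out)
{
	unsigned int in = 0, n = 0;

	if (length > caplen)
		length = caplen;        /* the bound the vulnerable loop lacks */

	while (in < length) {
		unsigned char c = p[in++];

		if (c == 0x7d) {
			if (in >= length)
				break;  /* escape byte truncated by the capture */
			c = p[in++] ^ 0x20;
		}
		out[n++] = c;
	}
	return (int)n;
}

int main(void)
{
	unsigned char pkt[] = { 0xff, 0x7d, 0x23, 0x45 };  /* 0x7d 0x23 -> 0x03 */
	unsigned char buf[sizeof(pkt)];
	int n = ppp_unescape(pkt, 64 /* claimed */, sizeof(pkt) /* captured */, buf);

	printf("decoded %d bytes, second byte = 0x%02x\n", n, buf[1]);
	return 0;
}
```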
1,184 | linux | f2e323ec96077642d397bb1c355def536d489d16 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/f2e323ec96077642d397bb1c355def536d489d16 | [media] ttusb-dec: buffer overflow in ioctl
We need to add a limit check here so we don't overflow the buffer.
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 1 | static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd)
{
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
u8 b[] = { 0x00, 0xff, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00 };
memcpy(&b[4], cmd->msg, cmd->msg_len);
state->config->send_command(fe, 0x72,
sizeof(b) - (6 - cmd->msg_len), b,
NULL, NULL);
return 0;
}
| 192,608,949,045,328,570,000,000,000,000,000,000,000 | ttusbdecfe.c | 24,169,251,397,313,176,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-8884 | Stack-based buffer overflow in the ttusbdecfe_dvbs_diseqc_send_master_cmd function in drivers/media/usb/ttusb-dec/ttusbdecfe.c in the Linux kernel before 3.17.4 allows local users to cause a denial of service (system crash) or possibly gain privileges via a large message length in an ioctl call. | https://nvd.nist.gov/vuln/detail/CVE-2014-8884 |
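In the function above, b[] leaves only 6 bytes of room after the fixed 4-byte header, yet cmd->msg_len is copied unchecked. A compilable sketch of the limit check the commit adds follows; the struct is a simplified stand-in for dvb_diseqc_master_cmd and the helper is illustrative.

```c
#include <stdio.h>
#include <string.h>

struct diseqc_cmd {             /* toy stand-in for struct dvb_diseqc_master_cmd */
	unsigned char msg[24];
	unsigned char msg_len;
};

/* Build the 10-byte command buffer used by the driver; only 6 bytes are
 * free after the 4-byte header, so longer messages must be rejected. */
static int build_diseqc(const struct diseqc_cmd *cmd, unsigned char b[10])
{
	memset(b, 0, 10);
	b[1] = 0xff;

	if (cmd->msg_len > 6)   /* the missing limit check */
		return -1;

	memcpy(&b[4], cmd->msg, cmd->msg_len);
	return 0;
}

int main(void)
{
	unsigned char b[10];
	struct diseqc_cmd ok  = { { 0xe0, 0x10, 0x38, 0xf0 }, 4 };
	struct diseqc_cmd bad = { { 0 }, 20 };

	printf("4-byte message:  %d\n", build_diseqc(&ok, b));
	printf("20-byte message: %d\n", build_diseqc(&bad, b));
	return 0;
}
```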
1,185 | linux | 338f977f4eb441e69bb9a46eaa0ac715c931a67f | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/338f977f4eb441e69bb9a46eaa0ac715c931a67f | mac80211: fix fragmentation code, particularly for encryption
The "new" fragmentation code (since my rewrite almost 5 years ago)
erroneously sets skb->len rather than using skb_trim() to adjust
the length of the first fragment after copying out all the others.
This leaves the skb tail pointer pointing to after where the data
originally ended, and thus causes the encryption MIC to be written
at that point, rather than where it belongs: immediately after the
data.
The impact of this is that if software encryption is done, then
a) encryption doesn't work for the first fragment, the connection
becomes unusable as the first fragment will never be properly
verified at the receiver, the MIC is practically guaranteed to
be wrong
b) we leak up to 8 bytes of plaintext (!) of the packet out into
the air
This is only mitigated by the fact that many devices are capable
of doing encryption in hardware, in which case this can't happen
as the tail pointer is irrelevant in that case. Additionally,
fragmentation is not used very frequently and would normally have
to be configured manually.
Fix this by using skb_trim() properly.
Cc: stable@vger.kernel.org
Fixes: 2de8e0d999b8 ("mac80211: rewrite fragmentation")
Reported-by: Jouni Malinen <j@w1.fi>
Signed-off-by: Johannes Berg <johannes.berg@intel.com> | 1 | static int ieee80211_fragment(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int hdrlen,
int frag_threshold)
{
struct ieee80211_local *local = tx->local;
struct ieee80211_tx_info *info;
struct sk_buff *tmp;
int per_fragm = frag_threshold - hdrlen - FCS_LEN;
int pos = hdrlen + per_fragm;
int rem = skb->len - hdrlen - per_fragm;
if (WARN_ON(rem < 0))
return -EINVAL;
/* first fragment was already added to queue by caller */
while (rem) {
int fraglen = per_fragm;
if (fraglen > rem)
fraglen = rem;
rem -= fraglen;
tmp = dev_alloc_skb(local->tx_headroom +
frag_threshold +
tx->sdata->encrypt_headroom +
IEEE80211_ENCRYPT_TAILROOM);
if (!tmp)
return -ENOMEM;
__skb_queue_tail(&tx->skbs, tmp);
skb_reserve(tmp,
local->tx_headroom + tx->sdata->encrypt_headroom);
/* copy control information */
memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
info = IEEE80211_SKB_CB(tmp);
info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_FIRST_FRAGMENT);
if (rem)
info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
skb_copy_queue_mapping(tmp, skb);
tmp->priority = skb->priority;
tmp->dev = skb->dev;
/* copy header and data */
memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
pos += fraglen;
}
/* adjust first fragment's length */
skb->len = hdrlen + per_fragm;
return 0;
}
| 252,627,261,483,521,550,000,000,000,000,000,000,000 | tx.c | 187,323,276,197,443,400,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2014-8709 | The ieee80211_fragment function in net/mac80211/tx.c in the Linux kernel before 3.13.5 does not properly maintain a certain tail pointer, which allows remote attackers to obtain sensitive cleartext information by reading packets. | https://nvd.nist.gov/vuln/detail/CVE-2014-8709 |
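The root cause here is that assigning skb->len directly leaves the skb tail pointer beyond the real end of the first fragment, so the MIC is written over (and leaks) stale plaintext; the commit replaces the assignment with skb_trim(). Below is a toy model of why the length and the tail must move together; struct toy_skb is an invented illustration, not struct sk_buff.

```c
#include <stdio.h>

struct toy_skb {                /* toy model, not struct sk_buff */
	unsigned char data[32];
	unsigned int len;
	unsigned int tail;      /* offset where the next bytes (e.g. the MIC) go */
};

/* Correct shrink: length and tail move together, like skb_trim() does. */
static void toy_trim(struct toy_skb *skb, unsigned int len)
{
	skb->len = len;
	skb->tail = len;
}

int main(void)
{
	struct toy_skb skb = { { 0 }, 20, 20 };

	/* Buggy pattern: only the length is rewritten, so anything appended
	 * via the tail lands 8 bytes past the real end of the fragment. */
	skb.len = 12;
	printf("buggy: len=%u tail=%u\n", skb.len, skb.tail);

	/* Fixed pattern from the commit: trim length and tail together. */
	toy_trim(&skb, 12);
	printf("fixed: len=%u tail=%u\n", skb.len, skb.tail);
	return 0;
}
```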
1,186 | linux | a430c9166312e1aa3d80bce32374233bdbfeba32 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/a430c9166312e1aa3d80bce32374233bdbfeba32 | KVM: emulate: avoid accessing NULL ctxt->memopp
A failure to decode the instruction can cause a NULL pointer access.
This is fixed simply by moving the "done" label as close as possible
to the return.
This fixes CVE-2014-8481.
Reported-by: Andy Lutomirski <luto@amacapital.net>
Cc: stable@vger.kernel.org
Fixes: 41061cdb98a0bec464278b4db8e894a3121671f5
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 1 | int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 ||
(mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
done:
if (ctxt->rip_relative)
ctxt->memopp->addr.mem.ea += ctxt->_eip;
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
| 201,743,535,793,806,920,000,000,000,000,000,000,000 | emulate.c | 284,934,346,528,550,170,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-8481 | The instruction decoder in arch/x86/kvm/emulate.c in the KVM subsystem in the Linux kernel before 3.18-rc2 does not properly handle invalid instructions, which allows guest OS users to cause a denial of service (NULL pointer dereference and host OS crash) via a crafted application that triggers (1) an improperly fetched instruction or (2) an instruction that occupies too many bytes. NOTE: this vulnerability exists because of an incomplete fix for CVE-2014-8480. | https://nvd.nist.gov/vuln/detail/CVE-2014-8481 |
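On a decode failure the code above jumps to done: and still performs the rip-relative adjustment through ctxt->memopp, which may be NULL. The commit simply moves the label next to the return; the sketch below expresses the same control-flow idea with invented types and is not the real emulator code.

```c
#include <stdio.h>
#include <stddef.h>

struct operand { long ea; };
struct ctxt    { struct operand *memopp; int rip_relative; long eip; };

/* Control-flow sketch: the rip-relative fix-up must sit on the success
 * path only, because a decode failure can jump to the exit label before
 * memopp was ever assigned. */
static int decode(struct ctxt *c, int fail_early)
{
	static struct operand memop;
	int rc = -1;

	c->memopp = NULL;
	if (fail_early)
		goto done;              /* error taken before memopp is set */

	c->memopp = &memop;
	rc = 0;

	if (c->rip_relative)            /* only reached when decoding succeeded */
		c->memopp->ea += c->eip;
done:
	return rc;
}

int main(void)
{
	struct ctxt c = { NULL, 1, 0x1000 };

	printf("failed decode     -> %d (memopp never touched)\n", decode(&c, 1));
	printf("successful decode -> %d\n", decode(&c, 0));
	return 0;
}
```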
1,187 | linux | 3d32e4dbe71374a6780eaf51d719d76f9a9bf22f | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/3d32e4dbe71374a6780eaf51d719d76f9a9bf22f | kvm: fix excessive pages un-pinning in kvm_iommu_map error path.
The third parameter of kvm_unpin_pages() when called from
kvm_iommu_map_pages() is wrong, it should be the number of pages to un-pin
and not the page size.
This error was facilitated with an inconsistent API: kvm_pin_pages() takes
a size, but kvn_unpin_pages() takes a number of pages, so fix the problem
by matching the two.
This was introduced by commit 350b8bd ("kvm: iommu: fix the third parameter
of kvm_iommu_put_pages (CVE-2014-3601)"), which fixes the lack of
un-pinning for pages intended to be un-pinned (i.e. memory leak) but
unfortunately potentially aggravated the number of pages we un-pin that
should have stayed pinned. As far as I understand though, the same
practical mitigations apply.
This issue was found during review of Red Hat 6.6 patches to prepare
Ksplice rebootless updates.
Thanks to Vegard for his time on a late Friday evening to help me in
understanding this code.
Fixes: 350b8bd ("kvm: iommu: fix the third parameter of... (CVE-2014-3601)")
Cc: stable@vger.kernel.org
Signed-off-by: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Jamie Iles <jamie.iles@oracle.com>
Reviewed-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 1 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
gfn_t gfn, end_gfn;
pfn_t pfn;
int r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int flags;
/* check if iommu exists and in use */
if (!domain)
return 0;
gfn = slot->base_gfn;
end_gfn = gfn + slot->npages;
flags = IOMMU_READ;
if (!(slot->flags & KVM_MEM_READONLY))
flags |= IOMMU_WRITE;
if (!kvm->arch.iommu_noncoherent)
flags |= IOMMU_CACHE;
while (gfn < end_gfn) {
unsigned long page_size;
/* Check if already mapped */
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
gfn += 1;
continue;
}
/* Get the page size we could use to map */
page_size = kvm_host_page_size(kvm, gfn);
/* Make sure the page_size does not exceed the memslot */
while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
page_size >>= 1;
/* Make sure gfn is aligned to the page size we want to map */
while ((gfn << PAGE_SHIFT) & (page_size - 1))
page_size >>= 1;
/* Make sure hva is aligned to the page size we want to map */
while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
page_size >>= 1;
/*
* Pin all pages we are about to map in memory. This is
* important because we unmap and unpin in 4kb steps later.
*/
pfn = kvm_pin_pages(slot, gfn, page_size);
if (is_error_noslot_pfn(pfn)) {
gfn += 1;
continue;
}
/* Map into IO address space */
r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
page_size, flags);
if (r) {
printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%llx\n", pfn);
kvm_unpin_pages(kvm, pfn, page_size);
goto unmap_pages;
}
gfn += page_size >> PAGE_SHIFT;
}
return 0;
unmap_pages:
kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
return r;
}
| 40,650,788,508,994,537,000,000,000,000,000,000,000 | iommu.c | 293,495,940,641,321,050,000,000,000,000,000,000,000 | [
"CWE-189"
] | CVE-2014-8369 | The kvm_iommu_map_pages function in virt/kvm/iommu.c in the Linux kernel through 3.17.2 miscalculates the number of pages during the handling of a mapping failure, which allows guest OS users to cause a denial of service (host OS page unpinning) or possibly have unspecified other impact by leveraging guest OS privileges. NOTE: this vulnerability exists because of an incorrect fix for CVE-2014-3601. | https://nvd.nist.gov/vuln/detail/CVE-2014-8369 |
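The error path above passes page_size (bytes) where kvm_unpin_pages() expects a page count, so a single failed mapping unpins far too many pages. Here is a tiny sketch of the unit conversion the commit message describes; the helper is a stub, only the shift by PAGE_SHIFT is the point.

```c
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in that, like kvm_unpin_pages(), expects a NUMBER OF PAGES. */
static void unpin_pages(unsigned long npages)
{
	printf("unpinning %lu page(s)\n", npages);
}

int main(void)
{
	unsigned long page_size = 2UL << 20;    /* one 2 MiB mapping, in bytes */

	/* Buggy call from the error path: passes bytes, so ~2 million pages
	 * would be unpinned for a single failed mapping. */
	unpin_pages(page_size);

	/* Fixed call: convert bytes to a page count first. */
	unpin_pages(page_size >> PAGE_SHIFT);
	return 0;
}
```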
1,191 | linux | a2b9e6c1a35afcc0973acb72e591c714e78885ff | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/a2b9e6c1a35afcc0973acb72e591c714e78885ff | KVM: x86: Don't report guest userspace emulation error to userspace
Commit fc3a9157d314 ("KVM: X86: Don't report L2 emulation failures to
user-space") disabled the reporting of L2 (nested guest) emulation failures to
userspace due to race-condition between a vmexit and the instruction emulator.
The same rational applies also to userspace applications that are permitted by
the guest OS to access MMIO area or perform PIO.
This patch extends the current behavior - of injecting a #UD instead of
reporting it to userspace - also for guest userspace code.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 1 | static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
int r = EMULATE_DONE;
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
if (!is_guest_mode(vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
r = EMULATE_FAIL;
}
kvm_queue_exception(vcpu, UD_VECTOR);
return r;
}
| 44,326,647,696,150,180,000,000,000,000,000,000,000 | x86.c | 92,946,483,395,699,820,000,000,000,000,000,000,000 | [
"CWE-362"
] | CVE-2014-7842 | Race condition in arch/x86/kvm/x86.c in the Linux kernel before 3.17.4 allows guest OS users to cause a denial of service (guest OS crash) via a crafted application that performs an MMIO transaction or a PIO transaction to trigger a guest userspace emulation error report, a similar issue to CVE-2010-5313. | https://nvd.nist.gov/vuln/detail/CVE-2014-7842 |
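The fix extends the existing "do not report to userspace" rule from nested guests to guest user-mode code: emulation failures are only surfaced when the faulting code ran at CPL 0 outside a nested guest, otherwise a #UD is injected. Below is a sketch of that policy as a predicate; the exact condition is inferred from the commit message rather than copied from the patch.

```c
#include <stdio.h>
#include <stdbool.h>

/* Sketch of the reporting policy after the fix: only emulation failures
 * triggered by guest kernel code (CPL 0 and not a nested guest) are
 * surfaced to userspace; for everything else a #UD is injected instead. */
static bool report_to_userspace(bool is_guest_mode, int cpl)
{
	return !is_guest_mode && cpl == 0;
}

int main(void)
{
	printf("guest kernel code (CPL 0): %d\n", report_to_userspace(false, 0));
	printf("guest user code   (CPL 3): %d\n", report_to_userspace(false, 3));
	printf("nested guest code        : %d\n", report_to_userspace(true, 0));
	return 0;
}
```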
1,192 | linux | e40607cbe270a9e8360907cb1e62ddf0736e4864 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/e40607cbe270a9e8360907cb1e62ddf0736e4864 | net: sctp: fix NULL pointer dereference in af->from_addr_param on malformed packet
An SCTP server doing ASCONF will panic on malformed INIT ping-of-death
in the form of:
------------ INIT[PARAM: SET_PRIMARY_IP] ------------>
While the INIT chunk parameter verification dissects through many things
in order to detect malformed input, it misses to actually check parameters
inside of parameters. E.g. RFC5061, section 4.2.4 proposes a 'set primary
IP address' parameter in ASCONF, which has as a subparameter an address
parameter.
So an attacker may send a parameter type other than SCTP_PARAM_IPV4_ADDRESS
or SCTP_PARAM_IPV6_ADDRESS, param_type2af() will subsequently return 0
and thus sctp_get_af_specific() returns NULL, too, which we then happily
dereference unconditionally through af->from_addr_param().
The trace for the log:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000078
IP: [<ffffffffa01e9c62>] sctp_process_init+0x492/0x990 [sctp]
PGD 0
Oops: 0000 [#1] SMP
[...]
Pid: 0, comm: swapper Not tainted 2.6.32-504.el6.x86_64 #1 Bochs Bochs
RIP: 0010:[<ffffffffa01e9c62>] [<ffffffffa01e9c62>] sctp_process_init+0x492/0x990 [sctp]
[...]
Call Trace:
<IRQ>
[<ffffffffa01f2add>] ? sctp_bind_addr_copy+0x5d/0xe0 [sctp]
[<ffffffffa01e1fcb>] sctp_sf_do_5_1B_init+0x21b/0x340 [sctp]
[<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp]
[<ffffffffa01e5c09>] ? sctp_endpoint_lookup_assoc+0xc9/0xf0 [sctp]
[<ffffffffa01e61f6>] sctp_endpoint_bh_rcv+0x116/0x230 [sctp]
[<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp]
[<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp]
[<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter]
[<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[...]
A minimal way to address this is to check for NULL as we do on all
other such occasions where we know sctp_get_af_specific() could
possibly return with NULL.
Fixes: d6de3097592b ("[SCTP]: Add the handling of "Set Primary IP Address" parameter to INIT")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Vlad Yasevich <vyasevich@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int sctp_process_param(struct sctp_association *asoc,
union sctp_params param,
const union sctp_addr *peer_addr,
gfp_t gfp)
{
struct net *net = sock_net(asoc->base.sk);
union sctp_addr addr;
int i;
__u16 sat;
int retval = 1;
sctp_scope_t scope;
time_t stale;
struct sctp_af *af;
union sctp_addr_param *addr_param;
struct sctp_transport *t;
struct sctp_endpoint *ep = asoc->ep;
/* We maintain all INIT parameters in network byte order all the
* time. This allows us to not worry about whether the parameters
* came from a fresh INIT, and INIT ACK, or were stored in a cookie.
*/
switch (param.p->type) {
case SCTP_PARAM_IPV6_ADDRESS:
if (PF_INET6 != asoc->base.sk->sk_family)
break;
goto do_addr_param;
case SCTP_PARAM_IPV4_ADDRESS:
/* v4 addresses are not allowed on v6-only socket */
if (ipv6_only_sock(asoc->base.sk))
break;
do_addr_param:
af = sctp_get_af_specific(param_type2af(param.p->type));
af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
scope = sctp_scope(peer_addr);
if (sctp_in_scope(net, &addr, scope))
if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
return 0;
break;
case SCTP_PARAM_COOKIE_PRESERVATIVE:
if (!net->sctp.cookie_preserve_enable)
break;
stale = ntohl(param.life->lifespan_increment);
/* Suggested Cookie Life span increment's unit is msec,
* (1/1000sec).
*/
asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale);
break;
case SCTP_PARAM_HOST_NAME_ADDRESS:
pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__);
break;
case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
/* Turn off the default values first so we'll know which
* ones are really set by the peer.
*/
asoc->peer.ipv4_address = 0;
asoc->peer.ipv6_address = 0;
/* Assume that peer supports the address family
* by which it sends a packet.
*/
if (peer_addr->sa.sa_family == AF_INET6)
asoc->peer.ipv6_address = 1;
else if (peer_addr->sa.sa_family == AF_INET)
asoc->peer.ipv4_address = 1;
/* Cycle through address types; avoid divide by 0. */
sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
if (sat)
sat /= sizeof(__u16);
for (i = 0; i < sat; ++i) {
switch (param.sat->types[i]) {
case SCTP_PARAM_IPV4_ADDRESS:
asoc->peer.ipv4_address = 1;
break;
case SCTP_PARAM_IPV6_ADDRESS:
if (PF_INET6 == asoc->base.sk->sk_family)
asoc->peer.ipv6_address = 1;
break;
case SCTP_PARAM_HOST_NAME_ADDRESS:
asoc->peer.hostname_address = 1;
break;
default: /* Just ignore anything else. */
break;
}
}
break;
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
asoc->peer.cookie = param.cookie->body;
break;
case SCTP_PARAM_HEARTBEAT_INFO:
/* Would be odd to receive, but it causes no problems. */
break;
case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
/* Rejected during verify stage. */
break;
case SCTP_PARAM_ECN_CAPABLE:
asoc->peer.ecn_capable = 1;
break;
case SCTP_PARAM_ADAPTATION_LAYER_IND:
asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind);
break;
case SCTP_PARAM_SET_PRIMARY:
if (!net->sctp.addip_enable)
goto fall_through;
addr_param = param.v + sizeof(sctp_addip_param_t);
af = sctp_get_af_specific(param_type2af(param.p->type));
af->from_addr_param(&addr, addr_param,
htons(asoc->peer.port), 0);
/* if the address is invalid, we can't process it.
* XXX: see spec for what to do.
*/
if (!af->addr_valid(&addr, NULL, NULL))
break;
t = sctp_assoc_lookup_paddr(asoc, &addr);
if (!t)
break;
sctp_assoc_set_primary(asoc, t);
break;
case SCTP_PARAM_SUPPORTED_EXT:
sctp_process_ext_param(asoc, param);
break;
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (net->sctp.prsctp_enable) {
asoc->peer.prsctp_capable = 1;
break;
}
/* Fall Through */
goto fall_through;
case SCTP_PARAM_RANDOM:
if (!ep->auth_enable)
goto fall_through;
/* Save peer's random parameter */
asoc->peer.peer_random = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_random) {
retval = 0;
break;
}
break;
case SCTP_PARAM_HMAC_ALGO:
if (!ep->auth_enable)
goto fall_through;
/* Save peer's HMAC list */
asoc->peer.peer_hmacs = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_hmacs) {
retval = 0;
break;
}
/* Set the default HMAC the peer requested*/
sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo);
break;
case SCTP_PARAM_CHUNKS:
if (!ep->auth_enable)
goto fall_through;
asoc->peer.peer_chunks = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_chunks)
retval = 0;
break;
fall_through:
default:
/* Any unrecognized parameters should have been caught
* and handled by sctp_verify_param() which should be
* called prior to this routine. Simply log the error
* here.
*/
pr_debug("%s: ignoring param:%d for association:%p.\n",
__func__, ntohs(param.p->type), asoc);
break;
}
return retval;
}
| 233,310,274,121,482,150,000,000,000,000,000,000,000 | sm_make_chunk.c | 336,027,494,887,991,000,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-7841 | The sctp_process_param function in net/sctp/sm_make_chunk.c in the SCTP implementation in the Linux kernel before 3.17.4, when ASCONF is used, allows remote attackers to cause a denial of service (NULL pointer dereference and system crash) via a malformed INIT chunk. | https://nvd.nist.gov/vuln/detail/CVE-2014-7841 |
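In the SCTP_PARAM_SET_PRIMARY branch above, sctp_get_af_specific(param_type2af(...)) returns NULL for any inner parameter type other than an IPv4/IPv6 address and is then dereferenced unconditionally. The toy sketch below shows the NULL check the commit message calls for; the lookup table and parameter-type constants are illustrative stand-ins.

```c
#include <stdio.h>
#include <stddef.h>

struct af_ops { const char *name; };

static struct af_ops ipv4_ops = { "IPv4" };
static struct af_ops ipv6_ops = { "IPv6" };

/* Toy stand-in for sctp_get_af_specific(param_type2af(...)): any inner
 * parameter type other than an IPv4/IPv6 address maps to NULL. */
static struct af_ops *lookup_af(unsigned int inner_type)
{
	if (inner_type == 5)
		return &ipv4_ops;       /* SCTP_PARAM_IPV4_ADDRESS */
	if (inner_type == 6)
		return &ipv6_ops;       /* SCTP_PARAM_IPV6_ADDRESS */
	return NULL;
}

static int process_set_primary(unsigned int inner_type)
{
	struct af_ops *af = lookup_af(inner_type);

	if (af == NULL)                 /* the missing check from the commit */
		return -1;              /* drop the malformed parameter */

	printf("set-primary with %s address\n", af->name);
	return 0;
}

int main(void)
{
	process_set_primary(6);
	printf("bogus inner type -> %d (dropped instead of crashing)\n",
	       process_set_primary(0xc004));
	return 0;
}
```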
1,199 | linux | c88547a8119e3b581318ab65e9b72f27f23e641d | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/c88547a8119e3b581318ab65e9b72f27f23e641d | xfs: fix directory hash ordering bug
Commit f5ea1100 ("xfs: add CRCs to dir2/da node blocks") introduced
in 3.10 incorrectly converted the btree hash index array pointer in
xfs_da3_fixhashpath(). It resulted in the current hash always
being compared against the first entry in the btree rather than the
current block index into the btree block's hash entry array. As a
result, it was comparing the wrong hashes, and so could misorder the
entries in the btree.
For most cases, this doesn't cause any problems as it requires hash
collisions to expose the ordering problem. However, when there are
hash collisions within a directory there is a very good probability
that the entries will be ordered incorrectly and that actually
matters when duplicate hashes are placed into or removed from the
btree block hash entry array.
This bug results in an on-disk directory corruption and that results
in directory verifier functions throwing corruption warnings into
the logs. While no data or directory entries are lost, access to
them may be compromised, and attempts to remove entries from a
directory that has suffered from this corruption may result in a
filesystem shutdown. xfs_repair will fix the directory hash
ordering without data loss occuring.
[dchinner: wrote useful a commit message]
cc: <stable@vger.kernel.org>
Reported-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Ben Myers <bpm@sgi.com>
Signed-off-by: Dave Chinner <david@fromorbit.com> | 1 | xfs_da3_fixhashpath(
struct xfs_da_state *state,
struct xfs_da_state_path *path)
{
struct xfs_da_state_blk *blk;
struct xfs_da_intnode *node;
struct xfs_da_node_entry *btree;
xfs_dahash_t lasthash=0;
int level;
int count;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_fixhashpath(state->args);
level = path->active-1;
blk = &path->blk[ level ];
switch (blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DIR2_LEAFN_MAGIC:
lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DA_NODE_MAGIC:
lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
}
for (blk--, level--; level >= 0; blk--, level--) {
struct xfs_da3_icnode_hdr nodehdr;
node = blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
if (be32_to_cpu(btree->hashval) == lasthash)
break;
blk->hashval = lasthash;
btree[blk->index].hashval = cpu_to_be32(lasthash);
xfs_trans_log_buf(state->args->trans, blk->bp,
XFS_DA_LOGRANGE(node, &btree[blk->index],
sizeof(*btree)));
lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
}
| 190,462,110,703,120,600,000,000,000,000,000,000,000 | xfs_da_btree.c | 36,126,382,876,200,700,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-7283 | The xfs_da3_fixhashpath function in fs/xfs/xfs_da_btree.c in the xfs implementation in the Linux kernel before 3.14.2 does not properly compare btree hash values, which allows local users to cause a denial of service (filesystem corruption, and OOPS or panic) via operations on directories that have hash collisions, as demonstrated by rmdir operations. | https://nvd.nist.gov/vuln/detail/CVE-2014-7283 |
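The conversion bug is a single index: the loop compares btree->hashval (always entry 0) instead of btree[blk->index].hashval. The small demonstration below contrasts the two comparisons; the array and values are invented, only the indexing mirrors the fix described above.

```c
#include <stdio.h>

int main(void)
{
	unsigned int hashval[4] = { 10, 20, 30, 40 };   /* toy btree hash array */
	int index = 2;                                  /* entry this level points at */
	unsigned int lasthash = 30;

	/* Buggy comparison: always inspects entry 0, so the early-exit test
	 * effectively never fires for the entry actually being updated. */
	printf("buggy: compare %u with %u -> %s\n", hashval[0], lasthash,
	       hashval[0] == lasthash ? "stop" : "keep walking up");

	/* Fixed comparison: index the array with the current block index. */
	printf("fixed: compare %u with %u -> %s\n", hashval[index], lasthash,
	       hashval[index] == lasthash ? "stop" : "keep walking up");
	return 0;
}
```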
1,200 | linux | 18f39e7be0121317550d03e267e3ebd4dbfbb3ce | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/18f39e7be0121317550d03e267e3ebd4dbfbb3ce | [CIFS] Possible null ptr deref in SMB2_tcon
As Raphael Geissert pointed out, tcon_error_exit can dereference tcon
and there is one path in which tcon can be null.
Signed-off-by: Steve French <smfrench@gmail.com>
CC: Stable <stable@vger.kernel.org> # v3.7+
Reported-by: Raphael Geissert <geissert@debian.org> | 1 | SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct cifs_tcon *tcon, const struct nls_table *cp)
{
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int resp_buftype;
int unc_path_len;
struct TCP_Server_Info *server;
__le16 *unc_path = NULL;
cifs_dbg(FYI, "TCON\n");
if ((ses->server) && tree)
server = ses->server;
else
return -EIO;
if (tcon && tcon->bad_network_name)
return -ENOENT;
unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
if (unc_path == NULL)
return -ENOMEM;
unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
unc_path_len *= 2;
if (unc_path_len < 2) {
kfree(unc_path);
return -EINVAL;
}
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
return rc;
}
if (tcon == NULL) {
/* since no tcon, smb2_init can not do this, so do here */
req->hdr.SessionId = ses->Suid;
/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
}
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and 1 for pad */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
req->PathLength = cpu_to_le16(unc_path_len - 2);
iov[1].iov_base = unc_path;
iov[1].iov_len = unc_path_len;
inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
if (rc != 0) {
if (tcon) {
cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
tcon->need_reconnect = true;
}
goto tcon_error_exit;
}
if (tcon == NULL) {
ses->ipc_tid = rsp->hdr.TreeId;
goto tcon_exit;
}
if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
cifs_dbg(FYI, "connection to disk share\n");
else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
tcon->ipc = true;
cifs_dbg(FYI, "connection to pipe share\n");
} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
tcon->print = true;
cifs_dbg(FYI, "connection to printer\n");
} else {
cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
}
tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = rsp->hdr.TreeId;
strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
init_copy_chunk_defaults(tcon);
if (tcon->ses->server->ops->validate_negotiate)
rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
free_rsp_buf(resp_buftype, rsp);
kfree(unc_path);
return rc;
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
tcon->bad_network_name = true;
}
goto tcon_exit;
}
| 60,644,300,295,276,330,000,000,000,000,000,000,000 | smb2pdu.c | 338,425,991,842,107,300,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-7145 | The SMB2_tcon function in fs/cifs/smb2pdu.c in the Linux kernel before 3.16.3 allows remote CIFS servers to cause a denial of service (NULL pointer dereference and client system crash) or possibly have unspecified other impact by deleting the IPC$ share during resolution of DFS referrals. | https://nvd.nist.gov/vuln/detail/CVE-2014-7145 |
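Note on CVE-2014-7145 above: the fix guards the tcon dereference on the error path, since tcon can legitimately be NULL when connecting to the IPC$ share. A short sketch of the guarded exit, assuming the rest of SMB2_tcon() stays as quoted:

tcon_error_exit:
	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
		if (tcon)
			tcon->bad_network_name = true;
	}
	goto tcon_exit;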
1,204 | linux | c03aa9f6e1f938618e6db2e23afef0574efeeb65 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/c03aa9f6e1f938618e6db2e23afef0574efeeb65 | udf: Avoid infinite loop when processing indirect ICBs
We did not implement any bound on the number of indirect ICBs we follow when
loading an inode. Thus a corrupted medium could cause the kernel to go into an
infinite loop, possibly causing a stack overflow.
Fix the possible stack overflow by removing recursion from
__udf_read_inode() and limit number of indirect ICBs we follow to avoid
infinite loops.
Signed-off-by: Jan Kara <jack@suse.cz> | 1 | static void __udf_read_inode(struct inode *inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
unsigned int link_count;
/*
* Set defaults, but the inode is still incomplete!
* Note: get_new_inode() sets the following on a new inode:
* i_sb = sb
* i_no = ino
* i_flags = sb->s_flags
* i_state = 0
* clean_inode(): zero fills and sets
* i_count = 1
* i_nlink = 1
* i_op = NULL;
*/
bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
make_bad_inode(inode);
return;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
inode->i_ino, ident);
brelse(bh);
make_bad_inode(inode);
return;
}
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
&ident);
if (ident == TAG_IDENT_IE && ibh) {
struct buffer_head *nbh = NULL;
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength &&
(nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
&ident))) {
if (ident == TAG_IDENT_FE ||
ident == TAG_IDENT_EFE) {
memcpy(&iinfo->i_location,
&loc,
sizeof(struct kernel_lb_addr));
brelse(bh);
brelse(ibh);
brelse(nbh);
__udf_read_inode(inode);
return;
}
brelse(nbh);
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
brelse(bh);
make_bad_inode(inode);
return;
}
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
iinfo->i_strat4096 = 1;
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_next_alloc_block = 0;
iinfo->i_next_alloc_goal = 0;
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry))) {
make_bad_inode(inode);
return;
}
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct fileEntry))) {
make_bad_inode(inode);
return;
}
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry))) {
make_bad_inode(inode);
return;
}
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
return;
}
read_lock(&sbi->s_cred_lock);
i_uid_write(inode, le32_to_cpu(fe->uid));
if (!uid_valid(inode->i_uid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
i_gid_write(inode, le32_to_cpu(fe->gid));
if (!gid_valid(inode->i_gid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_fmode;
else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_dmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_dmode;
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
if (!link_count)
link_count = 1;
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenExtents = inode->i_size;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
fe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
efe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
iinfo->i_crtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
inode->i_mode |= S_IFDIR;
inc_nlink(inode);
break;
case ICBTAG_FILE_TYPE_REALTIME:
case ICBTAG_FILE_TYPE_REGULAR:
case ICBTAG_FILE_TYPE_UNDEF:
case ICBTAG_FILE_TYPE_VAT20:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
inode->i_mode |= S_IFREG;
break;
case ICBTAG_FILE_TYPE_BLOCK:
inode->i_mode |= S_IFBLK;
break;
case ICBTAG_FILE_TYPE_CHAR:
inode->i_mode |= S_IFCHR;
break;
case ICBTAG_FILE_TYPE_FIFO:
init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
break;
case ICBTAG_FILE_TYPE_SOCKET:
init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
break;
case ICBTAG_FILE_TYPE_SYMLINK:
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode->i_mode = S_IFLNK | S_IRWXUGO;
break;
case ICBTAG_FILE_TYPE_MAIN:
udf_debug("METADATA FILE-----\n");
break;
case ICBTAG_FILE_TYPE_MIRROR:
udf_debug("METADATA MIRROR FILE-----\n");
break;
case ICBTAG_FILE_TYPE_BITMAP:
udf_debug("METADATA BITMAP FILE-----\n");
break;
default:
udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
inode->i_ino, fe->icbTag.fileType);
make_bad_inode(inode);
return;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (dsea) {
init_special_inode(inode, inode->i_mode,
MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
make_bad_inode(inode);
}
brelse(bh);
}
| 121,718,855,555,196,310,000,000,000,000,000,000,000 | inode.c | 339,312,702,252,410,520,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-6410 | The __udf_read_inode function in fs/udf/inode.c in the Linux kernel through 3.16.3 does not restrict the amount of ICB indirection, which allows physically proximate attackers to cause a denial of service (infinite loop or stack consumption) via a UDF filesystem with a crafted inode. | https://nvd.nist.gov/vuln/detail/CVE-2014-6410 |
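Note on CVE-2014-6410 above: the fix replaces the tail recursion in __udf_read_inode() with a loop and caps how many indirect ICBs are followed. A sketch of that structure; the limit value, the reread label, and the elided buffer-head handling are illustrative assumptions, not necessarily the exact upstream patch:

/* illustrative cap on how deep an ICB indirection chain may go */
#define UDF_MAX_ICB_NESTING 1024

	unsigned int indirections = 0;

reread:
	/* ... read and validate the (extended) file entry as before ... */
	if (ident == TAG_IDENT_IE && ibh) {
		/* follow the indirect ICB by looping instead of recursing
		 * (buffer heads are released here in the real code) */
		memcpy(&iinfo->i_location, &loc, sizeof(struct kernel_lb_addr));
		if (++indirections > UDF_MAX_ICB_NESTING) {
			udf_err(inode->i_sb,
				"too many ICBs in ICB hierarchy (max %d supported)\n",
				UDF_MAX_ICB_NESTING);
			make_bad_inode(inode);
			return;
		}
		goto reread;
	}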
1,205 | ettercap | e3abe7d7585ecc420a7cab73313216613aadad5a | https://github.com/Ettercap/ettercap | https://github.com/Ettercap/ettercap/commit/e3abe7d7585ecc420a7cab73313216613aadad5a | Fixed heap overflow caused by length | 1 | FUNC_DECODER(dissector_postgresql)
{
DECLARE_DISP_PTR(ptr);
struct ec_session *s = NULL;
void *ident = NULL;
char tmp[MAX_ASCII_ADDR_LEN];
struct postgresql_status *conn_status;
/* don't complain about unused var */
(void) DECODE_DATA;
(void) DECODE_DATALEN;
(void) DECODED_LEN;
if (FROM_CLIENT("postgresql", PACKET)) {
if (PACKET->DATA.len < 4)
return NULL;
dissect_create_ident(&ident, PACKET, DISSECT_CODE(dissector_postgresql));
/* if the session does not exist... */
if (session_get(&s, ident, DISSECT_IDENT_LEN) == -ENOTFOUND) {
/* search for user and database strings, look for StartupMessage */
unsigned char *u = memmem(ptr, PACKET->DATA.len, "user", 4);
unsigned char *d = memmem(ptr, PACKET->DATA.len, "database", 8);
if (!memcmp(ptr + 4, "\x00\x03\x00\x00", 4) && u && d) {
/* create the new session */
dissect_create_session(&s, PACKET, DISSECT_CODE(dissector_postgresql));
/* remember the state (used later) */
SAFE_CALLOC(s->data, 1, sizeof(struct postgresql_status));
conn_status = (struct postgresql_status *) s->data;
conn_status->status = WAIT_AUTH;
/* user is always null-terminated */
strncpy((char*)conn_status->user, (char*)(u + 5), 65);
conn_status->user[64] = 0;
/* database is always null-terminated */
strncpy((char*)conn_status->database, (char*)(d + 9), 65);
conn_status->database[64] = 0;
/* save the session */
session_put(s);
}
} else {
conn_status = (struct postgresql_status *) s->data;
if (conn_status->status == WAIT_RESPONSE) {
/* check for PasswordMessage packet */
if (ptr[0] == 'p' && conn_status->type == MD5) {
DEBUG_MSG("\tDissector_postgresql RESPONSE type is MD5");
if(memcmp(ptr + 1, "\x00\x00\x00\x28", 4)) {
DEBUG_MSG("\tDissector_postgresql BUG, expected length is 40");
return NULL;
}
if (PACKET->DATA.len < 40) {
DEBUG_MSG("\tDissector_postgresql BUG, expected length is 40");
return NULL;
}
memcpy(conn_status->hash, ptr + 5 + 3, 32);
conn_status->hash[32] = 0;
DISSECT_MSG("%s:$postgres$%s*%s*%s:%s:%d\n", conn_status->user, conn_status->user, conn_status->salt, conn_status->hash, ip_addr_ntoa(&PACKET->L3.dst, tmp), ntohs(PACKET->L4.dst));
dissect_wipe_session(PACKET, DISSECT_CODE(dissector_postgresql));
}
else if (ptr[0] == 'p' && conn_status->type == CT) {
int length;
DEBUG_MSG("\tDissector_postgresql RESPONSE type is clear-text!");
GET_ULONG_BE(length, ptr, 1);
strncpy((char*)conn_status->password, (char*)(ptr + 5), length - 4);
conn_status->password[length - 4] = 0;
DISSECT_MSG("PostgreSQL credentials:%s-%d:%s:%s\n", ip_addr_ntoa(&PACKET->L3.dst, tmp), ntohs(PACKET->L4.dst), conn_status->user, conn_status->password);
dissect_wipe_session(PACKET, DISSECT_CODE(dissector_postgresql));
}
}
}
} else { /* Packets coming from the server */
if (PACKET->DATA.len < 9)
return NULL;
dissect_create_ident(&ident, PACKET, DISSECT_CODE(dissector_postgresql));
if (session_get(&s, ident, DISSECT_IDENT_LEN) == ESUCCESS) {
conn_status = (struct postgresql_status *) s->data;
if (conn_status->status == WAIT_AUTH &&
ptr[0] == 'R' && !memcmp(ptr + 1, "\x00\x00\x00\x0c", 4) &&
!memcmp(ptr + 5, "\x00\x00\x00\x05", 4)) {
conn_status->status = WAIT_RESPONSE;
conn_status->type = MD5;
DEBUG_MSG("\tDissector_postgresql AUTH type is MD5");
hex_encode(ptr + 9, 4, conn_status->salt); /* save salt */
}
else if (conn_status->status == WAIT_AUTH &&
ptr[0] == 'R' && !memcmp(ptr + 1, "\x00\x00\x00\x08", 4) &&
!memcmp(ptr + 5, "\x00\x00\x00\x03", 4)) {
conn_status->status = WAIT_RESPONSE;
conn_status->type = CT;
DEBUG_MSG("\tDissector_postgresql AUTH type is clear-text!");
}
}
}
SAFE_FREE(ident);
return NULL;
}
| 113,723,108,844,572,020,000,000,000,000,000,000,000 | ec_postgresql.c | 75,203,240,410,363,380,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-6395 | Heap-based buffer overflow in the dissector_postgresql function in dissectors/ec_postgresql.c in Ettercap before 0.8.1 allows remote attackers to cause a denial of service or possibly execute arbitrary code via a crafted password length value that is inconsistent with the actual length of the password. | https://nvd.nist.gov/vuln/detail/CVE-2014-6395 |
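Note on CVE-2014-6395 above: in the clear-text branch, 'length' is read straight from the packet and then used as the copy length into a fixed-size buffer. The fix is to bound it before the strncpy; the sketch below assumes conn_status->password is a fixed-size array (as its use in the quoted code suggests), and the exact bound used upstream may differ:

		GET_ULONG_BE(length, ptr, 1);
		/* length is attacker-controlled: reject values that are too
		 * small to be valid or would not fit in the password buffer */
		if (length < 5 || (size_t)(length - 4) > sizeof(conn_status->password) - 1)
			return NULL;
		strncpy((char *)conn_status->password, (char *)(ptr + 5), length - 4);
		conn_status->password[length - 4] = 0;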
1,209 | linux | 410dd3cf4c9b36f27ed4542ee18b1af5e68645a4 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/410dd3cf4c9b36f27ed4542ee18b1af5e68645a4 | isofs: Fix unbounded recursion when processing relocated directories
We did not check the relocated directory in any way when processing the Rock
Ridge 'CL' tag. Thus a corrupted isofs image can possibly have a CL
entry pointing to another CL entry leading to possibly unbounded
recursion in kernel code and thus stack overflow or deadlocks (if there
is a loop created from CL entries).
Fix the problem by not allowing CL entry to point to a directory entry
with CL entry (such use makes no good sense anyway) and by checking
whether CL entry doesn't point to itself.
CC: stable@vger.kernel.org
Reported-by: Chris Evans <cevans@google.com>
Signed-off-by: Jan Kara <jack@suse.cz> | 1 | parse_rock_ridge_inode_internal(struct iso_directory_record *de,
struct inode *inode, int regard_xa)
{
int symlink_len = 0;
int cnt, sig;
struct inode *reloc;
struct rock_ridge *rr;
int rootflag;
struct rock_state rs;
int ret = 0;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
if (regard_xa) {
rs.chr += 14;
rs.len -= 14;
if (rs.len < 0)
rs.len = 0;
}
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
#ifndef CONFIG_ZISOFS /* No flag for SF or ZF */
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] &
(RR_PX | RR_TF | RR_SL | RR_CL)) == 0)
goto out;
break;
#endif
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
int p;
for (p = 0; p < rr->u.ER.len_id; p++)
printk("%c", rr->u.ER.data[p]);
}
printk("\n");
break;
case SIG('P', 'X'):
inode->i_mode = isonum_733(rr->u.PX.mode);
set_nlink(inode, isonum_733(rr->u.PX.n_links));
i_uid_write(inode, isonum_733(rr->u.PX.uid));
i_gid_write(inode, isonum_733(rr->u.PX.gid));
break;
case SIG('P', 'N'):
{
int high, low;
high = isonum_733(rr->u.PN.dev_high);
low = isonum_733(rr->u.PN.dev_low);
/*
* The Rock Ridge standard specifies that if
* sizeof(dev_t) <= 4, then the high field is
* unused, and the device number is completely
* stored in the low field. Some writers may
* ignore this subtlety,
* and as a result we test to see if the entire
* device number is
* stored in the low field, and use that.
*/
if ((low & ~0xff) && high == 0) {
inode->i_rdev =
MKDEV(low >> 8, low & 0xff);
} else {
inode->i_rdev =
MKDEV(high, low);
}
}
break;
case SIG('T', 'F'):
/*
* Some RRIP writers incorrectly place ctime in the
* TF_CREATE field. Try to handle this correctly for
* either case.
*/
/* Rock ridge never appears on a High Sierra disk */
cnt = 0;
if (rr->u.TF.flags & TF_CREATE) {
inode->i_ctime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_ctime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_MODIFY) {
inode->i_mtime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_mtime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ACCESS) {
inode->i_atime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_atime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ATTRIBUTES) {
inode->i_ctime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_ctime.tv_nsec = 0;
}
break;
case SIG('S', 'L'):
{
int slen;
struct SL_component *slp;
struct SL_component *oldslp;
slen = rr->len - 5;
slp = &rr->u.SL.link;
inode->i_size = symlink_len;
while (slen > 1) {
rootflag = 0;
switch (slp->flags & ~1) {
case 0:
inode->i_size +=
slp->len;
break;
case 2:
inode->i_size += 1;
break;
case 4:
inode->i_size += 2;
break;
case 8:
rootflag = 1;
inode->i_size += 1;
break;
default:
printk("Symlink component flag "
"not implemented\n");
}
slen -= slp->len + 2;
oldslp = slp;
slp = (struct SL_component *)
(((char *)slp) + slp->len + 2);
if (slen < 2) {
if (((rr->u.SL.
flags & 1) != 0)
&&
((oldslp->
flags & 1) == 0))
inode->i_size +=
1;
break;
}
/*
* If this component record isn't
* continued, then append a '/'.
*/
if (!rootflag
&& (oldslp->flags & 1) == 0)
inode->i_size += 1;
}
}
symlink_len = inode->i_size;
break;
case SIG('R', 'E'):
printk(KERN_WARNING "Attempt to read inode for "
"relocated directory\n");
goto out;
case SIG('C', 'L'):
ISOFS_I(inode)->i_first_extent =
isonum_733(rr->u.CL.location);
reloc =
isofs_iget(inode->i_sb,
ISOFS_I(inode)->i_first_extent,
0);
if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc);
goto out;
}
inode->i_mode = reloc->i_mode;
set_nlink(inode, reloc->i_nlink);
inode->i_uid = reloc->i_uid;
inode->i_gid = reloc->i_gid;
inode->i_rdev = reloc->i_rdev;
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
inode->i_atime = reloc->i_atime;
inode->i_ctime = reloc->i_ctime;
inode->i_mtime = reloc->i_mtime;
iput(reloc);
break;
#ifdef CONFIG_ZISOFS
case SIG('Z', 'F'): {
int algo;
if (ISOFS_SB(inode->i_sb)->s_nocompress)
break;
algo = isonum_721(rr->u.ZF.algorithm);
if (algo == SIG('p', 'z')) {
int block_shift =
isonum_711(&rr->u.ZF.parms[1]);
if (block_shift > 17) {
printk(KERN_WARNING "isofs: "
"Can't handle ZF block "
"size of 2^%d\n",
block_shift);
} else {
/*
* Note: we don't change
* i_blocks here
*/
ISOFS_I(inode)->i_file_format =
isofs_file_compressed;
/*
* Parameters to compression
* algorithm (header size,
* block size)
*/
ISOFS_I(inode)->i_format_parm[0] =
isonum_711(&rr->u.ZF.parms[0]);
ISOFS_I(inode)->i_format_parm[1] =
isonum_711(&rr->u.ZF.parms[1]);
inode->i_size =
isonum_733(rr->u.ZF.
real_size);
}
} else {
printk(KERN_WARNING
"isofs: Unknown ZF compression "
"algorithm: %c%c\n",
rr->u.ZF.algorithm[0],
rr->u.ZF.algorithm[1]);
}
break;
}
#endif
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
ret = 0;
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
| 46,643,672,205,519,710,000,000,000,000,000,000,000 | rock.c | 131,672,781,682,898,080,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-5472 | The parse_rock_ridge_inode_internal function in fs/isofs/rock.c in the Linux kernel through 3.16.1 allows local users to cause a denial of service (unkillable mount process) via a crafted iso9660 image with a self-referential CL entry. | https://nvd.nist.gov/vuln/detail/CVE-2014-5472 |
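Note on CVE-2014-5472 above: the fix adds two checks to the SIG('C', 'L') case, rejecting a CL entry found inside an already-relocated directory entry and rejecting a CL entry that points back at its own block. The RR_RELOC_DE flag and the error strings below follow my reading of the upstream change and should be treated as illustrative:

	case SIG('C', 'L'):
		/* a relocated directory entry must not itself carry a CL entry */
		if (flags & RR_RELOC_DE) {
			printk(KERN_ERR
			       "ISOFS: Recursive directory relocation is not supported\n");
			goto eio;
		}
		/* a CL entry pointing at its own block would loop forever */
		if (isonum_733(rr->u.CL.location) == ISOFS_I(inode)->i_iget5_block) {
			printk(KERN_ERR
			       "ISOFS: Directory relocation points to itself\n");
			goto eio;
		}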
1,211 | krb5 | af0ed4df4dfae762ab5fb605f5a0c8f59cb4f6ca | https://github.com/krb5/krb5 | https://github.com/krb5/krb5/commit/af0ed4df4dfae762ab5fb605f5a0c8f59cb4f6ca | Return only new keys in randkey [CVE-2014-5351]
In kadmind's randkey operation, if a client specifies the keepold
flag, do not include the preserved old keys in the response.
CVE-2014-5351:
An authenticated remote attacker can retrieve the current keys for a
service principal when generating a new set of keys for that
principal. The attacker needs to be authenticated as a user who has
the elevated privilege for randomizing the keys of other principals.
Normally, when a Kerberos administrator randomizes the keys of a
service principal, kadmind returns only the new keys. This prevents
an administrator who lacks legitimate privileged access to a service
from forging tickets to authenticate to that service. If the
"keepold" flag to the kadmin randkey RPC operation is true, kadmind
retains the old keys in the KDC database as intended, but also
unexpectedly returns the old keys to the client, which exposes the
service to ticket forgery attacks from the administrator.
A mitigating factor is that legitimate clients of the affected service
will start failing to authenticate to the service once they begin to
receive service tickets encrypted in the new keys. The affected
service will be unable to decrypt the newly issued tickets, possibly
alerting the legitimate administrator of the affected service.
CVSSv2: AV:N/AC:H/Au:S/C:P/I:N/A:N/E:POC/RL:OF/RC:C
[tlyu@mit.edu: CVE description and CVSS score]
ticket: 8018 (new)
target_version: 1.13
tags: pullup | 1 | kadm5_randkey_principal_3(void *server_handle,
krb5_principal principal,
krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
krb5_keyblock **keyblocks,
int *n_keys)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
int ret, last_pwd;
krb5_boolean have_pol = FALSE;
kadm5_server_handle_t handle = server_handle;
krb5_keyblock *act_mkey;
krb5_kvno act_kvno;
int new_n_ks_tuple = 0;
krb5_key_salt_tuple *new_ks_tuple = NULL;
if (keyblocks)
*keyblocks = NULL;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL)
return EINVAL;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
ret = apply_keysalt_policy(handle, adb.policy, n_ks_tuple, ks_tuple,
&new_n_ks_tuple, &new_ks_tuple);
if (ret)
goto done;
if (krb5_principal_compare(handle->context, principal, hist_princ)) {
/* If changing the history entry, the new entry must have exactly one
* key. */
if (keepold)
return KADM5_PROTECT_PRINCIPAL;
new_n_ks_tuple = 1;
}
ret = kdb_get_active_mkey(handle, &act_kvno, &act_mkey);
if (ret)
goto done;
ret = krb5_dbe_crk(handle->context, act_mkey, new_ks_tuple, new_n_ks_tuple,
keepold, kdb);
if (ret)
goto done;
ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno);
if (ret)
goto done;
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
ret = krb5_timeofday(handle->context, &now);
if (ret)
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
ret = get_policy(handle, adb.policy, &pol, &have_pol);
if (ret)
goto done;
}
if (have_pol) {
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb, &last_pwd);
if (ret)
goto done;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now);
if (ret)
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if (keyblocks) {
ret = decrypt_key_data(handle->context,
kdb->n_key_data, kdb->key_data,
keyblocks, n_keys);
if (ret)
goto done;
}
/* key data changed, let the database provider know */
kdb->mask = KADM5_KEY_DATA | KADM5_FAIL_AUTH_COUNT;
/* | KADM5_RANDKEY_USED */;
ret = k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, principal, keepold,
new_n_ks_tuple, new_ks_tuple, NULL);
if (ret)
goto done;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
(void) k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, principal,
keepold, new_n_ks_tuple, new_ks_tuple, NULL);
ret = KADM5_OK;
done:
free(new_ks_tuple);
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
| 147,608,648,129,973,760,000,000,000,000,000,000,000 | svr_principal.c | 212,802,498,518,670,500,000,000,000,000,000,000,000 | [
"CWE-255"
] | CVE-2014-5351 | The kadm5_randkey_principal_3 function in lib/kadm5/srv/svr_principal.c in kadmind in MIT Kerberos 5 (aka krb5) before 1.13 sends old keys in a response to a -randkey -keepold request, which allows remote authenticated users to forge tickets by leveraging administrative access. | https://nvd.nist.gov/vuln/detail/CVE-2014-5351 |
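Note on CVE-2014-5351 above: the fix makes the keyblocks returned to the client cover only the keys created by krb5_dbe_crk(), not the preserved old ones. A sketch of the changed call site; count_new_keys is a small helper (the name is used here for illustration) that counts the leading entries of key_data carrying the newest kvno:

	if (keyblocks) {
		/* return only the freshly generated keys, never the old ones
		 * kept because of the keepold flag */
		int n_new_keys = count_new_keys(kdb->n_key_data, kdb->key_data);
		ret = decrypt_key_data(handle->context, n_new_keys, kdb->key_data,
				       keyblocks, n_keys);
		if (ret)
			goto done;
	}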
1,217 | linux | 9566d6742852c527bf5af38af5cbb878dad75705 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/9566d6742852c527bf5af38af5cbb878dad75705 | mnt: Correct permission checks in do_remount
While investigating the issue where "mount --bind -oremount,ro ..."
would result in a later "mount --bind -oremount,rw" succeeding even if
the mount started off locked, I realized that there are several
additional mount flags that should be locked and are not.
In particular MNT_NOSUID, MNT_NODEV, MNT_NOEXEC, and the atime
flags in addition to MNT_READONLY should all be locked. These
flags are all per superblock, can all be changed with MS_BIND,
and should not be changable if set by a more privileged user.
The following additions to the current logic are added in this patch.
- nosuid may not be clearable by a less privileged user.
- nodev may not be clearable by a less privileged user.
- noexec may not be clearable by a less privileged user.
- atime flags may not be changeable by a less privileged user.
The logic with atime is that always setting atime on access is a
global policy and backup software and auditing software could break if
atime bits are not updated (when they are configured to be updated),
and serious performance degradation could result (DOS attack) if atime
updates happen when they have been explicitly disabled. Therefore an
unprivileged user should not be able to mess with the atime bits set
by a more privileged user.
The additional restrictions are implemented with the addition of
MNT_LOCK_NOSUID, MNT_LOCK_NODEV, MNT_LOCK_NOEXEC, and MNT_LOCK_ATIME
mnt flags.
Taken together these changes and the fixes for MNT_LOCK_READONLY
should make it safe for an unprivileged user to create a user
namespace and to call "mount --bind -o remount,... ..." without
the danger of mount flags being changed maliciously.
Cc: stable@vger.kernel.org
Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com> | 1 | static int do_remount(struct path *path, int flags, int mnt_flags,
void *data)
{
int err;
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
if (!check_mnt(mnt))
return -EINVAL;
if (path->dentry != path->mnt->mnt_root)
return -EINVAL;
/* Don't allow changing of locked mnt flags.
*
* No locks need to be held here while testing the various
* MNT_LOCK flags because those flags can never be cleared
* once they are set.
*/
if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
!(mnt_flags & MNT_READONLY)) {
return -EPERM;
}
err = security_sb_remount(sb, data);
if (err)
return err;
down_write(&sb->s_umount);
if (flags & MS_BIND)
err = change_mount_flags(path->mnt, flags);
else if (!capable(CAP_SYS_ADMIN))
err = -EPERM;
else
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
lock_mount_hash();
mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
mnt->mnt.mnt_flags = mnt_flags;
touch_mnt_namespace(mnt->mnt_ns);
unlock_mount_hash();
}
up_write(&sb->s_umount);
return err;
}
| 67,643,344,520,913,290,000,000,000,000,000,000,000 | namespace.c | 248,678,835,224,379,800,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2014-5207 | fs/namespace.c in the Linux kernel through 3.16.1 does not properly restrict clearing MNT_NODEV, MNT_NOSUID, and MNT_NOEXEC and changing MNT_ATIME_MASK during a remount of a bind mount, which allows local users to gain privileges, interfere with backups and auditing on systems that had atime enabled, or cause a denial of service (excessive filesystem updating) on systems that had atime disabled via a "mount -o remount" command within a user namespace. | https://nvd.nist.gov/vuln/detail/CVE-2014-5207 |
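Note on CVE-2014-5207 above: the fix extends the MNT_LOCK_READONLY check already visible in do_remount() with per-flag checks for the new MNT_LOCK_NOSUID, MNT_LOCK_NODEV, MNT_LOCK_NOEXEC and MNT_LOCK_ATIME bits the commit message describes. A sketch of those added checks:

	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV))
		return -EPERM;

	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID))
		return -EPERM;

	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC))
		return -EPERM;

	/* the atime policy may not be changed at all once locked */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
		return -EPERM;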
1,218 | linux | a6138db815df5ee542d848318e5dae681590fccd | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/a6138db815df5ee542d848318e5dae681590fccd | mnt: Only change user settable mount flags in remount
Kenton Varda <kenton@sandstorm.io> discovered that by remounting a
read-only bind mount read-only in a user namespace the
MNT_LOCK_READONLY bit would be cleared, allowing an unprivileged user
to then remount a read-only mount read-write.
Correct this by replacing the mask of mount flags to preserve
with a mask of mount flags that may be changed, and preserve
all others. This ensures that any future bugs with this mask and
remount will fail in an easy to detect way where new mount flags
simply won't change.
Cc: stable@vger.kernel.org
Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com> | 1 | static int do_remount(struct path *path, int flags, int mnt_flags,
void *data)
{
int err;
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
if (!check_mnt(mnt))
return -EINVAL;
if (path->dentry != path->mnt->mnt_root)
return -EINVAL;
err = security_sb_remount(sb, data);
if (err)
return err;
down_write(&sb->s_umount);
if (flags & MS_BIND)
err = change_mount_flags(path->mnt, flags);
else if (!capable(CAP_SYS_ADMIN))
err = -EPERM;
else
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
lock_mount_hash();
mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
mnt->mnt.mnt_flags = mnt_flags;
touch_mnt_namespace(mnt->mnt_ns);
unlock_mount_hash();
}
up_write(&sb->s_umount);
return err;
}
| 251,245,953,508,821,370,000,000,000,000,000,000,000 | namespace.c | 87,954,754,859,779,580,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2014-5206 | The do_remount function in fs/namespace.c in the Linux kernel through 3.16.1 does not maintain the MNT_LOCK_READONLY bit across a remount of a bind mount, which allows local users to bypass an intended read-only restriction and defeat certain sandbox protection mechanisms via a "mount -o remount" command within a user namespace. | https://nvd.nist.gov/vuln/detail/CVE-2014-5206 |
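Note on CVE-2014-5206 above: the fix changes which flags survive the remount, preserving every flag the user may not set instead of only the propagation flags, so MNT_LOCK_READONLY can no longer be dropped. This matches the later version of do_remount() quoted in entry 1,217 above; the changed lines look like:

		lock_mount_hash();
		/* keep all flags outside the user-settable set, including
		 * the MNT_LOCK_* bits */
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();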
1,219 | linux | 1be9a950c646c9092fb3618197f7b6bfb50e82aa | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/1be9a950c646c9092fb3618197f7b6bfb50e82aa | net: sctp: inherit auth_capable on INIT collisions
Jason reported an oops caused by SCTP on his ARM machine with
SCTP authentication enabled:
Internal error: Oops: 17 [#1] ARM
CPU: 0 PID: 104 Comm: sctp-test Not tainted 3.13.0-68744-g3632f30c9b20-dirty #1
task: c6eefa40 ti: c6f52000 task.ti: c6f52000
PC is at sctp_auth_calculate_hmac+0xc4/0x10c
LR is at sg_init_table+0x20/0x38
pc : [<c024bb80>] lr : [<c00f32dc>] psr: 40000013
sp : c6f538e8 ip : 00000000 fp : c6f53924
r10: c6f50d80 r9 : 00000000 r8 : 00010000
r7 : 00000000 r6 : c7be4000 r5 : 00000000 r4 : c6f56254
r3 : c00c8170 r2 : 00000001 r1 : 00000008 r0 : c6f1e660
Flags: nZcv IRQs on FIQs on Mode SVC_32 ISA ARM Segment user
Control: 0005397f Table: 06f28000 DAC: 00000015
Process sctp-test (pid: 104, stack limit = 0xc6f521c0)
Stack: (0xc6f538e8 to 0xc6f54000)
[...]
Backtrace:
[<c024babc>] (sctp_auth_calculate_hmac+0x0/0x10c) from [<c0249af8>] (sctp_packet_transmit+0x33c/0x5c8)
[<c02497bc>] (sctp_packet_transmit+0x0/0x5c8) from [<c023e96c>] (sctp_outq_flush+0x7fc/0x844)
[<c023e170>] (sctp_outq_flush+0x0/0x844) from [<c023ef78>] (sctp_outq_uncork+0x24/0x28)
[<c023ef54>] (sctp_outq_uncork+0x0/0x28) from [<c0234364>] (sctp_side_effects+0x1134/0x1220)
[<c0233230>] (sctp_side_effects+0x0/0x1220) from [<c02330b0>] (sctp_do_sm+0xac/0xd4)
[<c0233004>] (sctp_do_sm+0x0/0xd4) from [<c023675c>] (sctp_assoc_bh_rcv+0x118/0x160)
[<c0236644>] (sctp_assoc_bh_rcv+0x0/0x160) from [<c023d5bc>] (sctp_inq_push+0x6c/0x74)
[<c023d550>] (sctp_inq_push+0x0/0x74) from [<c024a6b0>] (sctp_rcv+0x7d8/0x888)
While we already had various kinds of bugs in that area
ec0223ec48a9 ("net: sctp: fix sctp_sf_do_5_1D_ce to verify if
we/peer is AUTH capable") and b14878ccb7fa ("net: sctp: cache
auth_enable per endpoint"), this one is a bit of a different
kind.
Giving a bit more background on why SCTP authentication is
needed can be found in RFC4895:
SCTP uses 32-bit verification tags to protect itself against
blind attackers. These values are not changed during the
lifetime of an SCTP association.
Looking at new SCTP extensions, there is the need to have a
method of proving that an SCTP chunk(s) was really sent by
the original peer that started the association and not by a
malicious attacker.
To cause this bug, we're triggering an INIT collision between
peers; a normal SCTP handshake where both sides intend to
authenticate packets contains RANDOM; CHUNKS; HMAC-ALGO
parameters that are being negotiated among peers:
---------- INIT[RANDOM; CHUNKS; HMAC-ALGO] ---------->
<------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] ---------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
RFC4895 says that each endpoint therefore knows its own random
number and the peer's random number *after* the association
has been established. The local and peer's random number along
with the shared key are then part of the secret used for
calculating the HMAC in the AUTH chunk.
Now, in our scenario, we have 2 threads with 1 non-blocking
SEQ_PACKET socket each, setting up common shared SCTP_AUTH_KEY
and SCTP_AUTH_ACTIVE_KEY properly, and each of them calling
sctp_bindx(3), listen(2) and connect(2) against each other,
thus the handshake looks similar to this, e.g.:
---------- INIT[RANDOM; CHUNKS; HMAC-ALGO] ---------->
<------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] ---------
<--------- INIT[RANDOM; CHUNKS; HMAC-ALGO] -----------
-------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] -------->
...
Since such collisions can also happen with verification tags,
the RFC4895 for AUTH rather vaguely says under section 6.1:
In case of INIT collision, the rules governing the handling
of this Random Number follow the same pattern as those for
the Verification Tag, as explained in Section 5.2.4 of
RFC 2960 [5]. Therefore, each endpoint knows its own Random
Number and the peer's Random Number after the association
has been established.
In RFC2960, section 5.2.4, we're eventually hitting Action B:
B) In this case, both sides may be attempting to start an
association at about the same time but the peer endpoint
started its INIT after responding to the local endpoint's
INIT. Thus it may have picked a new Verification Tag not
being aware of the previous Tag it had sent this endpoint.
The endpoint should stay in or enter the ESTABLISHED
state but it MUST update its peer's Verification Tag from
the State Cookie, stop any init or cookie timers that may
running and send a COOKIE ACK.
In other words, the handling of the Random parameter is the
same as behavior for the Verification Tag as described in
Action B of section 5.2.4.
Looking at the code, we exactly hit the sctp_sf_do_dupcook_b()
case which triggers an SCTP_CMD_UPDATE_ASSOC command to the
side effect interpreter, and in fact it properly copies over
peer_{random, hmacs, chunks} parameters from the newly created
association to update the existing one.
Also, the old asoc_shared_key is being released and based on
the new params, sctp_auth_asoc_init_active_key() updated.
However, the issue observed in this case is that the previous
asoc->peer.auth_capable was 0, and has *not* been updated, so
that instead of creating a new secret, we're doing an early
return from the function sctp_auth_asoc_init_active_key()
leaving asoc->asoc_shared_key as NULL. However, we now have to
authenticate chunks from the updated chunk list (e.g. COOKIE-ACK).
That in fact causes the server side when responding with ...
<------------------ AUTH; COOKIE-ACK -----------------
... to trigger a NULL pointer dereference, since in
sctp_packet_transmit(), it discovers that an AUTH chunk is
being queued for xmit, and thus it calls sctp_auth_calculate_hmac().
Since the asoc->active_key_id is still inherited from the
endpoint, and the same as encoded into the chunk, it uses
asoc->asoc_shared_key, which is still NULL, as an asoc_key
and dereferences it in ...
crypto_hash_setkey(desc.tfm, &asoc_key->data[0], asoc_key->len)
... causing an oops. All this happens because sctp_make_cookie_ack()
called with the *new* association has the peer.auth_capable=1
and therefore marks the chunk with auth=1 after checking
sctp_auth_send_cid(), but it is *actually* sent later on over
the then *updated* association's transport that didn't initialize
its shared key due to peer.auth_capable=0. Since control chunks
in that case are not sent by the temporary association which
are scheduled for deletion, they are issued for xmit via
SCTP_CMD_REPLY in the interpreter with the context of the
*updated* association. peer.auth_capable was 0 in the updated
association (which went from COOKIE_WAIT into ESTABLISHED state),
since all previous processing that performed sctp_process_init()
was being done on temporary associations, that we eventually
throw away each time.
The correct fix is to update to the new peer.auth_capable
value as well in the collision case via sctp_assoc_update(),
so that in case the collision migrated from 0 -> 1,
sctp_auth_asoc_init_active_key() can properly recalculate
the secret. This therefore fixes the observed server panic.
Fixes: 730fc3d05cd4 ("[SCTP]: Implete SCTP-AUTH parameter processing")
Reported-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Tested-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Cc: Vlad Yasevich <vyasevich@gmail.com>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | void sctp_assoc_update(struct sctp_association *asoc,
struct sctp_association *new)
{
struct sctp_transport *trans;
struct list_head *pos, *temp;
/* Copy in new parameters of peer. */
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
asoc->peer.sack_needed = new->peer.sack_needed;
asoc->peer.i = new->peer.i;
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
asoc->peer.i.initial_tsn, GFP_ATOMIC);
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
sctp_assoc_rm_peer(asoc, trans);
continue;
}
if (asoc->state >= SCTP_STATE_ESTABLISHED)
sctp_transport_reset(trans);
}
/* If the case is A (association restart), use
* initial_tsn as next_tsn. If the case is B, use
* current next_tsn in case data sent to peer
* has been discarded and needs retransmission.
*/
if (asoc->state >= SCTP_STATE_ESTABLISHED) {
asoc->next_tsn = new->next_tsn;
asoc->ctsn_ack_point = new->ctsn_ack_point;
asoc->adv_peer_ack_point = new->adv_peer_ack_point;
/* Reinitialize SSN for both local streams
* and peer's streams.
*/
sctp_ssnmap_clear(asoc->ssnmap);
/* Flush the ULP reassembly and ordered queue.
* Any data there will now be stale and will
* cause problems.
*/
sctp_ulpq_flush(&asoc->ulpq);
/* reset the overall association error count so
* that the restarted association doesn't get torn
* down on the next retransmission timer.
*/
asoc->overall_error_count = 0;
} else {
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports) {
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state);
}
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
if (!asoc->ssnmap) {
/* Move the ssnmap. */
asoc->ssnmap = new->ssnmap;
new->ssnmap = NULL;
}
if (!asoc->assoc_id) {
/* get a new association id since we don't have one
* yet.
*/
sctp_assoc_set_id(asoc, GFP_ATOMIC);
}
}
/* SCTP-AUTH: Save the peer parameters from the new associations
* and also move the association shared keys over
*/
kfree(asoc->peer.peer_random);
asoc->peer.peer_random = new->peer.peer_random;
new->peer.peer_random = NULL;
kfree(asoc->peer.peer_chunks);
asoc->peer.peer_chunks = new->peer.peer_chunks;
new->peer.peer_chunks = NULL;
kfree(asoc->peer.peer_hmacs);
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;
sctp_auth_key_put(asoc->asoc_shared_key);
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
| 268,873,015,146,939,470,000,000,000,000,000,000,000 | associola.c | 290,607,751,015,410,100,000,000,000,000,000,000,000 | [
"CWE-476"
] | CVE-2014-5077 | The sctp_assoc_update function in net/sctp/associola.c in the Linux kernel through 3.15.8, when SCTP authentication is enabled, allows remote attackers to cause a denial of service (NULL pointer dereference and OOPS) by starting to establish an association between two endpoints immediately after an exchange of INIT and INIT ACK chunks to establish an earlier association between these endpoints in the opposite direction. | https://nvd.nist.gov/vuln/detail/CVE-2014-5077 |
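Note on CVE-2014-5077 above: the fix is a one-line addition in sctp_assoc_update(), copying the peer's AUTH capability over before the shared key is recomputed, so sctp_auth_asoc_init_active_key() no longer returns early with a NULL asoc_shared_key. A sketch of the tail of the function with that line in place:

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	/* carry over the (possibly upgraded) AUTH capability before
	 * recomputing the association shared key */
	asoc->peer.auth_capable = new->peer.auth_capable;

	sctp_auth_key_put(asoc->asoc_shared_key);
	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);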
1,220 | linux | 295dc39d941dc2ae53d5c170365af4c9d5c16212 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/295dc39d941dc2ae53d5c170365af4c9d5c16212 | fs: umount on symlink leaks mnt count
Currently umount on symlink blocks following umount:
/vz is separate mount
# ls /vz/ -al | grep test
drwxr-xr-x. 2 root root 4096 Jul 19 01:14 testdir
lrwxrwxrwx. 1 root root 11 Jul 19 01:16 testlink -> /vz/testdir
# umount -l /vz/testlink
umount: /vz/testlink: not mounted (expected)
# lsof /vz
# umount /vz
umount: /vz: device is busy. (unexpected)
In this case mountpoint_last() gets an extra refcount on path->mnt
Signed-off-by: Vasily Averin <vvs@openvz.org>
Acked-by: Ian Kent <raven@themaw.net>
Acked-by: Jeff Layton <jlayton@primarydata.com>
Cc: stable@vger.kernel.org
Signed-off-by: Christoph Hellwig <hch@lst.de> | 1 | mountpoint_last(struct nameidata *nd, struct path *path)
{
int error = 0;
struct dentry *dentry;
struct dentry *dir = nd->path.dentry;
/* If we're in rcuwalk, drop out of it to handle last component */
if (nd->flags & LOOKUP_RCU) {
if (unlazy_walk(nd, NULL)) {
error = -ECHILD;
goto out;
}
}
nd->flags &= ~LOOKUP_PARENT;
if (unlikely(nd->last_type != LAST_NORM)) {
error = handle_dots(nd, nd->last_type);
if (error)
goto out;
dentry = dget(nd->path.dentry);
goto done;
}
mutex_lock(&dir->d_inode->i_mutex);
dentry = d_lookup(dir, &nd->last);
if (!dentry) {
/*
* No cached dentry. Mounted dentries are pinned in the cache,
* so that means that this dentry is probably a symlink or the
* path doesn't actually point to a mounted dentry.
*/
dentry = d_alloc(dir, &nd->last);
if (!dentry) {
error = -ENOMEM;
mutex_unlock(&dir->d_inode->i_mutex);
goto out;
}
dentry = lookup_real(dir->d_inode, dentry, nd->flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry)) {
mutex_unlock(&dir->d_inode->i_mutex);
goto out;
}
}
mutex_unlock(&dir->d_inode->i_mutex);
done:
if (!dentry->d_inode || d_is_negative(dentry)) {
error = -ENOENT;
dput(dentry);
goto out;
}
path->dentry = dentry;
path->mnt = mntget(nd->path.mnt);
if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
return 1;
follow_mount(path);
error = 0;
out:
terminate_walk(nd);
return error;
}
| 289,527,718,022,341,300,000,000,000,000,000,000,000 | namei.c | 135,878,708,536,746,050,000,000,000,000,000,000,000 | [
"CWE-59"
] | CVE-2014-5045 | The mountpoint_last function in fs/namei.c in the Linux kernel before 3.15.8 does not properly maintain a certain reference count during attempts to use the umount system call in conjunction with a symlink, which allows local users to cause a denial of service (memory consumption or use-after-free) or possibly have unspecified other impact via the umount program. | https://nvd.nist.gov/vuln/detail/CVE-2014-5045 |
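Note on CVE-2014-5045 above: the reference leak is the unconditional mntget() taken before the symlink check; when should_follow_link() returns 1 the extra count is never dropped, pinning the mount. The fix takes the reference only on the non-symlink path, roughly:

	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
		return 1;
	/* pin the mount only once we know the path is handed back as-is,
	 * not re-walked as a symlink */
	mntget(path->mnt);
	follow_mount(path);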
1,223 | linux | d3217b15a19a4779c39b212358a5c71d725822ee | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/d3217b15a19a4779c39b212358a5c71d725822ee | sctp: Fix sk_ack_backlog wrap-around problem
Consider the scenario:
For a TCP-style socket, while processing the COOKIE_ECHO chunk in
sctp_sf_do_5_1D_ce(), after it has passed a series of sanity checks,
a new association would be created in sctp_unpack_cookie(), but afterwards,
some processing maybe failed, and sctp_association_free() will be called to
free the previously allocated association, in sctp_association_free(),
sk_ack_backlog value is decremented for this socket, since the initial
value for sk_ack_backlog is 0, after the decrement, it will be 65535,
a wrap-around problem happens, and if we want to establish new associations
afterward in the same socket, ABORT would be triggered since sctp deem the
accept queue as full.
Fix this issue by only decrementing sk_ack_backlog for associations in
the endpoint's list.
Fix-suggested-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: Xufeng Zhang <xufeng.zhang@windriver.com>
Acked-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | void sctp_association_free(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
struct sctp_transport *transport;
struct list_head *pos, *temp;
int i;
/* Only real associations count against the endpoint, so
* don't bother for if this is a temporary association.
*/
if (!asoc->temp) {
list_del(&asoc->asocs);
/* Decrement the backlog value for a TCP-style listening
* socket.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk->sk_ack_backlog--;
}
/* Mark as dead, so other users can know this structure is
* going away.
*/
asoc->base.dead = true;
/* Dispose of any data lying around in the outqueue. */
sctp_outq_free(&asoc->outqueue);
/* Dispose of any pending messages for the upper layer. */
sctp_ulpq_free(&asoc->ulpq);
/* Dispose of any pending chunks on the inqueue. */
sctp_inq_free(&asoc->base.inqueue);
sctp_tsnmap_free(&asoc->peer.tsn_map);
/* Free ssnmap storage. */
sctp_ssnmap_free(asoc->ssnmap);
/* Clean up the bound address list. */
sctp_bind_addr_free(&asoc->base.bind_addr);
/* Do we need to go through all of our timers and
* delete them? To be safe we will try to delete all, but we
* should be able to go through and make a guess based
* on our state.
*/
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
if (del_timer(&asoc->timers[i]))
sctp_association_put(asoc);
}
/* Free peer's cached cookie. */
kfree(asoc->peer.cookie);
kfree(asoc->peer.peer_random);
kfree(asoc->peer.peer_chunks);
kfree(asoc->peer.peer_hmacs);
/* Release the transport structures. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
list_del_rcu(pos);
sctp_transport_free(transport);
}
asoc->peer.transport_count = 0;
sctp_asconf_queue_teardown(asoc);
/* Free pending address space being deleted */
if (asoc->asconf_addr_del_pending != NULL)
kfree(asoc->asconf_addr_del_pending);
/* AUTH - Free the endpoint shared keys */
sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
/* AUTH - Free the association shared key */
sctp_auth_key_put(asoc->asoc_shared_key);
sctp_association_put(asoc);
}
| 307,155,962,389,758,070,000,000,000,000,000,000,000 | None | null | [
"CWE-20"
] | CVE-2014-4667 | The sctp_association_free function in net/sctp/associola.c in the Linux kernel before 3.15.2 does not properly manage a certain backlog value, which allows remote attackers to cause a denial of service (socket outage) via a crafted SCTP packet. | https://nvd.nist.gov/vuln/detail/CVE-2014-4667 |
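Note on CVE-2014-4667 above: the wrap-around happens because sctp_association_free() decrements sk_ack_backlog for an association that was never counted against the listening socket. The fix keys both the list removal and the decrement on whether the association is actually linked into the endpoint's list; the exact guard used upstream may differ slightly from this sketch:

	/* only associations that really sit on the endpoint's list were
	 * counted, so only those may decrement the backlog */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}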
1,224 | linux | ac902c112d90a89e59916f751c2745f4dbdbb4bd | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/ac902c112d90a89e59916f751c2745f4dbdbb4bd | ALSA: control: Handle numid overflow
Each control gets automatically assigned its numids when the control is created.
The allocation is done by incrementing the numid by the amount of allocated
numids per allocation. This means that excessive creation and destruction of
controls (e.g. via SNDRV_CTL_IOCTL_ELEM_ADD/REMOVE) can cause the id to
eventually overflow. Currently, when this happens, kctl->id.numid + kctl->count
for the control that caused the overflow will also overflow, causing it to be
smaller than kctl->id.numid. Most of the code assumes that this is something
that cannot happen, so we need to make sure that it won't happen
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Acked-by: Jaroslav Kysela <perex@perex.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | 1 | static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
unsigned int count)
{
struct snd_kcontrol *kctl;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid < card->last_numid + 1 + count &&
kctl->id.numid + kctl->count > card->last_numid + 1) {
card->last_numid = kctl->id.numid + kctl->count - 1;
return true;
}
}
return false;
}
| 82,969,020,191,262,430,000,000,000,000,000,000,000 | control.c | 65,123,648,022,085,850,000,000,000,000,000,000,000 | [
"CWE-189"
] | CVE-2014-4656 | Multiple integer overflows in sound/core/control.c in the ALSA control implementation in the Linux kernel before 3.15.2 allow local users to cause a denial of service by leveraging /dev/snd/controlCX access, related to (1) index values in the snd_ctl_add function and (2) numid values in the snd_ctl_remove_numid_conflict function. | https://nvd.nist.gov/vuln/detail/CVE-2014-4656 |
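Note on CVE-2014-4656 above: besides the numid handling in snd_ctl_remove_numid_conflict() shown above, the fix adds an overflow guard when a control is added, so the id/index arithmetic cannot wrap in the first place. A sketch of that guard in snd_ctl_add(); the exact placement upstream may differ:

	struct snd_ctl_elem_id id = kcontrol->id;

	/* refuse a control whose index range would wrap past UINT_MAX */
	if (id.index > UINT_MAX - kcontrol->count)
		goto error;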
1,225 | linux | 82262a46627bebb0febcc26664746c25cef08563 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/82262a46627bebb0febcc26664746c25cef08563 | ALSA: control: Fix replacing user controls
There are two issues with the current implementation for replacing user
controls. The first is that the code does not check if the control is actually a
user control and neither does it check if the control is owned by the process
that tries to remove it. That allows userspace applications to remove arbitrary
controls, which can cause a user after free if a for example a driver does not
expect a control to be removed from under its feed.
The second issue is that on one hand when a control is replaced the
user_ctl_count limit is not checked and on the other hand the user_ctl_count is
increased (even though the number of user controls does not change). This allows
userspace, once the user_ctl_count limit as been reached, to repeatedly replace
a control until user_ctl_count overflows. Once that happens new controls can be
added effectively bypassing the user_ctl_count limit.
Both issues can be fixed by instead of open-coding the removal of the control
that is to be replaced to use snd_ctl_remove_user_ctl(). This function does
proper permission checks as well as decrements user_ctl_count after the control
has been removed.
Note that by using snd_ctl_remove_user_ctl() the check which returns -EBUSY at the
beginning of the function if the control already exists is removed. This is not
a problem though since the check is quite useless, because the lock that is
protecting the control list is released between the check and before adding the
new control to the list, which means that it is possible that a different
control with the same settings is added to the list after the check. Luckily
there is another check that is done while holding the lock in snd_ctl_add(), so
we'll rely on that to make sure that the same control is not added twice.
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Acked-by: Jaroslav Kysela <perex@perex.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | 1 | static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
struct snd_card *card = file->card;
struct snd_kcontrol kctl, *_kctl;
unsigned int access;
long private_size;
struct user_element *ue;
int idx, err;
if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
down_write(&card->controls_rwsem);
_kctl = snd_ctl_find_id(card, &info->id);
err = 0;
if (_kctl) {
if (replace)
err = snd_ctl_remove(card, _kctl);
else
err = -EBUSY;
} else {
if (replace)
err = -ENOENT;
}
up_write(&card->controls_rwsem);
if (err < 0)
return err;
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
kctl.info = snd_ctl_elem_user_enum_info;
else
kctl.info = snd_ctl_elem_user_info;
if (access & SNDRV_CTL_ELEM_ACCESS_READ)
kctl.get = snd_ctl_elem_user_get;
if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
kctl.put = snd_ctl_elem_user_put;
if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
kctl.tlv.c = snd_ctl_elem_user_tlv;
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
private_size = sizeof(long);
if (info->count > 128)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
private_size = sizeof(long long);
if (info->count > 64)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
private_size = sizeof(unsigned int);
if (info->count > 128 || info->value.enumerated.items == 0)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
private_size = sizeof(unsigned char);
if (info->count > 512)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
private_size = sizeof(struct snd_aes_iec958);
if (info->count != 1)
return -EINVAL;
break;
default:
return -EINVAL;
}
private_size *= info->count;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
ue->elem_data_size = private_size;
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
kfree(ue);
return err;
}
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
kfree(ue->priv_data);
kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
if (err < 0)
return err;
down_write(&card->controls_rwsem);
card->user_ctl_count++;
up_write(&card->controls_rwsem);
return 0;
}
| 104,448,511,318,866,040,000,000,000,000,000,000,000 | control.c | 234,091,425,417,038,060,000,000,000,000,000,000,000 | [
"CWE-189"
] | CVE-2014-4654 | The snd_ctl_elem_add function in sound/core/control.c in the ALSA control implementation in the Linux kernel before 3.15.2 does not check authorization for SNDRV_CTL_IOCTL_ELEM_REPLACE commands, which allows local users to remove kernel controls and cause a denial of service (use-after-free and system crash) by leveraging /dev/snd/controlCX access for an ioctl call. | https://nvd.nist.gov/vuln/detail/CVE-2014-4654 |
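Editor's note: the commit message for record 1,225 explains that the replace path removed controls without checking that they were user controls owned by the caller. The following sketch models that ownership check in isolation; all names are illustrative and none of this is the ALSA implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical control record: who created it and whether it is a user control. */
struct demo_ctl {
    const void *owner;      /* file/session that created it, NULL for driver controls */
    bool user_control;
    bool present;
};

/* Pattern from the fix description: refuse to remove controls the caller
 * does not own, instead of open-coding an unconditional removal. */
static int remove_user_ctl_checked(struct demo_ctl *ctl, const void *caller)
{
    if (!ctl->present)
        return -1;                  /* nothing to remove */
    if (!ctl->user_control || ctl->owner != caller)
        return -2;                  /* not ours: the unchecked-removal bug */
    ctl->present = false;
    return 0;
}

int main(void)
{
    int file_a, file_b;
    struct demo_ctl driver_ctl = { NULL, false, true };
    struct demo_ctl user_ctl   = { &file_a, true, true };

    printf("remove driver ctl as user: %d\n",
           remove_user_ctl_checked(&driver_ctl, &file_a));   /* rejected */
    printf("remove user ctl as other:  %d\n",
           remove_user_ctl_checked(&user_ctl, &file_b));      /* rejected */
    printf("remove user ctl as owner:  %d\n",
           remove_user_ctl_checked(&user_ctl, &file_a));      /* allowed */
    return 0;
}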
1,233 | linux | 206204a1162b995e2185275167b22468c00d6b36 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/206204a1162b995e2185275167b22468c00d6b36 | lz4: ensure length does not wrap
Given some pathologically compressed data, lz4 could possibly decide to
wrap a few internal variables, causing unknown things to happen. Catch
this before the wrapping happens and abort the decompression.
Reported-by: "Don A. Bailey" <donb@securitymouse.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 1 | static int lz4_uncompress(const char *source, char *dest, int osize)
{
const BYTE *ip = (const BYTE *) source;
const BYTE *ref;
BYTE *op = (BYTE *) dest;
BYTE * const oend = op + osize;
BYTE *cpy;
unsigned token;
size_t length;
size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif
while (1) {
/* get runlength */
token = *ip++;
length = (token >> ML_BITS);
if (length == RUN_MASK) {
size_t len;
len = *ip++;
for (; len == 255; length += 255)
len = *ip++;
length += len;
}
/* copy literals */
cpy = op + length;
if (unlikely(cpy > oend - COPYLENGTH)) {
/*
* Error: not enough place for another match
* (min 4) + 5 literals
*/
if (cpy != oend)
goto _output_error;
memcpy(op, ip, length);
ip += length;
break; /* EOF */
}
LZ4_WILDCOPY(ip, op, cpy);
ip -= (op - cpy);
op = cpy;
/* get offset */
LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
ip += 2;
/* Error: offset create reference outside destination buffer */
if (unlikely(ref < (BYTE *const) dest))
goto _output_error;
/* get matchlength */
length = token & ML_MASK;
if (length == ML_MASK) {
for (; *ip == 255; length += 255)
ip++;
length += *ip++;
}
/* copy repeated sequence */
if (unlikely((op - ref) < STEPSIZE)) {
#if LZ4_ARCH64
size_t dec64 = dec64table[op - ref];
#else
const int dec64 = 0;
#endif
op[0] = ref[0];
op[1] = ref[1];
op[2] = ref[2];
op[3] = ref[3];
op += 4;
ref += 4;
ref -= dec32table[op-ref];
PUT4(ref, op);
op += STEPSIZE - 4;
ref -= dec64;
} else {
LZ4_COPYSTEP(ref, op);
}
cpy = op + length - (STEPSIZE - 4);
if (cpy > (oend - COPYLENGTH)) {
/* Error: request to write beyond destination buffer */
if (cpy > oend)
goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
op = cpy;
/*
* Check EOF (should never happen, since last 5 bytes
* are supposed to be literals)
*/
if (op == oend)
goto _output_error;
continue;
}
LZ4_SECURECOPY(ref, op, cpy);
op = cpy; /* correction */
}
/* end of decoding */
return (int) (((char *)ip) - source);
/* write overflow error detected */
_output_error:
return (int) (-(((char *)ip) - source));
}
| 88,608,013,649,608,300,000,000,000,000,000,000,000 | None | null | [
"CWE-20"
] | CVE-2014-4611 | Integer overflow in the LZ4 algorithm implementation, as used in Yann Collet LZ4 before r118 and in the lz4_uncompress function in lib/lz4/lz4_decompress.c in the Linux kernel before 3.15.2, on 32-bit platforms might allow context-dependent attackers to cause a denial of service (memory corruption) or possibly have unspecified other impact via a crafted Literal Run that would be improperly handled by programs not complying with an API limitation, a different vulnerability than CVE-2014-4715. | https://nvd.nist.gov/vuln/detail/CVE-2014-4611 |
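Editor's note: the lz4 record above concerns run lengths accumulated 255 bytes at a time, which can wrap and make pointer arithmetic such as op + length point before op. A minimal sketch of decoding an LZ4-style extended length while detecting the wrap follows; it is illustrative only and not the kernel decoder.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Decode an LZ4-style extended length: a run of 0xFF bytes plus a final byte.
 * Returns 0 on success, -1 if the accumulated length would wrap or the
 * input runs out. */
static int read_run_length(const uint8_t **pp, const uint8_t *end, size_t *out)
{
    const uint8_t *p = *pp;
    size_t length = 0;
    uint8_t b;

    do {
        if (p >= end)
            return -1;                       /* input overrun */
        b = *p++;
        if (length > SIZE_MAX - b)
            return -1;                       /* would wrap: abort decompression */
        length += b;
    } while (b == 255);

    *pp = p;
    *out = length;
    return 0;
}

int main(void)
{
    const uint8_t good[] = { 255, 255, 10 };
    const uint8_t *p = good;
    size_t len;
    if (read_run_length(&p, good + sizeof(good), &len) == 0)
        printf("decoded length: %zu\n", len);   /* prints 520 */
    return 0;
}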
1,234 | linux | 206a81c18401c0cde6e579164f752c4b147324ce | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/206a81c18401c0cde6e579164f752c4b147324ce | lzo: properly check for overruns
The lzo decompressor can, if given some really crazy data, possibly
overrun some variable types. Modify the checking logic to properly
detect overruns before they happen.
Reported-by: "Don A. Bailey" <donb@securitymouse.com>
Tested-by: "Don A. Bailey" <donb@securitymouse.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 1 | int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
{
unsigned char *op;
const unsigned char *ip;
size_t t, next;
size_t state = 0;
const unsigned char *m_pos;
const unsigned char * const ip_end = in + in_len;
unsigned char * const op_end = out + *out_len;
op = out;
ip = in;
if (unlikely(in_len < 3))
goto input_overrun;
if (*ip > 17) {
t = *ip++ - 17;
if (t < 4) {
next = t;
goto match_next;
}
goto copy_literal_run;
}
for (;;) {
t = *ip++;
if (t < 16) {
if (likely(state == 0)) {
if (unlikely(t == 0)) {
while (unlikely(*ip == 0)) {
t += 255;
ip++;
NEED_IP(1);
}
t += 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
COPY8(op, ip);
op += 8;
ip += 8;
COPY8(op, ip);
op += 8;
ip += 8;
} while (ip < ie);
ip = ie;
op = oe;
} else
#endif
{
NEED_OP(t);
NEED_IP(t + 3);
do {
*op++ = *ip++;
} while (--t > 0);
}
state = 4;
continue;
} else if (state != 4) {
next = t & 3;
m_pos = op - 1;
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
NEED_OP(2);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
goto match_next;
} else {
next = t & 3;
m_pos = op - (1 + M2_MAX_OFFSET);
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
t = 3;
}
} else if (t >= 64) {
next = t & 3;
m_pos = op - 1;
m_pos -= (t >> 2) & 7;
m_pos -= *ip++ << 3;
t = (t >> 5) - 1 + (3 - 1);
} else if (t >= 32) {
t = (t & 31) + (3 - 1);
if (unlikely(t == 2)) {
while (unlikely(*ip == 0)) {
t += 255;
ip++;
NEED_IP(1);
}
t += 31 + *ip++;
NEED_IP(2);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
ip += 2;
m_pos -= next >> 2;
next &= 3;
} else {
m_pos = op;
m_pos -= (t & 8) << 11;
t = (t & 7) + (3 - 1);
if (unlikely(t == 2)) {
while (unlikely(*ip == 0)) {
t += 255;
ip++;
NEED_IP(1);
}
t += 7 + *ip++;
NEED_IP(2);
}
next = get_unaligned_le16(ip);
ip += 2;
m_pos -= next >> 2;
next &= 3;
if (m_pos == op)
goto eof_found;
m_pos -= 0x4000;
}
TEST_LB(m_pos);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
if (likely(HAVE_OP(t + 15))) {
do {
COPY8(op, m_pos);
op += 8;
m_pos += 8;
COPY8(op, m_pos);
op += 8;
m_pos += 8;
} while (op < oe);
op = oe;
if (HAVE_IP(6)) {
state = next;
COPY4(op, ip);
op += next;
ip += next;
continue;
}
} else {
NEED_OP(t);
do {
*op++ = *m_pos++;
} while (op < oe);
}
} else
#endif
{
unsigned char *oe = op + t;
NEED_OP(t);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
m_pos += 2;
do {
*op++ = *m_pos++;
} while (op < oe);
}
match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (likely(HAVE_IP(6) && HAVE_OP(4))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
NEED_IP(t + 3);
NEED_OP(t);
while (t > 0) {
*op++ = *ip++;
t--;
}
}
}
eof_found:
*out_len = op - out;
return (t != 3 ? LZO_E_ERROR :
ip == ip_end ? LZO_E_OK :
ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN);
input_overrun:
*out_len = op - out;
return LZO_E_INPUT_OVERRUN;
output_overrun:
*out_len = op - out;
return LZO_E_OUTPUT_OVERRUN;
lookbehind_overrun:
*out_len = op - out;
return LZO_E_LOOKBEHIND_OVERRUN;
}
| 274,406,254,790,435,130,000,000,000,000,000,000,000 | lzo1x_decompress_safe.c | 59,242,432,641,946,220,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-4608 | Multiple integer overflows in the lzo1x_decompress_safe function in lib/lzo/lzo1x_decompress_safe.c in the LZO decompressor in the Linux kernel before 3.15.2 allow context-dependent attackers to cause a denial of service (memory corruption) via a crafted Literal Run. NOTE: the author of the LZO algorithms says "the Linux kernel is *not* affected; media hype. | https://nvd.nist.gov/vuln/detail/CVE-2014-4608 |
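Editor's note: the lzo record above is about bounds checks that can themselves overflow when fed crafted lengths. The sketch below contrasts the two common ways of writing a "do we have n more bytes" test; names are hypothetical and this is not the kernel's NEED_IP/NEED_OP code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* "Do we have n more input bytes?" written two ways.  The pointer-addition
 * form can wrap for huge n; the subtraction form cannot, because ip <= end. */
static bool have_bytes_addition(const unsigned char *ip,
                                const unsigned char *end, size_t n)
{
    return ip + n <= end;                 /* ip + n may overflow the pointer */
}

static bool have_bytes_subtraction(const unsigned char *ip,
                                   const unsigned char *end, size_t n)
{
    return (size_t)(end - ip) >= n;       /* safe: no arithmetic on ip */
}

int main(void)
{
    unsigned char buf[16];
    const unsigned char *ip = buf, *end = buf + sizeof(buf);
    size_t huge = (size_t)-1;             /* attacker-influenced length */

    printf("addition form, n=8:       %d\n", have_bytes_addition(ip, end, 8));
    printf("subtraction form, n=8:    %d\n", have_bytes_subtraction(ip, end, 8));
    printf("subtraction form, n=huge: %d\n", have_bytes_subtraction(ip, end, huge));
    /* The addition form is deliberately not called with `huge`: ip + huge
     * would rely on wrapped/undefined pointer arithmetic. */
    return 0;
}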
1,235 | sgminer | 910c36089940e81fb85c65b8e63dcd2fac71470c | https://github.com/sgminer-dev/sgminer | https://github.com/sgminer-dev/sgminer/commit/910c36089940e81fb85c65b8e63dcd2fac71470c | stratum: parse_notify(): Don't die on malformed bbversion/prev_hash/nbit/ntime.
Might have introduced a memory leak, don't have time to check. :(
Should the other hex2bin()'s be checked?
Thanks to Mick Ayzenberg <mick.dejavusecurity.com> for finding this. | 1 | static bool parse_notify(struct pool *pool, json_t *val)
{
char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit,
*ntime, *header;
size_t cb1_len, cb2_len, alloc_len;
unsigned char *cb1, *cb2;
bool clean, ret = false;
int merkles, i;
json_t *arr;
arr = json_array_get(val, 4);
if (!arr || !json_is_array(arr))
goto out;
merkles = json_array_size(arr);
job_id = json_array_string(val, 0);
prev_hash = json_array_string(val, 1);
coinbase1 = json_array_string(val, 2);
coinbase2 = json_array_string(val, 3);
bbversion = json_array_string(val, 5);
nbit = json_array_string(val, 6);
ntime = json_array_string(val, 7);
clean = json_is_true(json_array_get(val, 8));
if (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) {
/* Annoying but we must not leak memory */
if (job_id)
free(job_id);
if (prev_hash)
free(prev_hash);
if (coinbase1)
free(coinbase1);
if (coinbase2)
free(coinbase2);
if (bbversion)
free(bbversion);
if (nbit)
free(nbit);
if (ntime)
free(ntime);
goto out;
}
cg_wlock(&pool->data_lock);
free(pool->swork.job_id);
free(pool->swork.prev_hash);
free(pool->swork.bbversion);
free(pool->swork.nbit);
free(pool->swork.ntime);
pool->swork.job_id = job_id;
pool->swork.prev_hash = prev_hash;
cb1_len = strlen(coinbase1) / 2;
cb2_len = strlen(coinbase2) / 2;
pool->swork.bbversion = bbversion;
pool->swork.nbit = nbit;
pool->swork.ntime = ntime;
pool->swork.clean = clean;
alloc_len = pool->swork.cb_len = cb1_len + pool->n1_len + pool->n2size + cb2_len;
pool->nonce2_offset = cb1_len + pool->n1_len;
for (i = 0; i < pool->swork.merkles; i++)
free(pool->swork.merkle_bin[i]);
if (merkles) {
pool->swork.merkle_bin = (unsigned char **)realloc(pool->swork.merkle_bin,
sizeof(char *) * merkles + 1);
for (i = 0; i < merkles; i++) {
char *merkle = json_array_string(arr, i);
pool->swork.merkle_bin[i] = (unsigned char *)malloc(32);
if (unlikely(!pool->swork.merkle_bin[i]))
quit(1, "Failed to malloc pool swork merkle_bin");
hex2bin(pool->swork.merkle_bin[i], merkle, 32);
free(merkle);
}
}
pool->swork.merkles = merkles;
if (clean)
pool->nonce2 = 0;
pool->merkle_offset = strlen(pool->swork.bbversion) +
strlen(pool->swork.prev_hash);
pool->swork.header_len = pool->merkle_offset +
/* merkle_hash */ 32 +
strlen(pool->swork.ntime) +
strlen(pool->swork.nbit) +
/* nonce */ 8 +
/* workpadding */ 96;
pool->merkle_offset /= 2;
pool->swork.header_len = pool->swork.header_len * 2 + 1;
align_len(&pool->swork.header_len);
header = (char *)alloca(pool->swork.header_len);
snprintf(header, pool->swork.header_len,
"%s%s%s%s%s%s%s",
pool->swork.bbversion,
pool->swork.prev_hash,
blank_merkel,
pool->swork.ntime,
pool->swork.nbit,
"00000000", /* nonce */
workpadding);
if (unlikely(!hex2bin(pool->header_bin, header, 128)))
quit(1, "Failed to convert header to header_bin in parse_notify");
cb1 = (unsigned char *)calloc(cb1_len, 1);
if (unlikely(!cb1))
quithere(1, "Failed to calloc cb1 in parse_notify");
hex2bin(cb1, coinbase1, cb1_len);
cb2 = (unsigned char *)calloc(cb2_len, 1);
if (unlikely(!cb2))
quithere(1, "Failed to calloc cb2 in parse_notify");
hex2bin(cb2, coinbase2, cb2_len);
free(pool->coinbase);
align_len(&alloc_len);
pool->coinbase = (unsigned char *)calloc(alloc_len, 1);
if (unlikely(!pool->coinbase))
quit(1, "Failed to calloc pool coinbase in parse_notify");
memcpy(pool->coinbase, cb1, cb1_len);
memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len);
memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, cb2_len);
cg_wunlock(&pool->data_lock);
if (opt_protocol) {
applog(LOG_DEBUG, "job_id: %s", job_id);
applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
applog(LOG_DEBUG, "bbversion: %s", bbversion);
applog(LOG_DEBUG, "nbit: %s", nbit);
applog(LOG_DEBUG, "ntime: %s", ntime);
applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
}
free(coinbase1);
free(coinbase2);
free(cb1);
free(cb2);
/* A notify message is the closest stratum gets to a getwork */
pool->getwork_requested++;
total_getworks++;
ret = true;
if (pool == current_pool())
opt_work_update = true;
out:
return ret;
}
| 317,403,549,402,048,870,000,000,000,000,000,000,000 | util.c | 165,475,851,610,177,230,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-4503 | The parse_notify function in util.c in sgminer before 4.2.2 and cgminer 3.3.0 through 4.0.1 allows man-in-the-middle attackers to cause a denial of service (application exit) via a crafted (1) bbversion, (2) prev_hash, (3) nbit, or (4) ntime parameter in a mining.notify action stratum message. | https://nvd.nist.gov/vuln/detail/CVE-2014-4503 |
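Editor's note: the sgminer fix above amounts to not dereferencing the result of json_array_string() when a notify field is malformed. A standalone sketch of the "collect fields, bail out and free on any NULL" pattern follows; the helpers are stand-ins, not the sgminer API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for json_array_string(): may return NULL for a malformed field. */
static char *get_field(const char *maybe)
{
    return maybe ? strdup(maybe) : NULL;
}

/* If any required field is missing, free what we did get and report failure
 * instead of dereferencing NULL later. */
static int parse_notify_fields(const char *a, const char *b, const char *c)
{
    char *job_id = get_field(a);
    char *prev_hash = get_field(b);
    char *ntime = get_field(c);
    int ok = job_id && prev_hash && ntime;

    if (!ok) {
        fprintf(stderr, "malformed notify: missing field\n");
        free(job_id);      /* free(NULL) is a no-op, so no leak either way */
        free(prev_hash);
        free(ntime);
        return -1;
    }
    printf("notify ok: job=%s\n", job_id);
    free(job_id);
    free(prev_hash);
    free(ntime);
    return 0;
}

int main(void)
{
    parse_notify_fields("1a2b", "00ff", NULL);   /* malformed: rejected */
    parse_notify_fields("1a2b", "00ff", "5390"); /* accepted */
    return 0;
}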
1,236 | cgminer | e1c5050734123973b99d181c45e74b2cbb00272e | https://github.com/ckolivas/cgminer | https://github.com/ckolivas/cgminer/commit/e1c5050734123973b99d181c45e74b2cbb00272e | Do some random sanity checking for stratum message parsing | 1 | bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
char url_address[256], port[6];
int url_len, port_len = 0;
*sockaddr_url = url;
url_begin = strstr(url, "//");
if (!url_begin)
url_begin = url;
else
url_begin += 2;
/* Look for numeric ipv6 entries */
ipv6_begin = strstr(url_begin, "[");
ipv6_end = strstr(url_begin, "]");
if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
url_end = strstr(ipv6_end, ":");
else
url_end = strstr(url_begin, ":");
if (url_end) {
url_len = url_end - url_begin;
port_len = strlen(url_begin) - url_len - 1;
if (port_len < 1)
return false;
port_start = url_end + 1;
} else
url_len = strlen(url_begin);
if (url_len < 1)
return false;
sprintf(url_address, "%.*s", url_len, url_begin);
if (port_len) {
char *slash;
snprintf(port, 6, "%.*s", port_len, port_start);
slash = strchr(port, '/');
if (slash)
*slash = '\0';
} else
strcpy(port, "80");
*sockaddr_port = strdup(port);
*sockaddr_url = strdup(url_address);
return true;
}
| 268,425,937,881,460,200,000,000,000,000,000,000,000 | util.c | 312,757,577,260,077,530,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-4502 | Multiple heap-based buffer overflows in the parse_notify function in sgminer before 4.2.2, cgminer before 4.3.5, and BFGMiner before 4.1.0 allow remote pool servers to have unspecified impact via a (1) large or (2) negative value in the Extranonc2_size parameter in a mining.subscribe response and a crafted mining.notify request. | https://nvd.nist.gov/vuln/detail/CVE-2014-4502 |
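Editor's note: extract_sockaddr above formats a URL of pool-controlled length into a fixed 256-byte buffer with sprintf("%.*s", ...). The sketch below shows the bounded alternative; the function name and layout are illustrative, not the cgminer code.

#include <stdio.h>
#include <string.h>

/* Copy the host part of a URL into a fixed buffer.  Returns 0 on success,
 * -1 if the host would not fit. */
static int copy_host(char *dst, size_t dstsz, const char *begin, size_t len)
{
    if (len + 1 > dstsz)          /* too long for the fixed buffer: refuse */
        return -1;
    /* snprintf always NUL-terminates and never writes past dstsz. */
    snprintf(dst, dstsz, "%.*s", (int)len, begin);
    return 0;
}

int main(void)
{
    char host[256];
    const char *url = "stratum+tcp://pool.example.invalid:3333";
    const char *begin = strstr(url, "//") ? strstr(url, "//") + 2 : url;
    const char *colon = strrchr(begin, ':');
    size_t len = colon ? (size_t)(colon - begin) : strlen(begin);

    if (copy_host(host, sizeof(host), begin, len) == 0)
        printf("host: %s\n", host);
    return 0;
}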
1,237 | cgminer | e1c5050734123973b99d181c45e74b2cbb00272e | https://github.com/ckolivas/cgminer | https://github.com/ckolivas/cgminer/commit/e1c5050734123973b99d181c45e74b2cbb00272e | Do some random sanity checking for stratum message parsing | 1 | static bool parse_notify(struct pool *pool, json_t *val)
{
char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit,
*ntime, header[228];
unsigned char *cb1 = NULL, *cb2 = NULL;
size_t cb1_len, cb2_len, alloc_len;
bool clean, ret = false;
int merkles, i;
json_t *arr;
arr = json_array_get(val, 4);
if (!arr || !json_is_array(arr))
goto out;
merkles = json_array_size(arr);
job_id = json_array_string(val, 0);
prev_hash = __json_array_string(val, 1);
coinbase1 = json_array_string(val, 2);
coinbase2 = json_array_string(val, 3);
bbversion = __json_array_string(val, 5);
nbit = __json_array_string(val, 6);
ntime = __json_array_string(val, 7);
clean = json_is_true(json_array_get(val, 8));
if (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) {
/* Annoying but we must not leak memory */
if (job_id)
free(job_id);
if (coinbase1)
free(coinbase1);
if (coinbase2)
free(coinbase2);
goto out;
}
cg_wlock(&pool->data_lock);
free(pool->swork.job_id);
pool->swork.job_id = job_id;
snprintf(pool->prev_hash, 65, "%s", prev_hash);
cb1_len = strlen(coinbase1) / 2;
cb2_len = strlen(coinbase2) / 2;
snprintf(pool->bbversion, 9, "%s", bbversion);
snprintf(pool->nbit, 9, "%s", nbit);
snprintf(pool->ntime, 9, "%s", ntime);
pool->swork.clean = clean;
alloc_len = pool->coinbase_len = cb1_len + pool->n1_len + pool->n2size + cb2_len;
pool->nonce2_offset = cb1_len + pool->n1_len;
for (i = 0; i < pool->merkles; i++)
free(pool->swork.merkle_bin[i]);
if (merkles) {
pool->swork.merkle_bin = realloc(pool->swork.merkle_bin,
sizeof(char *) * merkles + 1);
for (i = 0; i < merkles; i++) {
char *merkle = json_array_string(arr, i);
pool->swork.merkle_bin[i] = malloc(32);
if (unlikely(!pool->swork.merkle_bin[i]))
quit(1, "Failed to malloc pool swork merkle_bin");
if (opt_protocol)
applog(LOG_DEBUG, "merkle %d: %s", i, merkle);
ret = hex2bin(pool->swork.merkle_bin[i], merkle, 32);
free(merkle);
if (unlikely(!ret)) {
applog(LOG_ERR, "Failed to convert merkle to merkle_bin in parse_notify");
goto out_unlock;
}
}
}
pool->merkles = merkles;
if (clean)
pool->nonce2 = 0;
#if 0
header_len = strlen(pool->bbversion) +
strlen(pool->prev_hash);
/* merkle_hash */ 32 +
strlen(pool->ntime) +
strlen(pool->nbit) +
/* nonce */ 8 +
/* workpadding */ 96;
#endif
snprintf(header, 225,
"%s%s%s%s%s%s%s",
pool->bbversion,
pool->prev_hash,
blank_merkle,
pool->ntime,
pool->nbit,
"00000000", /* nonce */
workpadding);
ret = hex2bin(pool->header_bin, header, 112);
if (unlikely(!ret)) {
applog(LOG_ERR, "Failed to convert header to header_bin in parse_notify");
goto out_unlock;
}
cb1 = alloca(cb1_len);
ret = hex2bin(cb1, coinbase1, cb1_len);
if (unlikely(!ret)) {
applog(LOG_ERR, "Failed to convert cb1 to cb1_bin in parse_notify");
goto out_unlock;
}
cb2 = alloca(cb2_len);
ret = hex2bin(cb2, coinbase2, cb2_len);
if (unlikely(!ret)) {
applog(LOG_ERR, "Failed to convert cb2 to cb2_bin in parse_notify");
goto out_unlock;
}
free(pool->coinbase);
align_len(&alloc_len);
pool->coinbase = calloc(alloc_len, 1);
if (unlikely(!pool->coinbase))
quit(1, "Failed to calloc pool coinbase in parse_notify");
memcpy(pool->coinbase, cb1, cb1_len);
memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len);
memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, cb2_len);
if (opt_debug) {
char *cb = bin2hex(pool->coinbase, pool->coinbase_len);
applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb);
free(cb);
}
out_unlock:
cg_wunlock(&pool->data_lock);
if (opt_protocol) {
applog(LOG_DEBUG, "job_id: %s", job_id);
applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
applog(LOG_DEBUG, "bbversion: %s", bbversion);
applog(LOG_DEBUG, "nbit: %s", nbit);
applog(LOG_DEBUG, "ntime: %s", ntime);
applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
}
free(coinbase1);
free(coinbase2);
/* A notify message is the closest stratum gets to a getwork */
pool->getwork_requested++;
total_getworks++;
if (pool == current_pool())
opt_work_update = true;
out:
return ret;
}
| 84,334,959,585,750,240,000,000,000,000,000,000,000 | util.c | 206,367,765,513,721,400,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-4502 | Multiple heap-based buffer overflows in the parse_notify function in sgminer before 4.2.2, cgminer before 4.3.5, and BFGMiner before 4.1.0 allow remote pool servers to have unspecified impact via a (1) large or (2) negative value in the Extranonc2_size parameter in a mining.subscribe response and a crafted mining.notify request. | https://nvd.nist.gov/vuln/detail/CVE-2014-4502 |
1,238 | cgminer | e1c5050734123973b99d181c45e74b2cbb00272e | https://github.com/ckolivas/cgminer | https://github.com/ckolivas/cgminer/commit/e1c5050734123973b99d181c45e74b2cbb00272e | Do some random sanity checking for stratum message parsing | 1 | static bool parse_reconnect(struct pool *pool, json_t *val)
{
char *sockaddr_url, *stratum_port, *tmp;
char *url, *port, address[256];
memset(address, 0, 255);
url = (char *)json_string_value(json_array_get(val, 0));
if (!url)
url = pool->sockaddr_url;
else {
char *dot_pool, *dot_reconnect;
dot_pool = strchr(pool->sockaddr_url, '.');
if (!dot_pool) {
applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'",
pool->sockaddr_url);
return false;
}
dot_reconnect = strchr(url, '.');
if (!dot_reconnect) {
applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'",
url);
return false;
}
if (strcmp(dot_pool, dot_reconnect)) {
applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'",
pool->sockaddr_url);
return false;
}
}
port = (char *)json_string_value(json_array_get(val, 1));
if (!port)
port = pool->stratum_port;
sprintf(address, "%s:%s", url, port);
if (!extract_sockaddr(address, &sockaddr_url, &stratum_port))
return false;
applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address);
clear_pool_work(pool);
mutex_lock(&pool->stratum_lock);
__suspend_stratum(pool);
tmp = pool->sockaddr_url;
pool->sockaddr_url = sockaddr_url;
pool->stratum_url = pool->sockaddr_url;
free(tmp);
tmp = pool->stratum_port;
pool->stratum_port = stratum_port;
free(tmp);
mutex_unlock(&pool->stratum_lock);
if (!restart_stratum(pool)) {
pool_failed(pool);
return false;
}
return true;
}
| 137,361,347,459,447,410,000,000,000,000,000,000,000 | util.c | 206,367,765,513,721,400,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-4502 | Multiple heap-based buffer overflows in the parse_notify function in sgminer before 4.2.2, cgminer before 4.3.5, and BFGMiner before 4.1.0 allow remote pool servers to have unspecified impact via a (1) large or (2) negative value in the Extranonc2_size parameter in a mining.subscribe response and a crafted mining.notify request. | https://nvd.nist.gov/vuln/detail/CVE-2014-4502 |
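Editor's note: parse_reconnect in the record above concatenates a pool-supplied host and port into a fixed char address[256] with sprintf(). A bounded, truncation-aware sketch of that step follows; names are hypothetical.

#include <stdio.h>
#include <string.h>

/* Join a pool-supplied host and port into a fixed buffer without being able
 * to overflow it, and report when the input would not fit. */
static int build_address(char *dst, size_t dstsz,
                         const char *host, const char *port)
{
    int n = snprintf(dst, dstsz, "%s:%s", host, port);
    if (n < 0 || (size_t)n >= dstsz)
        return -1;        /* truncated: treat as a malformed reconnect request */
    return 0;
}

int main(void)
{
    char address[256];
    if (build_address(address, sizeof(address),
                      "stratum.example.invalid", "3333") == 0)
        printf("reconnect to %s\n", address);
    return 0;
}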
1,239 | krb5 | a7886f0ed1277c69142b14a2c6629175a6331edc | https://github.com/krb5/krb5 | https://github.com/krb5/krb5/commit/a7886f0ed1277c69142b14a2c6629175a6331edc | Fix null deref in SPNEGO acceptor [CVE-2014-4344]
When processing a continuation token, acc_ctx_cont was dereferencing
the initial byte of the token without checking the length. This could
result in a null dereference.
CVE-2014-4344:
In MIT krb5 1.5 and newer, an unauthenticated or partially
authenticated remote attacker can cause a NULL dereference and
application crash during a SPNEGO negotiation by sending an empty
token as the second or later context token from initiator to acceptor.
The attacker must provide at least one valid context token in the
security context negotiation before sending the empty token. This can
be done by an unauthenticated attacker by forcing SPNEGO to
renegotiate the underlying mechanism, or by using IAKERB to wrap an
unauthenticated AS-REQ as the first token.
CVSSv2 Vector: AV:N/AC:L/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[kaduk@mit.edu: CVE summary, CVSSv2 vector]
(cherry picked from commit 524688ce87a15fc75f87efc8c039ba4c7d5c197b)
ticket: 7970
version_fixed: 1.12.2
status: resolved | 1 | acc_ctx_cont(OM_uint32 *minstat,
gss_buffer_t buf,
gss_ctx_id_t *ctx,
gss_buffer_t *responseToken,
gss_buffer_t *mechListMIC,
OM_uint32 *negState,
send_token_flag *return_token)
{
OM_uint32 ret, tmpmin;
gss_OID supportedMech;
spnego_gss_ctx_id_t sc;
unsigned int len;
unsigned char *ptr, *bufstart;
sc = (spnego_gss_ctx_id_t)*ctx;
ret = GSS_S_DEFECTIVE_TOKEN;
*negState = REJECT;
*minstat = 0;
supportedMech = GSS_C_NO_OID;
*return_token = ERROR_TOKEN_SEND;
*responseToken = *mechListMIC = GSS_C_NO_BUFFER;
ptr = bufstart = buf->value;
#define REMAIN (buf->length - (ptr - bufstart))
if (REMAIN > INT_MAX)
return GSS_S_DEFECTIVE_TOKEN;
/*
* Attempt to work with old Sun SPNEGO.
*/
if (*ptr == HEADER_ID) {
ret = g_verify_token_header(gss_mech_spnego,
&len, &ptr, 0, REMAIN);
if (ret) {
*minstat = ret;
return GSS_S_DEFECTIVE_TOKEN;
}
}
if (*ptr != (CONTEXT | 0x01)) {
return GSS_S_DEFECTIVE_TOKEN;
}
ret = get_negTokenResp(minstat, ptr, REMAIN,
negState, &supportedMech,
responseToken, mechListMIC);
if (ret != GSS_S_COMPLETE)
goto cleanup;
if (*responseToken == GSS_C_NO_BUFFER &&
*mechListMIC == GSS_C_NO_BUFFER) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto cleanup;
}
if (supportedMech != GSS_C_NO_OID) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto cleanup;
}
sc->firstpass = 0;
*negState = ACCEPT_INCOMPLETE;
*return_token = CONT_TOKEN_SEND;
cleanup:
if (supportedMech != GSS_C_NO_OID) {
generic_gss_release_oid(&tmpmin, &supportedMech);
}
return ret;
#undef REMAIN
}
| 111,177,546,391,845,930,000,000,000,000,000,000,000 | spnego_mech.c | 290,065,421,106,050,240,000,000,000,000,000,000,000 | [
"CWE-476"
] | CVE-2014-4344 | The acc_ctx_cont function in the SPNEGO acceptor in lib/gssapi/spnego/spnego_mech.c in MIT Kerberos 5 (aka krb5) 1.5.x through 1.12.x before 1.12.2 allows remote attackers to cause a denial of service (NULL pointer dereference and application crash) via an empty continuation token at a certain point during a SPNEGO negotiation. | https://nvd.nist.gov/vuln/detail/CVE-2014-4344 |
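Editor's note: the krb5 record above describes dereferencing the first byte of a continuation token without checking that the token is non-empty. A minimal sketch of the length-before-dereference pattern follows; the structures are stand-ins, not the GSSAPI types.

#include <stddef.h>
#include <stdio.h>

/* Minimal model of a GSS buffer: a length and a pointer that may be NULL
 * when the peer sends an empty token. */
struct demo_buf { size_t length; const unsigned char *value; };

#define DEMO_HEADER_ID 0x60

/* Peek at the first token byte only after confirming the token is non-empty. */
static int classify_token(const struct demo_buf *buf)
{
    if (buf->length == 0 || buf->value == NULL)
        return -1;                          /* defective token, not a crash */
    return buf->value[0] == DEMO_HEADER_ID; /* safe to look at byte 0 now */
}

int main(void)
{
    unsigned char tok[] = { DEMO_HEADER_ID, 0x07 };
    struct demo_buf ok = { sizeof(tok), tok };
    struct demo_buf empty = { 0, NULL };

    printf("normal token: %d\n", classify_token(&ok));
    printf("empty token:  %d\n", classify_token(&empty));
    return 0;
}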
1,240 | krb5 | f18ddf5d82de0ab7591a36e465bc24225776940f | https://github.com/krb5/krb5 | https://github.com/krb5/krb5/commit/f18ddf5d82de0ab7591a36e465bc24225776940f | Fix double-free in SPNEGO [CVE-2014-4343]
In commit cd7d6b08 ("Verify acceptor's mech in SPNEGO initiator") the
pointer sc->internal_mech became an alias into sc->mech_set->elements,
which should be considered constant for the duration of the SPNEGO
context. So don't free it.
CVE-2014-4343:
In MIT krb5 releases 1.10 and newer, an unauthenticated remote
attacker with the ability to spoof packets appearing to be from a
GSSAPI acceptor can cause a double-free condition in GSSAPI initiators
(clients) which are using the SPNEGO mechanism, by returning a
different underlying mechanism than was proposed by the initiator. At
this stage of the negotiation, the acceptor is unauthenticated, and
the acceptor's response could be spoofed by an attacker with the
ability to inject traffic to the initiator.
Historically, some double-free vulnerabilities can be translated into
remote code execution, though the necessary exploits must be tailored
to the individual application and are usually quite
complicated. Double-frees can also be exploited to cause an
application crash, for a denial of service. However, most GSSAPI
client applications are not vulnerable, as the SPNEGO mechanism is not
used by default (when GSS_C_NO_OID is passed as the mech_type argument
to gss_init_sec_context()). The most common use of SPNEGO is for
HTTP-Negotiate, used in web browsers and other web clients. Most such
clients are believed to not offer HTTP-Negotiate by default, instead
requiring a whitelist of sites for which it may be used to be
configured. If the whitelist is configured to only allow
HTTP-Negotiate over TLS connections ("https://"), a successful
attacker must also spoof the web server's SSL certificate, due to the
way the WWW-Authenticate header is sent in a 401 (Unauthorized)
response message. Unfortunately, many instructions for enabling
HTTP-Negotiate in common web browsers do not include a TLS
requirement.
CVSSv2 Vector: AV:N/AC:H/Au:N/C:C/I:C/A:C/E:POC/RL:OF/RC:C
[kaduk@mit.edu: CVE summary and CVSSv2 vector]
ticket: 7969 (new)
target_version: 1.12.2
tags: pullup | 1 | init_ctx_reselect(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
OM_uint32 acc_negState, gss_OID supportedMech,
gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
OM_uint32 *negState, send_token_flag *tokflag)
{
OM_uint32 tmpmin;
size_t i;
generic_gss_release_oid(&tmpmin, &sc->internal_mech);
gss_delete_sec_context(&tmpmin, &sc->ctx_handle,
GSS_C_NO_BUFFER);
/* Find supportedMech in sc->mech_set. */
for (i = 0; i < sc->mech_set->count; i++) {
if (g_OID_equal(supportedMech, &sc->mech_set->elements[i]))
break;
}
if (i == sc->mech_set->count)
return GSS_S_DEFECTIVE_TOKEN;
sc->internal_mech = &sc->mech_set->elements[i];
/*
* Windows 2003 and earlier don't correctly send a
* negState of request-mic when counter-proposing a
* mechanism. They probably don't handle mechListMICs
* properly either.
*/
if (acc_negState != REQUEST_MIC)
return GSS_S_DEFECTIVE_TOKEN;
sc->mech_complete = 0;
sc->mic_reqd = 1;
*negState = REQUEST_MIC;
*tokflag = CONT_TOKEN_SEND;
return GSS_S_CONTINUE_NEEDED;
}
| 116,847,216,199,976,580,000,000,000,000,000,000,000 | spnego_mech.c | 145,186,693,848,278,750,000,000,000,000,000,000,000 | [
"CWE-415"
] | CVE-2014-4343 | Double free vulnerability in the init_ctx_reselect function in the SPNEGO initiator in lib/gssapi/spnego/spnego_mech.c in MIT Kerberos 5 (aka krb5) 1.10.x through 1.12.x before 1.12.2 allows remote attackers to cause a denial of service (memory corruption) or possibly execute arbitrary code via network traffic that appears to come from an intended acceptor, but specifies a security mechanism different from the one proposed by the initiator. | https://nvd.nist.gov/vuln/detail/CVE-2014-4343 |
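Editor's note: the double-free above came from freeing a pointer that had become an alias into a table the context does not own. The sketch below models the owned-versus-borrowed distinction in isolation; it is not the SPNEGO code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A context that may hold either its own heap copy of a string, or a
 * borrowed alias into a table it does not own. */
struct demo_ctx {
    char *mech;       /* selected mechanism */
    int   mech_owned; /* 1 if ctx must free it, 0 if it aliases the table */
};

static void ctx_release(struct demo_ctx *ctx)
{
    if (ctx->mech_owned)      /* only free what we actually own */
        free(ctx->mech);
    ctx->mech = NULL;
    ctx->mech_owned = 0;
}

int main(void)
{
    static char table[][8] = { "krb5", "ntlm" };   /* the shared "mech_set" */
    struct demo_ctx ctx = { strdup("spnego"), 1 };

    ctx_release(&ctx);            /* owned copy: freeing is correct */
    ctx.mech = table[0];          /* now an alias into the shared table */
    ctx.mech_owned = 0;
    ctx_release(&ctx);            /* must NOT free: that would corrupt the heap */
    printf("released cleanly\n");
    return 0;
}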
1,242 | php-src | b34d7849ed90ced9345f8ea1c59bc8d101c18468 | https://github.com/php/php-src | https://github.com/php/php-src/commit/b34d7849ed90ced9345f8ea1c59bc8d101c18468 | Merge branch 'PHP-5.6'
* PHP-5.6:
Fix potential segfault in dns_get_record() | 1 | static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)
{
u_short type, class, dlen;
u_long ttl;
long n, i;
u_short s;
u_char *tp, *p;
char name[MAXHOSTNAMELEN];
int have_v6_break = 0, in_v6_break = 0;
*subarray = NULL;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
GETSHORT(type, cp);
GETSHORT(class, cp);
GETLONG(ttl, cp);
GETSHORT(dlen, cp);
if (type_to_fetch != T_ANY && type != type_to_fetch) {
cp += dlen;
return cp;
}
if (!store) {
cp += dlen;
return cp;
}
ALLOC_INIT_ZVAL(*subarray);
array_init(*subarray);
add_assoc_string(*subarray, "host", name, 1);
add_assoc_string(*subarray, "class", "IN", 1);
add_assoc_long(*subarray, "ttl", ttl);
if (raw) {
add_assoc_long(*subarray, "type", type);
add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1);
cp += dlen;
return cp;
}
switch (type) {
case DNS_T_A:
add_assoc_string(*subarray, "type", "A", 1);
snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]);
add_assoc_string(*subarray, "ip", name, 1);
cp += dlen;
break;
case DNS_T_MX:
add_assoc_string(*subarray, "type", "MX", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
/* no break; */
case DNS_T_CNAME:
if (type == DNS_T_CNAME) {
add_assoc_string(*subarray, "type", "CNAME", 1);
}
/* no break; */
case DNS_T_NS:
if (type == DNS_T_NS) {
add_assoc_string(*subarray, "type", "NS", 1);
}
/* no break; */
case DNS_T_PTR:
if (type == DNS_T_PTR) {
add_assoc_string(*subarray, "type", "PTR", 1);
}
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_HINFO:
/* See RFC 1010 for values */
add_assoc_string(*subarray, "type", "HINFO", 1);
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1);
cp += n;
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "os", (char*)cp, n, 1);
cp += n;
break;
case DNS_T_TXT:
{
int ll = 0;
zval *entries = NULL;
add_assoc_string(*subarray, "type", "TXT", 1);
tp = emalloc(dlen + 1);
MAKE_STD_ZVAL(entries);
array_init(entries);
while (ll < dlen) {
n = cp[ll];
memcpy(tp + ll , cp + ll + 1, n);
add_next_index_stringl(entries, cp + ll + 1, n, 1);
ll = ll + n + 1;
}
tp[dlen] = '\0';
cp += dlen;
add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0);
add_assoc_zval(*subarray, "entries", entries);
}
break;
case DNS_T_SOA:
add_assoc_string(*subarray, "type", "SOA", 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "mname", name, 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "rname", name, 1);
GETLONG(n, cp);
add_assoc_long(*subarray, "serial", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "refresh", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "retry", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "expire", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "minimum-ttl", n);
break;
case DNS_T_AAAA:
tp = (u_char*)name;
for(i=0; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "type", "AAAA", 1);
add_assoc_string(*subarray, "ipv6", name, 1);
break;
case DNS_T_A6:
p = cp;
add_assoc_string(*subarray, "type", "A6", 1);
n = ((int)cp[0]) & 0xFF;
cp++;
add_assoc_long(*subarray, "masklen", n);
tp = (u_char*)name;
if (n > 15) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
}
if (n % 16 > 8) {
/* Partial short */
if (cp[0] != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
sprintf((char*)tp, "%x", cp[0] & 0xFF);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
cp++;
}
for (i = (n + 8) / 16; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "ipv6", name, 1);
if (cp < p + dlen) {
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "chain", name, 1);
}
break;
case DNS_T_SRV:
add_assoc_string(*subarray, "type", "SRV", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "weight", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "port", n);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_NAPTR:
add_assoc_string(*subarray, "type", "NAPTR", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "order", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pref", n);
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1);
cp += n;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "replacement", name, 1);
break;
default:
zval_ptr_dtor(subarray);
*subarray = NULL;
cp += dlen;
break;
}
return cp;
}
| 61,555,348,514,619,220,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2014-4049 | Heap-based buffer overflow in the php_parserr function in ext/standard/dns.c in PHP 5.6.0beta4 and earlier allows remote servers to cause a denial of service (crash) and possibly execute arbitrary code via a crafted DNS TXT record, related to the dns_get_record function. | https://nvd.nist.gov/vuln/detail/CVE-2014-4049 |
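Editor's note: in the TXT branch of php_parserr above, the per-chunk length byte is trusted even when it claims more data than the record contains, overflowing a buffer sized to dlen. A bounded reassembly sketch follows; it is illustrative only and not the PHP implementation.

#include <stdio.h>
#include <string.h>

/* Reassemble DNS TXT character-strings (each prefixed by a 1-byte length)
 * into one buffer of at most dlen bytes, clamping any length byte that
 * claims more data than the record actually contains. */
static size_t join_txt(const unsigned char *rdata, size_t dlen, char *out)
{
    size_t in = 0, outlen = 0;

    while (in < dlen) {
        size_t n = rdata[in++];            /* declared chunk length */
        if (n > dlen - in)
            n = dlen - in;                 /* clamp: never read/write past dlen */
        memcpy(out + outlen, rdata + in, n);
        in += n;
        outlen += n;
    }
    out[outlen] = '\0';
    return outlen;
}

int main(void)
{
    /* Second length byte (0x40) lies: only 3 bytes really follow it. */
    const unsigned char rdata[] = { 3, 'f', 'o', 'o', 0x40, 'b', 'a', 'r' };
    char buf[sizeof(rdata) + 1];
    size_t n = join_txt(rdata, sizeof(rdata), buf);
    printf("joined %zu bytes: %s\n", n, buf);
    return 0;
}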
1,243 | linux | 4442dc8a92b8f9ad8ee9e7f8438f4c04c03a22dc | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4442dc8a92b8f9ad8ee9e7f8438f4c04c03a22dc | target/rd: Refactor rd_build_device_space + rd_release_device_space
This patch refactors rd_build_device_space() + rd_release_device_space()
into rd_allocate_sgl_table() + rd_release_device_space() so that they
may be used separately for setup + release of protection information
scatterlists.
Also add explicit memset of pages within rd_allocate_sgl_table() based
upon passed 'init_payload' value.
v2 changes:
- Drop unused sg_table from rd_release_device_space (Wei)
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org> | 1 | static int rd_build_device_space(struct rd_dev *rd_dev)
{
u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
/* Don't need backing pages for NULLIO */
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
total_sg_needed = rd_dev->rd_page_count;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!sg_table) {
pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
rd_dev->sg_table_array = sg_table;
rd_dev->sg_table_count = sg_tables;
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
if (!sg) {
pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
sg_table[i].page_start_offset = page_offset;
sg_table[i++].page_end_offset = (page_offset + sg_per_table)
- 1;
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
return 0;
}
| 231,707,640,209,773,850,000,000,000,000,000,000,000 | target_core_rd.c | 229,492,553,967,661,880,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2014-4027 | The rd_build_device_space function in drivers/target/target_core_rd.c in the Linux kernel before 3.14 does not properly initialize a certain data structure, which allows local users to obtain sensitive information from ramdisk_mcp memory by leveraging access to a SCSI initiator. | https://nvd.nist.gov/vuln/detail/CVE-2014-4027 |
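Editor's note: the issue above is backing pages handed to an initiator without being zeroed, exposing stale kernel memory. As a rough userspace analog (the kernel equivalent would be zeroing the pages, e.g. via memset or a zeroing allocation flag, before exporting them), the sketch below contrasts zeroed and non-zeroed backing storage.

#include <stdio.h>
#include <stdlib.h>

/* Handing out backing storage without zeroing it can expose whatever the
 * allocator previously held; the zeroed variant cannot. */
static void *alloc_backing(size_t size, int zero_it)
{
    return zero_it ? calloc(1, size)     /* zeroed: nothing stale leaks */
                   : malloc(size);       /* may contain old heap contents */
}

int main(void)
{
    size_t size = 64;
    unsigned char *blk = alloc_backing(size, 1);
    if (!blk)
        return 1;
    /* With the zeroed variant every byte is 0 before first use. */
    printf("first byte of fresh backing store: %u\n", blk[0]);
    free(blk);
    return 0;
}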
1,251 | miniupnp | 3a87aa2f10bd7f1408e1849bdb59c41dd63a9fe9 | https://github.com/miniupnp/miniupnp | https://github.com/miniupnp/miniupnp/commit/3a87aa2f10bd7f1408e1849bdb59c41dd63a9fe9 | miniwget.c: fixed potential buffer overrun | 1 | getHTTPResponse(int s, int * size)
{
char buf[2048];
int n;
int endofheaders = 0;
int chunked = 0;
int content_length = -1;
unsigned int chunksize = 0;
unsigned int bytestocopy = 0;
/* buffers : */
char * header_buf;
unsigned int header_buf_len = 2048;
unsigned int header_buf_used = 0;
char * content_buf;
unsigned int content_buf_len = 2048;
unsigned int content_buf_used = 0;
char chunksize_buf[32];
unsigned int chunksize_buf_index;
header_buf = malloc(header_buf_len);
content_buf = malloc(content_buf_len);
chunksize_buf[0] = '\0';
chunksize_buf_index = 0;
while((n = receivedata(s, buf, 2048, 5000, NULL)) > 0)
{
if(endofheaders == 0)
{
int i;
int linestart=0;
int colon=0;
int valuestart=0;
if(header_buf_used + n > header_buf_len) {
header_buf = realloc(header_buf, header_buf_used + n);
header_buf_len = header_buf_used + n;
}
memcpy(header_buf + header_buf_used, buf, n);
header_buf_used += n;
/* search for CR LF CR LF (end of headers)
* recognize also LF LF */
i = 0;
while(i < ((int)header_buf_used-1) && (endofheaders == 0)) {
if(header_buf[i] == '\r') {
i++;
if(header_buf[i] == '\n') {
i++;
if(i < (int)header_buf_used && header_buf[i] == '\r') {
i++;
if(i < (int)header_buf_used && header_buf[i] == '\n') {
endofheaders = i+1;
}
}
}
} else if(header_buf[i] == '\n') {
i++;
if(header_buf[i] == '\n') {
endofheaders = i+1;
}
}
i++;
}
if(endofheaders == 0)
continue;
/* parse header lines */
for(i = 0; i < endofheaders - 1; i++) {
if(colon <= linestart && header_buf[i]==':')
{
colon = i;
while(i < (endofheaders-1)
&& (header_buf[i+1] == ' ' || header_buf[i+1] == '\t'))
i++;
valuestart = i + 1;
}
/* detecting end of line */
else if(header_buf[i]=='\r' || header_buf[i]=='\n')
{
if(colon > linestart && valuestart > colon)
{
#ifdef DEBUG
printf("header='%.*s', value='%.*s'\n",
colon-linestart, header_buf+linestart,
i-valuestart, header_buf+valuestart);
#endif
if(0==strncasecmp(header_buf+linestart, "content-length", colon-linestart))
{
content_length = atoi(header_buf+valuestart);
#ifdef DEBUG
printf("Content-Length: %d\n", content_length);
#endif
}
else if(0==strncasecmp(header_buf+linestart, "transfer-encoding", colon-linestart)
&& 0==strncasecmp(header_buf+valuestart, "chunked", 7))
{
#ifdef DEBUG
printf("chunked transfer-encoding!\n");
#endif
chunked = 1;
}
}
while(header_buf[i]=='\r' || header_buf[i] == '\n')
i++;
linestart = i;
colon = linestart;
valuestart = 0;
}
}
/* copy the remaining of the received data back to buf */
n = header_buf_used - endofheaders;
memcpy(buf, header_buf + endofheaders, n);
/* if(headers) */
}
if(endofheaders)
{
/* content */
if(chunked)
{
int i = 0;
while(i < n)
{
if(chunksize == 0)
{
/* reading chunk size */
if(chunksize_buf_index == 0) {
/* skipping any leading CR LF */
if(i<n && buf[i] == '\r') i++;
if(i<n && buf[i] == '\n') i++;
}
while(i<n && isxdigit(buf[i])
&& chunksize_buf_index < (sizeof(chunksize_buf)-1))
{
chunksize_buf[chunksize_buf_index++] = buf[i];
chunksize_buf[chunksize_buf_index] = '\0';
i++;
}
while(i<n && buf[i] != '\r' && buf[i] != '\n')
i++; /* discarding chunk-extension */
if(i<n && buf[i] == '\r') i++;
if(i<n && buf[i] == '\n') {
unsigned int j;
for(j = 0; j < chunksize_buf_index; j++) {
if(chunksize_buf[j] >= '0'
&& chunksize_buf[j] <= '9')
chunksize = (chunksize << 4) + (chunksize_buf[j] - '0');
else
chunksize = (chunksize << 4) + ((chunksize_buf[j] | 32) - 'a' + 10);
}
chunksize_buf[0] = '\0';
chunksize_buf_index = 0;
i++;
} else {
/* not finished to get chunksize */
continue;
}
#ifdef DEBUG
printf("chunksize = %u (%x)\n", chunksize, chunksize);
#endif
if(chunksize == 0)
{
#ifdef DEBUG
printf("end of HTTP content - %d %d\n", i, n);
/*printf("'%.*s'\n", n-i, buf+i);*/
#endif
goto end_of_stream;
}
}
bytestocopy = ((int)chunksize < (n - i))?chunksize:(unsigned int)(n - i);
if((content_buf_used + bytestocopy) > content_buf_len)
{
if(content_length >= (int)(content_buf_used + bytestocopy)) {
content_buf_len = content_length;
} else {
content_buf_len = content_buf_used + bytestocopy;
}
content_buf = (char *)realloc((void *)content_buf,
content_buf_len);
}
memcpy(content_buf + content_buf_used, buf + i, bytestocopy);
content_buf_used += bytestocopy;
i += bytestocopy;
chunksize -= bytestocopy;
}
}
else
{
/* not chunked */
if(content_length > 0
&& (int)(content_buf_used + n) > content_length) {
/* skipping additional bytes */
n = content_length - content_buf_used;
}
if(content_buf_used + n > content_buf_len)
{
if(content_length >= (int)(content_buf_used + n)) {
content_buf_len = content_length;
} else {
content_buf_len = content_buf_used + n;
}
content_buf = (char *)realloc((void *)content_buf,
content_buf_len);
}
memcpy(content_buf + content_buf_used, buf, n);
content_buf_used += n;
}
}
/* use the Content-Length header value if available */
if(content_length > 0 && (int)content_buf_used >= content_length)
{
#ifdef DEBUG
printf("End of HTTP content\n");
#endif
break;
}
}
end_of_stream:
free(header_buf); header_buf = NULL;
*size = content_buf_used;
if(content_buf_used == 0)
{
free(content_buf);
content_buf = NULL;
}
return content_buf;
}
| 306,297,354,977,511,100,000,000,000,000,000,000,000 | miniwget.c | 99,482,104,209,569,080,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3985 | The getHTTPResponse function in miniwget.c in MiniUPnP 1.9 allows remote attackers to cause a denial of service (crash) via crafted headers that trigger an out-of-bounds read. | https://nvd.nist.gov/vuln/detail/CVE-2014-3985 |
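Editor's note: the miniupnp record above involves scanning received headers for the end-of-headers marker while incrementing the index past the amount of data actually received. The sketch below shows a scan in which every index is checked against the buffer length before it is read; it is illustrative and not the miniwget.c code.

#include <stdio.h>
#include <string.h>

/* Return the offset just past the end of the HTTP headers ("\r\n\r\n" or
 * "\n\n"), or 0 if not found.  Every index is validated against len before
 * the corresponding byte is read. */
static size_t end_of_headers(const char *buf, size_t len)
{
    size_t i;

    for (i = 0; i + 1 < len; i++) {
        if (buf[i] == '\n' && buf[i + 1] == '\n')
            return i + 2;
        if (i + 3 < len && memcmp(buf + i, "\r\n\r\n", 4) == 0)
            return i + 4;
    }
    return 0;
}

int main(void)
{
    const char resp[] = "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok";
    size_t off = end_of_headers(resp, sizeof(resp) - 1);
    printf("headers end at offset %zu, body: %s\n", off, resp + off);
    return 0;
}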
1,254 | file | 39c7ac1106be844a5296d3eb5971946cc09ffda0 | https://github.com/file/file | https://github.com/file/file/commit/39c7ac1106be844a5296d3eb5971946cc09ffda0 | Fix note bounds reading, Francisco Alonso / Red Hat | 1 | donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size,
int clazz, int swap, size_t align, int *flags)
{
Elf32_Nhdr nh32;
Elf64_Nhdr nh64;
size_t noff, doff;
#ifdef ELFCORE
int os_style = -1;
#endif
uint32_t namesz, descsz;
unsigned char *nbuf = CAST(unsigned char *, vbuf);
(void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof);
offset += xnh_sizeof;
namesz = xnh_namesz;
descsz = xnh_descsz;
if ((namesz == 0) && (descsz == 0)) {
/*
* We're out of note headers.
*/
return (offset >= size) ? offset : size;
}
if (namesz & 0x80000000) {
(void)file_printf(ms, ", bad note name size 0x%lx",
(unsigned long)namesz);
return offset;
}
if (descsz & 0x80000000) {
(void)file_printf(ms, ", bad note description size 0x%lx",
(unsigned long)descsz);
return offset;
}
noff = offset;
doff = ELF_ALIGN(offset + namesz);
if (offset + namesz > size) {
/*
* We're past the end of the buffer.
*/
return doff;
}
offset = ELF_ALIGN(doff + descsz);
if (doff + descsz > size) {
/*
* We're past the end of the buffer.
*/
return (offset >= size) ? offset : size;
}
if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) ==
(FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID))
goto core;
if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 &&
xnh_type == NT_GNU_VERSION && descsz == 2) {
file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]);
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
xnh_type == NT_GNU_VERSION && descsz == 16) {
uint32_t desc[4];
(void)memcpy(desc, &nbuf[doff], sizeof(desc));
if (file_printf(ms, ", for GNU/") == -1)
return size;
switch (elf_getu32(swap, desc[0])) {
case GNU_OS_LINUX:
if (file_printf(ms, "Linux") == -1)
return size;
break;
case GNU_OS_HURD:
if (file_printf(ms, "Hurd") == -1)
return size;
break;
case GNU_OS_SOLARIS:
if (file_printf(ms, "Solaris") == -1)
return size;
break;
case GNU_OS_KFREEBSD:
if (file_printf(ms, "kFreeBSD") == -1)
return size;
break;
case GNU_OS_KNETBSD:
if (file_printf(ms, "kNetBSD") == -1)
return size;
break;
default:
if (file_printf(ms, "<unknown>") == -1)
return size;
}
if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]),
elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1)
return size;
*flags |= FLAGS_DID_NOTE;
return size;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) {
uint8_t desc[20];
uint32_t i;
if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? "md5/uuid" :
"sha1") == -1)
return size;
(void)memcpy(desc, &nbuf[doff], descsz);
for (i = 0; i < descsz; i++)
if (file_printf(ms, "%02x", desc[i]) == -1)
return size;
*flags |= FLAGS_DID_BUILD_ID;
}
if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 &&
xnh_type == NT_NETBSD_PAX && descsz == 4) {
static const char *pax[] = {
"+mprotect",
"-mprotect",
"+segvguard",
"-segvguard",
"+ASLR",
"-ASLR",
};
uint32_t desc;
size_t i;
int did = 0;
(void)memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (desc && file_printf(ms, ", PaX: ") == -1)
return size;
for (i = 0; i < __arraycount(pax); i++) {
if (((1 << i) & desc) == 0)
continue;
if (file_printf(ms, "%s%s", did++ ? "," : "",
pax[i]) == -1)
return size;
}
}
if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) {
switch (xnh_type) {
case NT_NETBSD_VERSION:
if (descsz == 4) {
do_note_netbsd_version(ms, swap, &nbuf[doff]);
*flags |= FLAGS_DID_NOTE;
return size;
}
break;
case NT_NETBSD_MARCH:
if (file_printf(ms, ", compiled for: %.*s", (int)descsz,
(const char *)&nbuf[doff]) == -1)
return size;
break;
case NT_NETBSD_CMODEL:
if (file_printf(ms, ", compiler model: %.*s",
(int)descsz, (const char *)&nbuf[doff]) == -1)
return size;
break;
default:
if (file_printf(ms, ", note=%u", xnh_type) == -1)
return size;
break;
}
return size;
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) {
if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) {
do_note_freebsd_version(ms, swap, &nbuf[doff]);
*flags |= FLAGS_DID_NOTE;
return size;
}
}
if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 &&
xnh_type == NT_OPENBSD_VERSION && descsz == 4) {
if (file_printf(ms, ", for OpenBSD") == -1)
return size;
/* Content of note is always 0 */
*flags |= FLAGS_DID_NOTE;
return size;
}
if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 &&
xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) {
uint32_t desc;
if (file_printf(ms, ", for DragonFly") == -1)
return size;
(void)memcpy(&desc, &nbuf[doff], sizeof(desc));
desc = elf_getu32(swap, desc);
if (file_printf(ms, " %d.%d.%d", desc / 100000,
desc / 10000 % 10, desc % 10000) == -1)
return size;
*flags |= FLAGS_DID_NOTE;
return size;
}
core:
/*
* Sigh. The 2.0.36 kernel in Debian 2.1, at
* least, doesn't correctly implement name
* sections, in core dumps, as specified by
* the "Program Linking" section of "UNIX(R) System
* V Release 4 Programmer's Guide: ANSI C and
* Programming Support Tools", because my copy
* clearly says "The first 'namesz' bytes in 'name'
* contain a *null-terminated* [emphasis mine]
* character representation of the entry's owner
* or originator", but the 2.0.36 kernel code
* doesn't include the terminating null in the
* name....
*/
if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) ||
(namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) {
os_style = OS_STYLE_SVR4;
}
if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) {
os_style = OS_STYLE_FREEBSD;
}
if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11)
== 0)) {
os_style = OS_STYLE_NETBSD;
}
#ifdef ELFCORE
if ((*flags & FLAGS_DID_CORE) != 0)
return size;
if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) {
if (file_printf(ms, ", %s-style", os_style_names[os_style])
== -1)
return size;
*flags |= FLAGS_DID_CORE_STYLE;
}
switch (os_style) {
case OS_STYLE_NETBSD:
if (xnh_type == NT_NETBSD_CORE_PROCINFO) {
uint32_t signo;
/*
* Extract the program name. It is at
* offset 0x7c, and is up to 32-bytes,
* including the terminating NUL.
*/
if (file_printf(ms, ", from '%.31s'",
&nbuf[doff + 0x7c]) == -1)
return size;
/*
* Extract the signal number. It is at
* offset 0x08.
*/
(void)memcpy(&signo, &nbuf[doff + 0x08],
sizeof(signo));
if (file_printf(ms, " (signal %u)",
elf_getu32(swap, signo)) == -1)
return size;
*flags |= FLAGS_DID_CORE;
return size;
}
break;
default:
if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) {
size_t i, j;
unsigned char c;
/*
* Extract the program name. We assume
* it to be 16 characters (that's what it
* is in SunOS 5.x and Linux).
*
* Unfortunately, it's at a different offset
* in various OSes, so try multiple offsets.
* If the characters aren't all printable,
* reject it.
*/
for (i = 0; i < NOFFSETS; i++) {
unsigned char *cname, *cp;
size_t reloffset = prpsoffsets(i);
size_t noffset = doff + reloffset;
size_t k;
for (j = 0; j < 16; j++, noffset++,
reloffset++) {
/*
* Make sure we're not past
* the end of the buffer; if
* we are, just give up.
*/
if (noffset >= size)
goto tryanother;
/*
* Make sure we're not past
* the end of the contents;
* if we are, this obviously
* isn't the right offset.
*/
if (reloffset >= descsz)
goto tryanother;
c = nbuf[noffset];
if (c == '\0') {
/*
* A '\0' at the
* beginning is
* obviously wrong.
* Any other '\0'
* means we're done.
*/
if (j == 0)
goto tryanother;
else
break;
} else {
/*
* A nonprintable
* character is also
* wrong.
*/
if (!isprint(c) || isquote(c))
goto tryanother;
}
}
/*
* Well, that worked.
*/
/*
* Try next offsets, in case this match is
* in the middle of a string.
*/
for (k = i + 1 ; k < NOFFSETS ; k++) {
size_t no;
int adjust = 1;
if (prpsoffsets(k) >= prpsoffsets(i))
continue;
for (no = doff + prpsoffsets(k);
no < doff + prpsoffsets(i); no++)
adjust = adjust
&& isprint(nbuf[no]);
if (adjust)
i = k;
}
cname = (unsigned char *)
&nbuf[doff + prpsoffsets(i)];
for (cp = cname; *cp && isprint(*cp); cp++)
continue;
/*
* Linux apparently appends a space at the end
* of the command line: remove it.
*/
while (cp > cname && isspace(cp[-1]))
cp--;
if (file_printf(ms, ", from '%.*s'",
(int)(cp - cname), cname) == -1)
return size;
*flags |= FLAGS_DID_CORE;
return size;
tryanother:
;
}
}
break;
}
#endif
return offset;
}
| 134,716,201,177,067,830,000,000,000,000,000,000,000 | readelf.c | 6,375,476,860,511,039,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-3710 | The donote function in readelf.c in file through 5.20, as used in the Fileinfo component in PHP 5.4.34, does not ensure that sufficient note headers are present, which allows remote attackers to cause a denial of service (out-of-bounds read and application crash) via a crafted ELF file. | https://nvd.nist.gov/vuln/detail/CVE-2014-3710 |
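Editor's note: the donote record above is about reading ELF note headers without ensuring the header and its name/descriptor actually fit in the remaining buffer. The sketch below validates a simplified note against the buffer bounds before touching its contents; the structure is a stand-in, not file(1)'s parser.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal ELF-note-style header: name size, descriptor size, type. */
struct demo_note { uint32_t namesz, descsz, type; };

/* Validate that one note (header + padded name + padded desc) fits entirely
 * inside buf[0..size).  Returns the offset of the next note, or 0 on error. */
static size_t check_note(const unsigned char *buf, size_t size, size_t off)
{
    struct demo_note nh;
    size_t name_end, desc_end;

    if (off > size || size - off < sizeof(nh))
        return 0;                               /* header itself is truncated */
    memcpy(&nh, buf + off, sizeof(nh));
    off += sizeof(nh);

    if (nh.namesz > size - off)
        return 0;                               /* name would run past the buffer */
    name_end = off + ((nh.namesz + 3) & ~3u);
    if (name_end > size || nh.descsz > size - name_end)
        return 0;                               /* descriptor would run past it */
    desc_end = name_end + ((nh.descsz + 3) & ~3u);
    return desc_end <= size ? desc_end : size;
}

int main(void)
{
    unsigned char buf[32] = { 0 };
    struct demo_note nh = { 4, 100, 1 };        /* descsz lies about the size */
    memcpy(buf, &nh, sizeof(nh));
    memcpy(buf + sizeof(nh), "GNU", 4);
    printf("next note offset: %zu (0 means rejected)\n",
           check_note(buf, sizeof(buf), 0));
    return 0;
}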
1,257 | linux | 26b87c7881006311828bb0ab271a551a62dcceb4 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/26b87c7881006311828bb0ab271a551a62dcceb4 | net: sctp: fix remote memory pressure from excessive queueing
This scenario is not limited to ASCONF, just taken as one
example triggering the issue. When receiving ASCONF probes
in the form of ...
-------------- INIT[ASCONF; ASCONF_ACK] ------------->
<----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
---- ASCONF_a; [ASCONF_b; ...; ASCONF_n;] JUNK ------>
[...]
---- ASCONF_m; [ASCONF_o; ...; ASCONF_z;] JUNK ------>
... where ASCONF_a, ASCONF_b, ..., ASCONF_z are well-formed
ASCONFs and have increasing serial numbers, we process such
ASCONF chunk(s) marked with !end_of_packet and !singleton,
since we have not yet reached the SCTP packet end. SCTP does
only do verification on a chunk by chunk basis, as an SCTP
packet is nothing more than just a container of a stream of
chunks which it eats up one by one.
We could run into the case that we receive a packet with a
malformed tail, above marked as trailing JUNK. All previous
chunks are here goodformed, so the stack will eat up all
previous chunks up to this point. In case JUNK does not fit
into a chunk header and there are no more other chunks in
the input queue, or in case JUNK contains a garbage chunk
header, but the encoded chunk length would exceed the skb
tail, or we came here from an entirely different scenario
and the chunk has pdiscard=1 mark (without having had a flush
point), it will happen, that we will excessively queue up
the association's output queue (a correct final chunk may
then turn it into a response flood when flushing the
queue ;)): I ran a simple script with incremental ASCONF
serial numbers and could see the server side consuming
excessive amount of RAM [before/after: up to 2GB and more].
The issue at heart is that the chunk train basically ends
with !end_of_packet and !singleton markers and since commit
2e3216cd54b1 ("sctp: Follow security requirement of responding
with 1 packet") therefore preventing an output queue flush
point in sctp_do_sm() -> sctp_cmd_interpreter() on the input
chunk (chunk = event_arg) even though local_cork is set,
but its precedence has changed since then. In the normal
case, the last chunk with end_of_packet=1 would trigger the
queue flush to accommodate possible outgoing bundling.
In the input queue, sctp_inq_pop() seems to do the right thing
in terms of discarding invalid chunks. So, above JUNK will
not enter the state machine and instead be released and exit
the sctp_assoc_bh_rcv() chunk processing loop. It's simply
the flush point being missing at loop exit. Adding a try-flush
approach on the output queue might not work as the underlying
infrastructure might be long gone at this point due to the
side-effect interpreter run.
One possibility, albeit a bit of a kludge, would be to defer
invalid chunk freeing into the state machine in order to
possibly trigger packet discards and thus indirectly a queue
flush on error. It would surely be better to discard chunks
as in the current, perhaps better controlled environment, but
going back and forth, it's simply architecturally not possible.
I tried various trailing JUNK attack cases and it seems to
look good now.
Joint work with Vlad Yasevich.
Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
struct sctp_chunk *chunk;
sctp_chunkhdr_t *ch = NULL;
/* The assumption is that we are safe to process the chunks
* at this time.
*/
if ((chunk = queue->in_progress)) {
/* There is a packet that we have been working on.
* Any post processing work to do before we move on?
*/
if (chunk->singleton ||
chunk->end_of_packet ||
chunk->pdiscard) {
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
} else {
/* Nothing to do. Next chunk in the packet, please. */
ch = (sctp_chunkhdr_t *) chunk->chunk_end;
/* Force chunk->skb->data to chunk->chunk_end. */
skb_pull(chunk->skb,
chunk->chunk_end - chunk->skb->data);
/* Verify that we have at least chunk headers
* worth of buffer left.
*/
if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
}
}
}
/* Do we need to take the next packet out of the queue to process? */
if (!chunk) {
struct list_head *entry;
/* Is the queue empty? */
if (list_empty(&queue->in_chunk_list))
return NULL;
entry = queue->in_chunk_list.next;
chunk = queue->in_progress =
list_entry(entry, struct sctp_chunk, list);
list_del_init(entry);
/* This is the first chunk in the packet. */
chunk->singleton = 1;
ch = (sctp_chunkhdr_t *) chunk->skb->data;
chunk->data_accepted = 0;
}
chunk->chunk_hdr = ch;
chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
/* In the unlikely case of an IP reassembly, the skb could be
* non-linear. If so, update chunk_end so that it doesn't go past
* the skb->tail.
*/
if (unlikely(skb_is_nonlinear(chunk->skb))) {
if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
chunk->chunk_end = skb_tail_pointer(chunk->skb);
}
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
/* RFC 2960, Section 6.10 Bundling
*
* Partial chunks MUST NOT be placed in an SCTP packet.
* If the receiver detects a partial chunk, it MUST drop
* the chunk.
*
* Since the end of the chunk is past the end of our buffer
* (which contains the whole packet, we can freely discard
* the whole packet.
*/
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
return NULL;
} else {
/* We are at the end of the packet, so mark the chunk
* in case we need to send a SACK.
*/
chunk->end_of_packet = 1;
}
pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
ntohs(chunk->chunk_hdr->length), chunk->skb->len);
return chunk;
}
| 146,497,261,003,956,660,000,000,000,000,000,000,000 | inqueue.c | 183,685,648,427,300,330,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-3688 | The SCTP implementation in the Linux kernel before 3.17.4 allows remote attackers to cause a denial of service (memory consumption) by triggering a large number of chunks in an association's output queue, as demonstrated by ASCONF probes, related to net/sctp/inqueue.c and net/sctp/sm_statefuns.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3688 |
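The commit message above proposes deferring the freeing of a broken trailing chunk into the state machine, so the packet is discarded there and the output queue gets its flush point. A rough sketch of that idea for the truncated-tail branch of sctp_inq_pop(), reusing only fields already visible in the function (direction of the fix, not the exact upstream diff):

		if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
			/* Don't free here: mark the chunk for discard and let the
			 * state machine drop the packet, which also flushes the
			 * association's output queue. */
			chunk->pdiscard = 1;
			chunk->chunk_end = skb_tail_pointer(chunk->skb);
		}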
1,258 | linux | 26b87c7881006311828bb0ab271a551a62dcceb4 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/26b87c7881006311828bb0ab271a551a62dcceb4 | net: sctp: fix remote memory pressure from excessive queueing
This scenario is not limited to ASCONF, just taken as one
example triggering the issue. When receiving ASCONF probes
in the form of ...
-------------- INIT[ASCONF; ASCONF_ACK] ------------->
<----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
---- ASCONF_a; [ASCONF_b; ...; ASCONF_n;] JUNK ------>
[...]
---- ASCONF_m; [ASCONF_o; ...; ASCONF_z;] JUNK ------>
... where ASCONF_a, ASCONF_b, ..., ASCONF_z are good-formed
ASCONFs and have increasing serial numbers, we process such
ASCONF chunk(s) marked with !end_of_packet and !singleton,
since we have not yet reached the SCTP packet end. SCTP does
only do verification on a chunk by chunk basis, as an SCTP
packet is nothing more than just a container of a stream of
chunks which it eats up one by one.
We could run into the case that we receive a packet with a
malformed tail, above marked as trailing JUNK. All previous
chunks are here goodformed, so the stack will eat up all
previous chunks up to this point. In case JUNK does not fit
into a chunk header and there are no more other chunks in
the input queue, or in case JUNK contains a garbage chunk
header, but the encoded chunk length would exceed the skb
tail, or we came here from an entirely different scenario
and the chunk has pdiscard=1 mark (without having had a flush
point), it will happen, that we will excessively queue up
the association's output queue (a correct final chunk may
then turn it into a response flood when flushing the
queue ;)): I ran a simple script with incremental ASCONF
serial numbers and could see the server side consuming
excessive amount of RAM [before/after: up to 2GB and more].
The issue at heart is that the chunk train basically ends
with !end_of_packet and !singleton markers and since commit
2e3216cd54b1 ("sctp: Follow security requirement of responding
with 1 packet") therefore preventing an output queue flush
point in sctp_do_sm() -> sctp_cmd_interpreter() on the input
chunk (chunk = event_arg) even though local_cork is set,
but its precedence has changed since then. In the normal
case, the last chunk with end_of_packet=1 would trigger the
queue flush to accommodate possible outgoing bundling.
In the input queue, sctp_inq_pop() seems to do the right thing
in terms of discarding invalid chunks. So, above JUNK will
not enter the state machine and instead be released and exit
the sctp_assoc_bh_rcv() chunk processing loop. It's simply
the flush point being missing at loop exit. Adding a try-flush
approach on the output queue might not work as the underlying
infrastructure might be long gone at this point due to the
side-effect interpreter run.
One possibility, albeit a bit of a kludge, would be to defer
invalid chunk freeing into the state machine in order to
possibly trigger packet discards and thus indirectly a queue
flush on error. It would surely be better to discard chunks
as in the current, perhaps better controlled environment, but
going back and forth, it's simply architecturally not possible.
I tried various trailing JUNK attack cases and it seems to
look good now.
Joint work with Vlad Yasevich.
Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static inline int sctp_chunk_length_valid(struct sctp_chunk *chunk,
__u16 required_length)
{
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
if (unlikely(chunk_length < required_length))
return 0;
return 1;
}
| 256,102,226,120,872,380,000,000,000,000,000,000,000 | sm_statefuns.c | 238,370,719,693,466,800,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-3688 | The SCTP implementation in the Linux kernel before 3.17.4 allows remote attackers to cause a denial of service (memory consumption) by triggering a large number of chunks in an association's output queue, as demonstrated by ASCONF probes, related to net/sctp/inqueue.c and net/sctp/sm_statefuns.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3688 |
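With the deferred-discard approach described in the commit message, the length validator also has to reject a chunk that was already marked for discard in the input queue. A minimal sketch of that extra check, assuming the pdiscard flag carried by struct sctp_chunk:

static inline int sctp_chunk_length_valid(struct sctp_chunk *chunk,
					  __u16 required_length)
{
	__u16 chunk_length = ntohs(chunk->chunk_hdr->length);

	/* A chunk flagged for discard must never validate. */
	if (unlikely(chunk->pdiscard))
		return 0;
	if (unlikely(chunk_length < required_length))
		return 0;
	return 1;
}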
1,259 | linux | b69040d8e39f20d5215a03502a8e8b4c6ab78395 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b69040d8e39f20d5215a03502a8e8b4c6ab78395 | net: sctp: fix panic on duplicate ASCONF chunks
When receiving a e.g. semi-good formed connection scan in the
form of ...
-------------- INIT[ASCONF; ASCONF_ACK] ------------->
<----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
---------------- ASCONF_a; ASCONF_b ----------------->
... where ASCONF_a equals ASCONF_b chunk (at least both serials
need to be equal), we panic an SCTP server!
The problem is that good-formed ASCONF chunks that we reply with
ASCONF_ACK chunks are cached per serial. Thus, when we receive a
same ASCONF chunk twice (e.g. through a lost ASCONF_ACK), we do
not need to process them again on the server side (that was the
idea, also proposed in the RFC). Instead, we know it was cached
and we just resend the cached chunk instead. So far, so good.
Where things get nasty is in SCTP's side effect interpreter, that
is, sctp_cmd_interpreter():
While incoming ASCONF_a (chunk = event_arg) is being marked
!end_of_packet and !singleton, and we have an association context,
we do not flush the outqueue the first time after processing the
ASCONF_ACK singleton chunk via SCTP_CMD_REPLY. Instead, we keep it
queued up, although we set local_cork to 1. Commit 2e3216cd54b1
changed the precedence, so that as long as we get bundled, incoming
chunks we try possible bundling on outgoing queue as well. Before
this commit, we would just flush the output queue.
Now, while ASCONF_a's ASCONF_ACK sits in the corked outq, we
continue to process the same ASCONF_b chunk from the packet. As
we have cached the previous ASCONF_ACK, we find it, grab it and
do another SCTP_CMD_REPLY command on it. So, effectively, we rip
the chunk->list pointers and requeue the same ASCONF_ACK chunk
another time. Since we process ASCONF_b, it's correctly marked
with end_of_packet and we enforce an uncork, and thus flush, thus
crashing the kernel.
Fix it by testing if the ASCONF_ACK is currently pending and if
that is the case, do not requeue it. When flushing the output
queue we may relink the chunk for preparing an outgoing packet,
but eventually unlink it when it's copied into the skb right
before transmission.
Joint work with Vlad Yasevich.
Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
const struct sctp_association *asoc,
__be32 serial)
{
struct sctp_chunk *ack;
/* Walk through the list of cached ASCONF-ACKs and find the
* ack chunk whose serial number matches that of the request.
*/
list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
if (ack->subh.addip_hdr->serial == serial) {
sctp_chunk_hold(ack);
return ack;
}
}
return NULL;
}
| 285,652,383,158,838,000,000,000,000,000,000,000,000 | associola.c | 116,881,181,770,839,720,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-3687 | The sctp_assoc_lookup_asconf_ack function in net/sctp/associola.c in the SCTP implementation in the Linux kernel through 3.17.2 allows remote attackers to cause a denial of service (panic) via duplicate ASCONF chunks that trigger an incorrect uncork within the side-effect interpreter. | https://nvd.nist.gov/vuln/detail/CVE-2014-3687 |
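The fix described above is to skip a cached ASCONF_ACK that is still queued for transmission, so it is never linked into the output queue a second time. A sketch of the lookup with that test, assuming a small helper that treats a chunk whose ->list is still linked as pending (the helper name is illustrative):

/* Illustrative helper: the chunk is still sitting on some queue. */
static inline int sctp_chunk_pending(const struct sctp_chunk *chunk)
{
	return !list_empty(&chunk->list);
}

	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (sctp_chunk_pending(ack))
			continue;	/* already queued; requeuing would corrupt ->list */
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}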
1,269 | linux | bfd0a56b90005f8c8a004baf407ad90045c2b11e | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/bfd0a56b90005f8c8a004baf407ad90045c2b11e | nEPT: Nested INVEPT
If we let L1 use EPT, we should probably also support the INVEPT instruction.
In our current nested EPT implementation, when L1 changes its EPT table
for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02), and in
the course of this modification already calls INVEPT. But if last level
of shadow page is unsync not all L1's changes to EPT12 are intercepted,
which means roots need to be synced when L1 calls INVEPT. Global INVEPT
should not be different since roots are synced by kvm_mmu_load() each
time EPTP02 changes.
Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xinhao Xu <xinhao.xu@intel.com>
Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 1 | static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
{
u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 exit_reason = vmx->exit_reason;
if (vmx->nested.nested_run_pending)
return 0;
if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR));
return 1;
}
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info))
return 0;
else if (is_page_fault(intr_info))
return enable_ept;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
return 0;
case EXIT_REASON_TRIPLE_FAULT:
return 1;
case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH:
return 1;
case EXIT_REASON_CPUID:
return 1;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD:
return 1;
case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
case EXIT_REASON_RDTSC:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
/*
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
*/
return 1;
case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS:
return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
case EXIT_REASON_IO_INSTRUCTION:
return nested_vmx_exit_handled_io(vcpu, vmcs12);
case EXIT_REASON_MSR_READ:
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
return 1;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
case EXIT_REASON_PAUSE_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY:
return 0;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
return 1;
case EXIT_REASON_APIC_ACCESS:
return nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
case EXIT_REASON_EPT_VIOLATION:
case EXIT_REASON_EPT_MISCONFIG:
return 0;
case EXIT_REASON_PREEMPTION_TIMER:
return vmcs12->pin_based_vm_exec_control &
PIN_BASED_VMX_PREEMPTION_TIMER;
case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
return 1;
default:
return 1;
}
}
| 74,454,302,223,687,580,000,000,000,000,000,000,000 | vmx.c | 251,867,498,831,949,350,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-3645 | arch/x86/kvm/vmx.c in the KVM subsystem in the Linux kernel before 3.12 does not have an exit handler for the INVEPT instruction, which allows guest OS users to cause a denial of service (guest OS crash) via a crafted application. | https://nvd.nist.gov/vuln/detail/CVE-2014-3645 |
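What the CVE calls the missing exit handler corresponds to two small additions: INVEPT listed with the other VMX instructions in nested_vmx_exit_handled(), and an L0 handler registered so the exit no longer falls into the unknown-exit path. A sketch, assuming the EXIT_REASON_INVEPT constant and a handle_invept() callback implemented alongside the other VMX-instruction handlers:

	/* In nested_vmx_exit_handled(): like the other VMX instructions,
	 * INVEPT always reflects to L1 so it can emulate it for L2. */
	case EXIT_REASON_INVEPT:
		return 1;

	/* In kvm_vmx_exit_handlers[] (handled by L0 when L1 itself ran INVEPT): */
	[EXIT_REASON_INVEPT] = handle_invept,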
1,270 | linux | 95389b08d93d5c06ec63ab49bd732b0069b7c35e | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/95389b08d93d5c06ec63ab49bd732b0069b7c35e | KEYS: Fix termination condition in assoc array garbage collection
This fixes CVE-2014-3631.
It is possible for an associative array to end up with a shortcut node at the
root of the tree if there are more than fan-out leaves in the tree, but they
all crowd into the same slot in the lowest level (ie. they all have the same
first nibble of their index keys).
When assoc_array_gc() returns back up the tree after scanning some leaves, it
can fall off of the root and crash because it assumes that the back pointer
from a shortcut (after label ascend_old_tree) must point to a normal node -
which isn't true of a shortcut node at the root.
Should we find we're ascending rootwards over a shortcut, we should check to
see if the backpointer is zero - and if it is, we have completed the scan.
This particular bug cannot occur if the root node is not a shortcut - ie. if
you have fewer than 17 keys in a keyring or if you have at least two keys that
sit into separate slots (eg. a keyring and a non keyring).
This can be reproduced by:
ring=`keyctl newring bar @s`
for ((i=1; i<=18; i++)); do last_key=`keyctl newring foo$i $ring`; done
keyctl timeout $last_key 2
Doing this:
echo 3 >/proc/sys/kernel/keys/gc_delay
first will speed things up.
If we do fall off of the top of the tree, we get the following oops:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000018
IP: [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540
PGD dae15067 PUD cfc24067 PMD 0
Oops: 0000 [#1] SMP
Modules linked in: xt_nat xt_mark nf_conntrack_netbios_ns nf_conntrack_broadcast ip6t_rpfilter ip6t_REJECT xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_ni
CPU: 0 PID: 26011 Comm: kworker/0:1 Not tainted 3.14.9-200.fc20.x86_64 #1
Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
Workqueue: events key_garbage_collector
task: ffff8800918bd580 ti: ffff8800aac14000 task.ti: ffff8800aac14000
RIP: 0010:[<ffffffff8136cea7>] [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540
RSP: 0018:ffff8800aac15d40 EFLAGS: 00010206
RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff8800aaecacc0
RDX: ffff8800daecf440 RSI: 0000000000000001 RDI: ffff8800aadc2bc0
RBP: ffff8800aac15da8 R08: 0000000000000001 R09: 0000000000000003
R10: ffffffff8136ccc7 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000070 R15: 0000000000000001
FS: 0000000000000000(0000) GS:ffff88011fc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 0000000000000018 CR3: 00000000db10d000 CR4: 00000000000006f0
Stack:
ffff8800aac15d50 0000000000000011 ffff8800aac15db8 ffffffff812e2a70
ffff880091a00600 0000000000000000 ffff8800aadc2bc3 00000000cd42c987
ffff88003702df20 ffff88003702dfa0 0000000053b65c09 ffff8800aac15fd8
Call Trace:
[<ffffffff812e2a70>] ? keyring_detect_cycle_iterator+0x30/0x30
[<ffffffff812e3e75>] keyring_gc+0x75/0x80
[<ffffffff812e1424>] key_garbage_collector+0x154/0x3c0
[<ffffffff810a67b6>] process_one_work+0x176/0x430
[<ffffffff810a744b>] worker_thread+0x11b/0x3a0
[<ffffffff810a7330>] ? rescuer_thread+0x3b0/0x3b0
[<ffffffff810ae1a8>] kthread+0xd8/0xf0
[<ffffffff810ae0d0>] ? insert_kthread_work+0x40/0x40
[<ffffffff816ffb7c>] ret_from_fork+0x7c/0xb0
[<ffffffff810ae0d0>] ? insert_kthread_work+0x40/0x40
Code: 08 4c 8b 22 0f 84 bf 00 00 00 41 83 c7 01 49 83 e4 fc 41 83 ff 0f 4c 89 65 c0 0f 8f 5a fe ff ff 48 8b 45 c0 4d 63 cf 49 83 c1 02 <4e> 8b 34 c8 4d 85 f6 0f 84 be 00 00 00 41 f6 c6 01 0f 84 92
RIP [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540
RSP <ffff8800aac15d40>
CR2: 0000000000000018
---[ end trace 1129028a088c0cbd ]---
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: James Morris <james.l.morris@oracle.com> | 1 | int assoc_array_gc(struct assoc_array *array,
const struct assoc_array_ops *ops,
bool (*iterator)(void *object, void *iterator_data),
void *iterator_data)
{
struct assoc_array_shortcut *shortcut, *new_s;
struct assoc_array_node *node, *new_n;
struct assoc_array_edit *edit;
struct assoc_array_ptr *cursor, *ptr;
struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
unsigned long nr_leaves_on_tree;
int keylen, slot, nr_free, next_slot, i;
pr_devel("-->%s()\n", __func__);
if (!array->root)
return 0;
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
if (!edit)
return -ENOMEM;
edit->array = array;
edit->ops = ops;
edit->ops_for_excised_subtree = ops;
edit->set[0].ptr = &array->root;
edit->excised_subtree = array->root;
new_root = new_parent = NULL;
new_ptr_pp = &new_root;
cursor = array->root;
descend:
/* If this point is a shortcut, then we need to duplicate it and
* advance the target cursor.
*/
if (assoc_array_ptr_is_shortcut(cursor)) {
shortcut = assoc_array_ptr_to_shortcut(cursor);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
if (!new_s)
goto enomem;
pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long)));
new_s->back_pointer = new_parent;
new_s->parent_slot = shortcut->parent_slot;
*new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
new_ptr_pp = &new_s->next_node;
cursor = shortcut->next_node;
}
/* Duplicate the node at this position */
node = assoc_array_ptr_to_node(cursor);
new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n)
goto enomem;
pr_devel("dup node %p -> %p\n", node, new_n);
new_n->back_pointer = new_parent;
new_n->parent_slot = node->parent_slot;
*new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
new_ptr_pp = NULL;
slot = 0;
continue_node:
/* Filter across any leaves and gc any subtrees */
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = node->slots[slot];
if (!ptr)
continue;
if (assoc_array_ptr_is_leaf(ptr)) {
if (iterator(assoc_array_ptr_to_leaf(ptr),
iterator_data))
/* The iterator will have done any reference
* counting on the object for us.
*/
new_n->slots[slot] = ptr;
continue;
}
new_ptr_pp = &new_n->slots[slot];
cursor = ptr;
goto descend;
}
pr_devel("-- compress node %p --\n", new_n);
/* Count up the number of empty slots in this node and work out the
* subtree leaf count.
*/
new_n->nr_leaves_on_branch = 0;
nr_free = 0;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = new_n->slots[slot];
if (!ptr)
nr_free++;
else if (assoc_array_ptr_is_leaf(ptr))
new_n->nr_leaves_on_branch++;
}
pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
/* See what we can fold in */
next_slot = 0;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
struct assoc_array_shortcut *s;
struct assoc_array_node *child;
ptr = new_n->slots[slot];
if (!ptr || assoc_array_ptr_is_leaf(ptr))
continue;
s = NULL;
if (assoc_array_ptr_is_shortcut(ptr)) {
s = assoc_array_ptr_to_shortcut(ptr);
ptr = s->next_node;
}
child = assoc_array_ptr_to_node(ptr);
new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
if (child->nr_leaves_on_branch <= nr_free + 1) {
/* Fold the child node into this one */
pr_devel("[%d] fold node %lu/%d [nx %d]\n",
slot, child->nr_leaves_on_branch, nr_free + 1,
next_slot);
/* We would already have reaped an intervening shortcut
* on the way back up the tree.
*/
BUG_ON(s);
new_n->slots[slot] = NULL;
nr_free++;
if (slot < next_slot)
next_slot = slot;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
struct assoc_array_ptr *p = child->slots[i];
if (!p)
continue;
BUG_ON(assoc_array_ptr_is_meta(p));
while (new_n->slots[next_slot])
next_slot++;
BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
new_n->slots[next_slot++] = p;
nr_free--;
}
kfree(child);
} else {
pr_devel("[%d] retain node %lu/%d [nx %d]\n",
slot, child->nr_leaves_on_branch, nr_free + 1,
next_slot);
}
}
pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
nr_leaves_on_tree = new_n->nr_leaves_on_branch;
/* Excise this node if it is singly occupied by a shortcut */
if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
if ((ptr = new_n->slots[slot]))
break;
if (assoc_array_ptr_is_meta(ptr) &&
assoc_array_ptr_is_shortcut(ptr)) {
pr_devel("excise node %p with 1 shortcut\n", new_n);
new_s = assoc_array_ptr_to_shortcut(ptr);
new_parent = new_n->back_pointer;
slot = new_n->parent_slot;
kfree(new_n);
if (!new_parent) {
new_s->back_pointer = NULL;
new_s->parent_slot = 0;
new_root = ptr;
goto gc_complete;
}
if (assoc_array_ptr_is_shortcut(new_parent)) {
/* We can discard any preceding shortcut also */
struct assoc_array_shortcut *s =
assoc_array_ptr_to_shortcut(new_parent);
pr_devel("excise preceding shortcut\n");
new_parent = new_s->back_pointer = s->back_pointer;
slot = new_s->parent_slot = s->parent_slot;
kfree(s);
if (!new_parent) {
new_s->back_pointer = NULL;
new_s->parent_slot = 0;
new_root = ptr;
goto gc_complete;
}
}
new_s->back_pointer = new_parent;
new_s->parent_slot = slot;
new_n = assoc_array_ptr_to_node(new_parent);
new_n->slots[slot] = ptr;
goto ascend_old_tree;
}
}
/* Excise any shortcuts we might encounter that point to nodes that
* only contain leaves.
*/
ptr = new_n->back_pointer;
if (!ptr)
goto gc_complete;
if (assoc_array_ptr_is_shortcut(ptr)) {
new_s = assoc_array_ptr_to_shortcut(ptr);
new_parent = new_s->back_pointer;
slot = new_s->parent_slot;
if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
struct assoc_array_node *n;
pr_devel("excise shortcut\n");
new_n->back_pointer = new_parent;
new_n->parent_slot = slot;
kfree(new_s);
if (!new_parent) {
new_root = assoc_array_node_to_ptr(new_n);
goto gc_complete;
}
n = assoc_array_ptr_to_node(new_parent);
n->slots[slot] = assoc_array_node_to_ptr(new_n);
}
} else {
new_parent = ptr;
}
new_n = assoc_array_ptr_to_node(new_parent);
ascend_old_tree:
ptr = node->back_pointer;
if (assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
} else {
slot = node->parent_slot;
cursor = ptr;
}
BUG_ON(!ptr);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:
pr_devel("enomem\n");
assoc_array_destroy_subtree(new_root, edit->ops);
kfree(edit);
return -ENOMEM;
}
| 32,773,475,763,762,925,000,000,000,000,000,000,000 | assoc_array.c | 80,273,721,649,681,460,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-3631 | The assoc_array_gc function in the associative-array implementation in lib/assoc_array.c in the Linux kernel before 3.16.3 does not properly implement garbage collection, which allows local users to cause a denial of service (NULL pointer dereference and system crash) or possibly have unspecified other impact via multiple "keyctl newring" operations followed by a "keyctl timeout" operation. | https://nvd.nist.gov/vuln/detail/CVE-2014-3631 |
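Per the commit message, the crash comes from ascending over a shortcut that sits at the root: its back pointer is NULL, yet the code after ascend_old_tree assumes it points to a node. A sketch of the missing termination test, using only variables the function already has:

ascend_old_tree:
	ptr = node->back_pointer;
	if (assoc_array_ptr_is_shortcut(ptr)) {
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		slot = shortcut->parent_slot;
		cursor = shortcut->back_pointer;
		/* A shortcut at the root has no parent: the scan is complete. */
		if (!cursor)
			goto gc_complete;
	} else {
		slot = node->parent_slot;
		cursor = ptr;
	}
	BUG_ON(!cursor);
	node = assoc_array_ptr_to_node(cursor);
	slot++;
	goto continue_node;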
1,271 | linux | 2febc839133280d5a5e8e1179c94ea674489dae2 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/2febc839133280d5a5e8e1179c94ea674489dae2 | KVM: x86: Improve thread safety in pit
There's a race condition in the PIT emulation code in KVM. In
__kvm_migrate_pit_timer the pit_timer object is accessed without
synchronization. If the race condition occurs at the wrong time this
can crash the host kernel.
This fixes CVE-2014-3611.
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 1 | void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
struct hrtimer *timer;
if (!kvm_vcpu_is_bsp(vcpu) || !pit)
return;
timer = &pit->pit_state.timer;
if (hrtimer_cancel(timer))
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
| 119,883,143,059,227,320,000,000,000,000,000,000,000 | i8254.c | 104,037,544,635,274,550,000,000,000,000,000,000,000 | [
"CWE-362"
] | CVE-2014-3611 | Race condition in the __kvm_migrate_pit_timer function in arch/x86/kvm/i8254.c in the KVM subsystem in the Linux kernel through 3.17.2 allows guest OS users to cause a denial of service (host OS crash) by leveraging incorrect PIT emulation. | https://nvd.nist.gov/vuln/detail/CVE-2014-3611 |
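The unsynchronized access named in the commit message is the hrtimer manipulation shown above racing against the PIT state being reprogrammed through the ioctl paths. A sketch of serializing it with the existing pit_state.lock, assuming that lock is safe to take in every context this function is reached from:

	timer = &pit->pit_state.timer;
	mutex_lock(&pit->pit_state.lock);
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
	mutex_unlock(&pit->pit_state.lock);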
1,272 | linux | 854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 | KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 1 | static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
struct msr_data msr;
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
msr.data = data;
msr.index = ecx;
msr.host_initiated = false;
if (vmx_set_msr(vcpu, &msr) != 0) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(vcpu, 0);
return 1;
}
trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(vcpu);
return 1;
}
| 236,270,974,209,097,240,000,000,000,000,000,000,000 | vmx.c | 77,404,791,920,628,900,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2014-3610 | The WRMSR processing functionality in the KVM subsystem in the Linux kernel through 3.17.2 does not properly handle the writing of a non-canonical address to a model-specific register, which allows guest OS users to cause a denial of service (host OS crash) by leveraging guest OS privileges, related to the wrmsr_interception function in arch/x86/kvm/svm.c and the handle_wrmsr function in arch/x86/kvm/vmx.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3610 |
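The commit message spells out the policy: reject non-canonical values for the base/target-address MSRs and silently canonicalize the two SYSENTER MSRs so Intel and AMD behave the same. A sketch of that filter in the common kvm_set_msr() path, assuming the is_noncanonical_address() helper and a small get_canonical() helper (sign-extend from bit 47) are available:

int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	switch (msr->index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
		/* Non-canonical address: fail so the caller injects #GP. */
		if (is_noncanonical_address(msr->data))
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/* Intel #GPs here, AMD drops the upper bits; write a
		 * canonical value so both behave identically. */
		msr->data = get_canonical(msr->data);
		break;
	}
	return kvm_x86_ops->set_msr(vcpu, msr);
}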
1,274 | linux | 350b8bdd689cd2ab2c67c8a86a0be86cfa0751a7 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/350b8bdd689cd2ab2c67c8a86a0be86cfa0751a7 | kvm: iommu: fix the third parameter of kvm_iommu_put_pages (CVE-2014-3601)
The third parameter of kvm_iommu_put_pages is wrong,
It should be 'gfn - slot->base_gfn'.
By making gfn very large, malicious guest or userspace can cause kvm to
go to this error path, and subsequently to pass a huge value as size.
Alternatively if gfn is small, then pages would be pinned but never
unpinned, causing host memory leak and local DOS.
Passing a reasonable but large value could be the most dangerous case,
because it would unpin a page that should have stayed pinned, and thus
allow the device to DMA into arbitrary memory. However, this cannot
happen because of the condition that can trigger the error:
- out of memory (where you can't allocate even a single page)
should not be possible for the attacker to trigger
- when exceeding the iommu's address space, guest pages after gfn
will also exceed the iommu's address space, and inside
kvm_iommu_put_pages() the iommu_iova_to_phys() will fail. The
page thus would not be unpinned at all.
Reported-by: Jack Morgenstein <jackm@mellanox.com>
Cc: stable@vger.kernel.org
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 1 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
gfn_t gfn, end_gfn;
pfn_t pfn;
int r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int flags;
/* check if iommu exists and in use */
if (!domain)
return 0;
gfn = slot->base_gfn;
end_gfn = gfn + slot->npages;
flags = IOMMU_READ;
if (!(slot->flags & KVM_MEM_READONLY))
flags |= IOMMU_WRITE;
if (!kvm->arch.iommu_noncoherent)
flags |= IOMMU_CACHE;
while (gfn < end_gfn) {
unsigned long page_size;
/* Check if already mapped */
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
gfn += 1;
continue;
}
/* Get the page size we could use to map */
page_size = kvm_host_page_size(kvm, gfn);
/* Make sure the page_size does not exceed the memslot */
while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
page_size >>= 1;
/* Make sure gfn is aligned to the page size we want to map */
while ((gfn << PAGE_SHIFT) & (page_size - 1))
page_size >>= 1;
/* Make sure hva is aligned to the page size we want to map */
while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
page_size >>= 1;
/*
* Pin all pages we are about to map in memory. This is
* important because we unmap and unpin in 4kb steps later.
*/
pfn = kvm_pin_pages(slot, gfn, page_size);
if (is_error_noslot_pfn(pfn)) {
gfn += 1;
continue;
}
/* Map into IO address space */
r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
page_size, flags);
if (r) {
printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%llx\n", pfn);
goto unmap_pages;
}
gfn += page_size >> PAGE_SHIFT;
}
return 0;
unmap_pages:
kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
return r;
}
| 150,168,140,098,793,640,000,000,000,000,000,000,000 | iommu.c | 222,325,957,747,450,100,000,000,000,000,000,000,000 | [
"CWE-189"
] | CVE-2014-3601 | The kvm_iommu_map_pages function in virt/kvm/iommu.c in the Linux kernel through 3.16.1 miscalculates the number of pages during the handling of a mapping failure, which allows guest OS users to (1) cause a denial of service (host OS memory corruption) or possibly have unspecified other impact by triggering a large gfn value or (2) cause a denial of service (host OS memory consumption) by triggering a small gfn value that leads to permanently pinned pages. | https://nvd.nist.gov/vuln/detail/CVE-2014-3601 |
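The commit message identifies the bug precisely: on the unmap_pages error path, the third argument passed to kvm_iommu_put_pages() must be the number of pages mapped so far, not the absolute gfn. With only values already computed in the function, the corrected error path is:

unmap_pages:
	/* Undo exactly the gfn - slot->base_gfn pages mapped above. */
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;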
1,277 | php-src | 2fefae47716d501aec41c1102f3fd4531f070b05 | https://github.com/php/php-src | https://github.com/php/php-src/commit/2fefae47716d501aec41c1102f3fd4531f070b05 | Fixed Sec Bug #67717 segfault in dns_get_record CVE-2014-3597
Incomplete fix for CVE-2014-4049
Check possible buffer overflow
- pass real buffer end to dn_expand calls
- check buffer len before each read | 1 | static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)
{
u_short type, class, dlen;
u_long ttl;
long n, i;
u_short s;
u_char *tp, *p;
char name[MAXHOSTNAMELEN];
int have_v6_break = 0, in_v6_break = 0;
*subarray = NULL;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
GETSHORT(type, cp);
GETSHORT(class, cp);
GETLONG(ttl, cp);
GETSHORT(dlen, cp);
if (type_to_fetch != T_ANY && type != type_to_fetch) {
cp += dlen;
return cp;
}
if (!store) {
cp += dlen;
return cp;
}
ALLOC_INIT_ZVAL(*subarray);
array_init(*subarray);
add_assoc_string(*subarray, "host", name, 1);
add_assoc_string(*subarray, "class", "IN", 1);
add_assoc_long(*subarray, "ttl", ttl);
if (raw) {
add_assoc_long(*subarray, "type", type);
add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1);
cp += dlen;
return cp;
}
switch (type) {
case DNS_T_A:
add_assoc_string(*subarray, "type", "A", 1);
snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]);
add_assoc_string(*subarray, "ip", name, 1);
cp += dlen;
break;
case DNS_T_MX:
add_assoc_string(*subarray, "type", "MX", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
/* no break; */
case DNS_T_CNAME:
if (type == DNS_T_CNAME) {
add_assoc_string(*subarray, "type", "CNAME", 1);
}
/* no break; */
case DNS_T_NS:
if (type == DNS_T_NS) {
add_assoc_string(*subarray, "type", "NS", 1);
}
/* no break; */
case DNS_T_PTR:
if (type == DNS_T_PTR) {
add_assoc_string(*subarray, "type", "PTR", 1);
}
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_HINFO:
/* See RFC 1010 for values */
add_assoc_string(*subarray, "type", "HINFO", 1);
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1);
cp += n;
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "os", (char*)cp, n, 1);
cp += n;
break;
case DNS_T_TXT:
{
int ll = 0;
zval *entries = NULL;
add_assoc_string(*subarray, "type", "TXT", 1);
tp = emalloc(dlen + 1);
MAKE_STD_ZVAL(entries);
array_init(entries);
while (ll < dlen) {
n = cp[ll];
if ((ll + n) >= dlen) {
n = dlen - (ll + 1);
}
memcpy(tp + ll , cp + ll + 1, n);
add_next_index_stringl(entries, cp + ll + 1, n, 1);
ll = ll + n + 1;
}
tp[dlen] = '\0';
cp += dlen;
add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0);
add_assoc_zval(*subarray, "entries", entries);
}
break;
case DNS_T_SOA:
add_assoc_string(*subarray, "type", "SOA", 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "mname", name, 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "rname", name, 1);
GETLONG(n, cp);
add_assoc_long(*subarray, "serial", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "refresh", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "retry", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "expire", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "minimum-ttl", n);
break;
case DNS_T_AAAA:
tp = (u_char*)name;
for(i=0; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "type", "AAAA", 1);
add_assoc_string(*subarray, "ipv6", name, 1);
break;
case DNS_T_A6:
p = cp;
add_assoc_string(*subarray, "type", "A6", 1);
n = ((int)cp[0]) & 0xFF;
cp++;
add_assoc_long(*subarray, "masklen", n);
tp = (u_char*)name;
if (n > 15) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
}
if (n % 16 > 8) {
/* Partial short */
if (cp[0] != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
sprintf((char*)tp, "%x", cp[0] & 0xFF);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
cp++;
}
for (i = (n + 8) / 16; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "ipv6", name, 1);
if (cp < p + dlen) {
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "chain", name, 1);
}
break;
case DNS_T_SRV:
add_assoc_string(*subarray, "type", "SRV", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "weight", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "port", n);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_NAPTR:
add_assoc_string(*subarray, "type", "NAPTR", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "order", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pref", n);
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1);
cp += n;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "replacement", name, 1);
break;
default:
zval_ptr_dtor(subarray);
*subarray = NULL;
cp += dlen;
break;
}
return cp;
}
| 192,550,270,728,735,170,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2014-3597 | Multiple buffer overflows in the php_parserr function in ext/standard/dns.c in PHP before 5.4.32 and 5.5.x before 5.5.16 allow remote DNS servers to cause a denial of service (application crash) or possibly execute arbitrary code via a crafted DNS record, related to the dns_get_record function and the dn_expand function. NOTE: this issue exists because of an incomplete fix for CVE-2014-4049. | https://nvd.nist.gov/vuln/detail/CVE-2014-3597 |
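The commit message lists the two hardening steps: pass the real end of the received answer to dn_expand() and check the remaining length before every fixed-size read. A sketch of how the head of php_parserr() could apply them, assuming an end pointer (answer start plus bytes actually received) is passed down by the caller instead of the fixed answer->qb2+65536 bound:

	n = dn_expand(answer->qb2, end, cp, name, sizeof(name) - 2);
	if (n < 0) {
		return NULL;
	}
	cp += n;

	/* type (2) + class (2) + ttl (4) + dlen (2) must still fit. */
	if (cp + 10 > end) {
		return NULL;
	}
	GETSHORT(type, cp);
	GETSHORT(class, cp);
	GETLONG(ttl, cp);
	GETSHORT(dlen, cp);
	if (cp + dlen > end) {
		return NULL;
	}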
1,285 | file | 93e063ee374b6a75729df9e7201fb511e47e259d | https://github.com/file/file | https://github.com/file/file/commit/93e063ee374b6a75729df9e7201fb511e47e259d | Add missing check offset test (Francisco Alonso, Jan Kaluza at RedHat) | 1 | static int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
| 39,554,699,879,590,963,000,000,000,000,000,000,000 | cdf.c | 38,142,421,459,194,790,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-3487 | The cdf_read_property_info function in file before 5.19, as used in the Fileinfo component in PHP before 5.4.30 and 5.5.x before 5.5.14, does not properly validate a stream offset, which allows remote attackers to cause a denial of service (application crash) via a crafted CDF file. | https://nvd.nist.gov/vuln/detail/CVE-2014-3487 |
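The "missing check offset test" from the commit title concerns the per-property offset read inside the loop: ofs is attacker-controlled, and the loop above only rejects q > e, so q can land before p (or the descriptor words themselves can sit past e). A sketch of the additional guards, reusing the p/q/e pointers already computed by the function:

		/* The two descriptor words for property i must lie inside the section. */
		if (p + ((i << 1) + 2) * sizeof(uint32_t) > e)
			goto out;
		size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
		q = (const uint8_t *)(const void *)
		    ((const char *)(const void *)p + ofs
		    - 2 * sizeof(uint32_t));
		if (q < p) {
			/* Offset too small / wrapped around: reject the file. */
			DPRINTF(("Offset out of range %p < %p\n", q, p));
			goto out;
		}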
1,286 | file | 40bade80cbe2af1d0b2cd0420cebd5d5905a2382 | https://github.com/file/file | https://github.com/file/file/commit/40bade80cbe2af1d0b2cd0420cebd5d5905a2382 | Fix incorrect bounds check for sector count. (Francisco Alonso and Jan Kaluza
at RedHat) | 1 | static size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
{
size_t i, j;
cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size);
DPRINTF(("Chain:"));
for (j = i = 0; sid >= 0; i++, j++) {
DPRINTF((" %d", sid));
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Counting chain loop limit"));
errno = EFTYPE;
return (size_t)-1;
}
if (sid > maxsector) {
DPRINTF(("Sector %d > %d\n", sid, maxsector));
errno = EFTYPE;
return (size_t)-1;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (i == 0) {
DPRINTF((" none, sid: %d\n", sid));
return (size_t)-1;
}
DPRINTF(("\n"));
return i;
}
| 128,863,893,313,877,960,000,000,000,000,000,000,000 | None | null | [
"CWE-20"
] | CVE-2014-3480 | The cdf_count_chain function in cdf.c in file before 5.19, as used in the Fileinfo component in PHP before 5.4.30 and 5.5.x before 5.5.14, does not properly validate sector-count data, which allows remote attackers to cause a denial of service (application crash) via a crafted CDF file. | https://nvd.nist.gov/vuln/detail/CVE-2014-3480 |
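The incorrect bound the commit title refers to is maxsector: sat->sat_len * size is a byte count while sid indexes 4-byte SAT entries, and the comparison also lets sid == maxsector through. A sketch of the corrected computation and test (everything else as shown above):

	/* The SAT holds (sat_len * size) / sizeof(entry) usable entries. */
	cdf_secid_t maxsector = (cdf_secid_t)((sat->sat_len * size)
	    / sizeof(maxsector));

		/* inside the loop */
		if (sid >= maxsector) {		/* was: sid > maxsector */
			DPRINTF(("Sector %d >= %d\n", sid, maxsector));
			errno = EFTYPE;
			return (size_t)-1;
		}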
1,287 | file | 36fadd29849b8087af9f4586f89dbf74ea45be67 | https://github.com/file/file | https://github.com/file/file/commit/36fadd29849b8087af9f4586f89dbf74ea45be67 | Use the proper sector size when checking stream offsets (Francisco Alonso and
Jan Kaluza at RedHat) | 1 | static int cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
const void *p, size_t tail, int line)
{
const char *b = (const char *)sst->sst_tab;
const char *e = ((const char *)p) + tail;
(void)&line;
if (e >= b && (size_t)(e - b) <= CDF_SEC_SIZE(h) * sst->sst_len)
return 0;
DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
" > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len));
errno = EFTYPE;
return -1;
}
| 126,331,237,157,471,830,000,000,000,000,000,000,000 | cdf.c | 252,409,831,567,122,700,000,000,000,000,000,000,000 | [
"CWE-189"
] | CVE-2014-3479 | The cdf_check_stream_offset function in cdf.c in file before 5.19, as used in the Fileinfo component in PHP before 5.4.30 and 5.5.x before 5.5.14, relies on incorrect sector-size data, which allows remote attackers to cause a denial of service (application crash) via a crafted stream offset in a CDF file. | https://nvd.nist.gov/vuln/detail/CVE-2014-3479 |
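The "proper sector size" means a short stream has to be checked against the short-sector size; multiplying sst_len by the big CDF_SEC_SIZE accepts offsets far past the buffer that was actually allocated. A sketch, assuming the stream records its directory length (sst_dirlen) and the header its minimum standard-stream size, which is how short streams are usually recognised:

	const char *b = (const char *)sst->sst_tab;
	const char *e = ((const char *)p) + tail;
	/* Short streams are laid out in short sectors. */
	size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
	    CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);

	(void)&line;
	if (e >= b && (size_t)(e - b) <= ss * sst->sst_len)
		return 0;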
1,288 | linux | 844817e47eef14141cf59b8d5ac08dd11c0a9189 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/844817e47eef14141cf59b8d5ac08dd11c0a9189 | HID: picolcd: sanity check report size in raw_event() callback
The report passed to us from transport driver could potentially be
arbitrarily large, therefore we better sanity-check it so that raw_data
that we hold in picolcd_pending structure are always kept within proper
bounds.
Cc: stable@vger.kernel.org
Reported-by: Steven Vittitoe <scvitti@google.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static int picolcd_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *raw_data, int size)
{
struct picolcd_data *data = hid_get_drvdata(hdev);
unsigned long flags;
int ret = 0;
if (!data)
return 1;
if (report->id == REPORT_KEY_STATE) {
if (data->input_keys)
ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
} else if (report->id == REPORT_IR_DATA) {
ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
} else {
spin_lock_irqsave(&data->lock, flags);
/*
* We let the caller of picolcd_send_and_wait() check if the
* report we got is one of the expected ones or not.
*/
if (data->pending) {
memcpy(data->pending->raw_data, raw_data+1, size-1);
data->pending->raw_size = size-1;
data->pending->in_report = report;
complete(&data->pending->ready);
}
spin_unlock_irqrestore(&data->lock, flags);
}
picolcd_debug_raw_event(data, hdev, report, raw_data, size);
return 1;
}
| 336,846,250,312,050,030,000,000,000,000,000,000,000 | hid-picolcd_core.c | 186,782,659,027,372,800,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3186 | Buffer overflow in the picolcd_raw_event function in devices/hid/hid-picolcd_core.c in the PicoLCD HID device driver in the Linux kernel through 3.16.3, as used in Android on Nexus 7 devices, allows physically proximate attackers to cause a denial of service (system crash) or possibly execute arbitrary code via a crafted device that sends a large report. | https://nvd.nist.gov/vuln/detail/CVE-2014-3186 |
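The memcpy above copies size-1 bytes into picolcd_pending.raw_data, a fixed 64-byte array, so the sanity check from the commit title amounts to rejecting oversized reports before anything is touched. A minimal sketch at the top of the callback, assuming that 64-byte buffer size:

	if (!data)
		return 1;

	/* raw_data in struct picolcd_pending is 64 bytes; a larger report
	 * from the transport would overflow it in the memcpy() below. */
	if (size > 64) {
		hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
			 size);
		return 0;
	}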
1,289 | linux | 6817ae225cd650fb1c3295d769298c38b1eba818 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/6817ae225cd650fb1c3295d769298c38b1eba818 | USB: whiteheat: Added bounds checking for bulk command response
This patch fixes a potential security issue in the whiteheat USB driver
which might allow a local attacker to cause kernel memory corrpution. This
is due to an unchecked memcpy into a fixed size buffer (of 64 bytes). On
EHCI and XHCI busses it's possible to craft responses greater than 64
bytes leading a buffer overflow.
Signed-off-by: James Forshaw <forshaw@google.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 1 | static void command_port_read_callback(struct urb *urb)
{
struct usb_serial_port *command_port = urb->context;
struct whiteheat_command_private *command_info;
int status = urb->status;
unsigned char *data = urb->transfer_buffer;
int result;
command_info = usb_get_serial_port_data(command_port);
if (!command_info) {
dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
return;
}
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
if (status != -ENOENT)
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
wake_up(&command_info->wait_command);
return;
}
usb_serial_debug_data(&command_port->dev, __func__, urb->actual_length, data);
if (data[0] == WHITEHEAT_CMD_COMPLETE) {
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_CMD_FAILURE) {
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_EVENT) {
/* These are unsolicited reports from the firmware, hence no
waiting command to wakeup */
dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
} else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
memcpy(command_info->result_buffer, &data[1],
urb->actual_length - 1);
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
wake_up(&command_info->wait_command);
} else
dev_dbg(&urb->dev->dev, "%s - bad reply from firmware\n", __func__);
/* Continue trying to always read */
result = usb_submit_urb(command_port->read_urb, GFP_ATOMIC);
if (result)
dev_dbg(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n",
__func__, result);
}
| 11,995,144,287,059,040,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2014-3185 | Multiple buffer overflows in the command_port_read_callback function in drivers/usb/serial/whiteheat.c in the Whiteheat USB Serial Driver in the Linux kernel before 3.16.2 allow physically proximate attackers to execute arbitrary code or cause a denial of service (memory corruption and system crash) via a crafted device that provides a large amount of (1) EHCI or (2) XHCI data associated with a bulk response. | https://nvd.nist.gov/vuln/detail/CVE-2014-3185 |
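A sketch of the bounded copy the commit message above calls for in the WHITEHEAT_GET_DTR_RTS branch. It assumes result_buffer is the fixed 64-byte array the commit message mentions, so sizeof() gives its capacity; the exact shape of the upstream check may differ:
} else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
        size_t n = urb->actual_length - 1;

        /* clamp the device-supplied length to the fixed-size buffer */
        if (n > sizeof(command_info->result_buffer))
                n = sizeof(command_info->result_buffer);
        memcpy(command_info->result_buffer, &data[1], n);
        command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
        wake_up(&command_info->wait_command);
}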
1,290 | linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: stable@vger.kernel.org
Reported-by: Ben Hawkes <hawkes@google.com>
Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
rdesc[11] = rdesc[16] = 0xff;
rdesc[12] = rdesc[17] = 0x03;
}
return rdesc;
}
| 45,538,586,871,323,170,000,000,000,000,000,000,000 | hid-cherry.c | 187,455,207,934,208,830,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3184 | The report_fixup functions in the HID subsystem in the Linux kernel before 3.16.2 might allow physically proximate attackers to cause a denial of service (out-of-bounds write) via a crafted device that provides a small report descriptor, related to (1) drivers/hid/hid-cherry.c, (2) drivers/hid/hid-kye.c, (3) drivers/hid/hid-lg.c, (4) drivers/hid/hid-monterey.c, (5) drivers/hid/hid-petalynx.c, and (6) drivers/hid/hid-sunplus.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3184 |
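The off-by-one in ch_report_fixup() above sits in the guard itself: the fixup writes rdesc[16] and rdesc[17], but *rsize >= 17 only guarantees that indices 0..16 exist. Requiring one more byte closes it; this follows from the arithmetic, though the upstream patch text is not reproduced here:
if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
        hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
        rdesc[11] = rdesc[16] = 0xff;
        rdesc[12] = rdesc[17] = 0x03;
}
The same reasoning applies to the other report_fixup rows for this commit below: each size bound must cover the highest rdesc index the fixup touches.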
1,291 | linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: stable@vger.kernel.org
Reported-by: Ben Hawkes <hawkes@google.com>
Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
case USB_DEVICE_ID_KYE_ERGO_525V:
/* the fixups that need to be done:
* - change led usage page to button for extra buttons
* - report size 8 count 1 must be size 1 count 8 for button
* bitfield
* - change the button usage range to 4-7 for the extra
* buttons
*/
if (*rsize >= 74 &&
rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
rdesc[71] == 0x75 && rdesc[72] == 0x08 &&
rdesc[73] == 0x95 && rdesc[74] == 0x01) {
hid_info(hdev,
"fixing up Kye/Genius Ergo Mouse "
"report descriptor\n");
rdesc[62] = 0x09;
rdesc[64] = 0x04;
rdesc[66] = 0x07;
rdesc[72] = 0x01;
rdesc[74] = 0x08;
}
break;
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
if (*rsize == EASYPEN_I405X_RDESC_ORIG_SIZE) {
rdesc = easypen_i405x_rdesc_fixed;
*rsize = sizeof(easypen_i405x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
if (*rsize == EASYPEN_M610X_RDESC_ORIG_SIZE) {
rdesc = easypen_m610x_rdesc_fixed;
*rsize = sizeof(easypen_m610x_rdesc_fixed);
}
break;
case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Gila Gaming Mouse");
break;
case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
"Genius Gx Imperator Keyboard");
break;
case USB_DEVICE_ID_GENIUS_MANTICORE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Manticore Keyboard");
break;
}
return rdesc;
}
| 61,701,421,720,071,810,000,000,000,000,000,000,000 | hid-kye.c | 236,592,989,497,276,160,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3184 | The report_fixup functions in the HID subsystem in the Linux kernel before 3.16.2 might allow physically proximate attackers to cause a denial of service (out-of-bounds write) via a crafted device that provides a small report descriptor, related to (1) drivers/hid/hid-cherry.c, (2) drivers/hid/hid-kye.c, (3) drivers/hid/hid-lg.c, (4) drivers/hid/hid-monterey.c, (5) drivers/hid/hid-petalynx.c, and (6) drivers/hid/hid-sunplus.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3184 |
1,292 | linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: stable@vger.kernel.org
Reported-by: Ben Hawkes <hawkes@google.com>
Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
struct usb_device_descriptor *udesc;
__u16 bcdDevice, rev_maj, rev_min;
if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
hid_info(hdev,
"fixing up Logitech keyboard report descriptor\n");
rdesc[84] = rdesc[89] = 0x4d;
rdesc[85] = rdesc[90] = 0x10;
}
if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
rdesc[49] == 0x81 && rdesc[50] == 0x06) {
hid_info(hdev,
"fixing up rel/abs in Logitech report descriptor\n");
rdesc[33] = rdesc[50] = 0x02;
}
switch (hdev->product) {
/* Several wheels report as this id when operating in emulation mode. */
case USB_DEVICE_ID_LOGITECH_WHEEL:
udesc = &(hid_to_usb_dev(hdev)->descriptor);
if (!udesc) {
hid_err(hdev, "NULL USB device descriptor\n");
break;
}
bcdDevice = le16_to_cpu(udesc->bcdDevice);
rev_maj = bcdDevice >> 8;
rev_min = bcdDevice & 0xff;
/* Update the report descriptor for only the Driving Force wheel */
if (rev_maj == 1 && rev_min == 2 &&
*rsize == DF_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Driving Force report descriptor\n");
rdesc = df_rdesc_fixed;
*rsize = sizeof(df_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
if (*rsize == MOMO_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Momo Force (Red) report descriptor\n");
rdesc = momo_rdesc_fixed;
*rsize = sizeof(momo_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Momo Racing Force (Black) report descriptor\n");
rdesc = momo2_rdesc_fixed;
*rsize = sizeof(momo2_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
if (*rsize == FV_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Formula Vibration report descriptor\n");
rdesc = fv_rdesc_fixed;
*rsize = sizeof(fv_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
if (*rsize == DFP_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Driving Force Pro report descriptor\n");
rdesc = dfp_rdesc_fixed;
*rsize = sizeof(dfp_rdesc_fixed);
}
break;
case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
rdesc[47] == 0x05 && rdesc[48] == 0x09) {
hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n");
rdesc[41] = 0x05;
rdesc[42] = 0x09;
rdesc[47] = 0x95;
rdesc[48] = 0x0B;
}
break;
}
return rdesc;
}
| 279,704,939,167,865,600,000,000,000,000,000,000,000 | hid-lg.c | 90,537,780,722,919,720,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3184 | The report_fixup functions in the HID subsystem in the Linux kernel before 3.16.2 might allow physically proximate attackers to cause a denial of service (out-of-bounds write) via a crafted device that provides a small report descriptor, related to (1) drivers/hid/hid-cherry.c, (2) drivers/hid/hid-kye.c, (3) drivers/hid/hid-lg.c, (4) drivers/hid/hid-monterey.c, (5) drivers/hid/hid-petalynx.c, and (6) drivers/hid/hid-sunplus.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3184 |
1,293 | linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: stable@vger.kernel.org
Reported-by: Ben Hawkes <hawkes@google.com>
Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
rdesc[30] = 0x0c;
}
return rdesc;
}
| 85,432,680,744,687,480,000,000,000,000,000,000,000 | hid-monterey.c | 41,416,622,423,334,174,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3184 | The report_fixup functions in the HID subsystem in the Linux kernel before 3.16.2 might allow physically proximate attackers to cause a denial of service (out-of-bounds write) via a crafted device that provides a small report descriptor, related to (1) drivers/hid/hid-cherry.c, (2) drivers/hid/hid-kye.c, (3) drivers/hid/hid-lg.c, (4) drivers/hid/hid-monterey.c, (5) drivers/hid/hid-petalynx.c, and (6) drivers/hid/hid-sunplus.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3184 |
1,294 | linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: stable@vger.kernel.org
Reported-by: Ben Hawkes <hawkes@google.com>
Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
rdesc[60] = 0xfa;
rdesc[40] = 0xfa;
}
return rdesc;
}
| 284,095,831,257,846,900,000,000,000,000,000,000,000 | hid-petalynx.c | 174,245,996,495,446,770,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3184 | The report_fixup functions in the HID subsystem in the Linux kernel before 3.16.2 might allow physically proximate attackers to cause a denial of service (out-of-bounds write) via a crafted device that provides a small report descriptor, related to (1) drivers/hid/hid-cherry.c, (2) drivers/hid/hid-kye.c, (3) drivers/hid/hid-lg.c, (4) drivers/hid/hid-monterey.c, (5) drivers/hid/hid-petalynx.c, and (6) drivers/hid/hid-sunplus.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3184 |
1,295 | linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: stable@vger.kernel.org
Reported-by: Ben Hawkes <hawkes@google.com>
Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
rdesc[106] == 0x03) {
hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
rdesc[105] = rdesc[110] = 0x03;
rdesc[106] = rdesc[111] = 0x21;
}
return rdesc;
}
| 187,317,300,382,672,400,000,000,000,000,000,000,000 | hid-sunplus.c | 175,340,908,370,041,700,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3184 | The report_fixup functions in the HID subsystem in the Linux kernel before 3.16.2 might allow physically proximate attackers to cause a denial of service (out-of-bounds write) via a crafted device that provides a small report descriptor, related to (1) drivers/hid/hid-cherry.c, (2) drivers/hid/hid-kye.c, (3) drivers/hid/hid-lg.c, (4) drivers/hid/hid-monterey.c, (5) drivers/hid/hid-petalynx.c, and (6) drivers/hid/hid-sunplus.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-3184 |
1,296 | linux | 51217e69697fba92a06e07e16f55c9a52d8e8945 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/51217e69697fba92a06e07e16f55c9a52d8e8945 | HID: logitech: fix bounds checking on LED report size
The check on report size for REPORT_TYPE_LEDS in logi_dj_ll_raw_request()
is wrong; the current check doesn't make any sense -- the report allocated
by HID core in hid_hw_raw_request() can be much larger than
DJREPORT_SHORT_LENGTH, and currently logi_dj_ll_raw_request() doesn't
handle this properly at all.
Fix the check by actually trimming down the report size properly if it is
too large.
Cc: stable@vger.kernel.org
Reported-by: Ben Hawkes <hawkes@google.com>
Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static int logi_dj_ll_raw_request(struct hid_device *hid,
unsigned char reportnum, __u8 *buf,
size_t count, unsigned char report_type,
int reqtype)
{
struct dj_device *djdev = hid->driver_data;
struct dj_receiver_dev *djrcv_dev = djdev->dj_receiver_dev;
u8 *out_buf;
int ret;
if (buf[0] != REPORT_TYPE_LEDS)
return -EINVAL;
out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC);
if (!out_buf)
return -ENOMEM;
if (count < DJREPORT_SHORT_LENGTH - 2)
count = DJREPORT_SHORT_LENGTH - 2;
out_buf[0] = REPORT_ID_DJ_SHORT;
out_buf[1] = djdev->device_index;
memcpy(out_buf + 2, buf, count);
ret = hid_hw_raw_request(djrcv_dev->hdev, out_buf[0], out_buf,
DJREPORT_SHORT_LENGTH, report_type, reqtype);
kfree(out_buf);
return ret;
}
| 256,114,427,603,892,440,000,000,000,000,000,000,000 | hid-logitech-dj.c | 259,749,799,322,006,870,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3183 | Heap-based buffer overflow in the logi_dj_ll_raw_request function in drivers/hid/hid-logitech-dj.c in the Linux kernel before 3.16.2 allows physically proximate attackers to cause a denial of service (system crash) or possibly execute arbitrary code via a crafted device that specifies a large report size for an LED report. | https://nvd.nist.gov/vuln/detail/CVE-2014-3183 |
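In the function above the length check points the wrong way: a short count is padded up to DJREPORT_SHORT_LENGTH - 2, but an oversized count is copied unchanged into the DJREPORT_SHORT_LENGTH buffer. A sketch of trimming instead of padding, as the commit message describes (the upstream patch may additionally reject buffers that are too short):
out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC);
if (!out_buf)
        return -ENOMEM;

/* copy at most what fits behind the two header bytes */
if (count > DJREPORT_SHORT_LENGTH - 2)
        count = DJREPORT_SHORT_LENGTH - 2;

out_buf[0] = REPORT_ID_DJ_SHORT;
out_buf[1] = djdev->device_index;
memcpy(out_buf + 2, buf, count);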
1,297 | linux | ad3e14d7c5268c2e24477c6ef54bbdf88add5d36 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/ad3e14d7c5268c2e24477c6ef54bbdf88add5d36 | HID: logitech: perform bounds checking on device_id early enough
device_index is a char type and the size of paired_dj_devices is 7
elements, therefore proper bounds checking has to be applied to
device_index before it is used.
We are currently performing the bounds checking in
logi_dj_recv_add_djhid_device(), which is too late, as malicious device
could send REPORT_TYPE_NOTIF_DEVICE_UNPAIRED early enough and trigger the
problem in one of the report forwarding functions called from
logi_dj_raw_event().
Fix this by performing the check at the earliest possible occasion in
logi_dj_raw_event().
Cc: stable@vger.kernel.org
Reported-by: Ben Hawkes <hawkes@google.com>
Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static int logi_dj_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data,
int size)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
struct dj_report *dj_report = (struct dj_report *) data;
unsigned long flags;
bool report_processed = false;
dbg_hid("%s, size:%d\n", __func__, size);
/* Here we receive all data coming from iface 2, there are 4 cases:
*
* 1) Data should continue its normal processing i.e. data does not
* come from the DJ collection, in which case we do nothing and
* return 0, so hid-core can continue normal processing (will forward
* to associated hidraw device)
*
* 2) Data is from DJ collection, and is intended for this driver i. e.
* data contains arrival, departure, etc notifications, in which case
* we queue them for delayed processing by the work queue. We return 1
* to hid-core as no further processing is required from it.
*
* 3) Data is from DJ collection, and informs a connection change,
* if the change means rf link loss, then we must send a null report
* to the upper layer to discard potentially pressed keys that may be
* repeated forever by the input layer. Return 1 to hid-core as no
* further processing is required.
*
* 4) Data is from DJ collection and is an actual input event from
* a paired DJ device in which case we forward it to the correct hid
* device (via hid_input_report() ) and return 1 so hid-core does not do
* anything else with it.
*/
spin_lock_irqsave(&djrcv_dev->lock, flags);
if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
switch (dj_report->report_type) {
case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
logi_dj_recv_queue_notification(djrcv_dev, dj_report);
break;
case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
STATUS_LINKLOSS) {
logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
}
break;
default:
logi_dj_recv_forward_report(djrcv_dev, dj_report);
}
report_processed = true;
}
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return report_processed;
}
| 260,936,126,132,763,400,000,000,000,000,000,000,000 | hid-logitech-dj.c | 139,550,206,137,274,650,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3182 | Array index error in the logi_dj_raw_event function in drivers/hid/hid-logitech-dj.c in the Linux kernel before 3.16.2 allows physically proximate attackers to execute arbitrary code or cause a denial of service (invalid kfree) via a crafted device that provides a malformed REPORT_TYPE_NOTIF_DEVICE_UNPAIRED value. | https://nvd.nist.gov/vuln/detail/CVE-2014-3182 |
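A sketch of the early validation the commit message above asks for, at the point where logi_dj_raw_event() first sees the report and before anything is forwarded. DJ_DEVICE_INDEX_MIN/MAX are assumed to bound the 7-element paired_dj_devices[] array; the exact constant names and error handling in the upstream patch may differ:
if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
        /* device_index comes straight from the device; validate it
         * before it can be used to index paired_dj_devices[] */
        if (dj_report->device_index < DJ_DEVICE_INDEX_MIN ||
            dj_report->device_index > DJ_DEVICE_INDEX_MAX) {
                dev_err(&hdev->dev, "%s: invalid device index: %d\n",
                        __func__, dj_report->device_index);
                spin_unlock_irqrestore(&djrcv_dev->lock, flags);
                return 0;
        }
        /* the existing switch on dj_report->report_type follows unchanged */
}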
1,299 | linux | c54def7bd64d7c0b6993336abcffb8444795bf38 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/c54def7bd64d7c0b6993336abcffb8444795bf38 | HID: magicmouse: sanity check report size in raw_event() callback
The report passed to us from transport driver could potentially be
arbitrarily large, therefore we better sanity-check it so that
magicmouse_emit_touch() gets only valid values of raw_id.
Cc: stable@vger.kernel.org
Reported-by: Steven Vittitoe <scvitti@google.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static int magicmouse_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
struct input_dev *input = msc->input;
int x = 0, y = 0, ii, clicks = 0, npoints;
switch (data[0]) {
case TRACKPAD_REPORT_ID:
/* Expect four bytes of prefix, and N*9 bytes of touch data. */
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
npoints = (size - 4) / 9;
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
clicks = data[1];
/* The following bits provide a device specific timestamp. They
* are unused here.
*
* ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
*/
break;
case MOUSE_REPORT_ID:
/* Expect six bytes of prefix, and N*8 bytes of touch data. */
if (size < 6 || ((size - 6) % 8) != 0)
return 0;
npoints = (size - 6) / 8;
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
/* When emulating three-button mode, it is important
* to have the current touch information before
* generating a click event.
*/
x = (int)(((data[3] & 0x0c) << 28) | (data[1] << 22)) >> 22;
y = (int)(((data[3] & 0x30) << 26) | (data[2] << 22)) >> 22;
clicks = data[3];
/* The following bits provide a device specific timestamp. They
* are unused here.
*
* ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
*/
break;
case DOUBLE_REPORT_ID:
/* Sometimes the trackpad sends two touch reports in one
* packet.
*/
magicmouse_raw_event(hdev, report, data + 2, data[1]);
magicmouse_raw_event(hdev, report, data + 2 + data[1],
size - 2 - data[1]);
break;
default:
return 0;
}
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
input_mt_report_pointer_emulation(input, true);
}
input_sync(input);
return 1;
}
| 281,911,557,577,455,930,000,000,000,000,000,000,000 | hid-magicmouse.c | 134,181,979,015,041,700,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3181 | Multiple stack-based buffer overflows in the magicmouse_raw_event function in drivers/hid/hid-magicmouse.c in the Magic Mouse HID driver in the Linux kernel through 3.16.3 allow physically proximate attackers to cause a denial of service (system crash) or possibly execute arbitrary code via a crafted device that provides a large amount of (1) EHCI or (2) XHCI data associated with an event. | https://nvd.nist.gov/vuln/detail/CVE-2014-3181 |
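The overflow path in the function above is npoints: it is derived purely from the device-supplied size and then drives magicmouse_emit_touch() for every contact, so an oversized report walks past the driver's per-touch state array. A sketch of the cap the commit message describes for the trackpad case; the limit of 15 contacts is an assumption taken from the array size in mainline, and the mouse case needs the same treatment:
case TRACKPAD_REPORT_ID:
        /* Expect four bytes of prefix, and N*9 bytes of touch data. */
        if (size < 4 || ((size - 4) % 9) != 0)
                return 0;
        npoints = (size - 4) / 9;
        /* don't let a crafted report index past the touch-state array */
        if (npoints > 15) {
                hid_warn(hdev,
                         "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
                         size);
                return 0;
        }
        msc->ntouches = 0;
        for (ii = 0; ii < npoints; ii++)
                magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);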
1,300 | ppp | 7658e8257183f062dc01f87969c140707c7e52cb | https://github.com/paulusmack/ppp | https://github.com/paulusmack/ppp/commit/7658e8257183f062dc01f87969c140707c7e52cb | pppd: Eliminate potential integer overflow in option parsing
When we are reading in a word from an options file, we maintain a count
of the length we have seen so far in 'len', which is an int. When len
exceeds MAXWORDLEN - 1 (i.e. 1023) we cease storing characters in the
buffer but we continue to increment len. Since len is an int, it will
wrap around to -2147483648 after it reaches 2147483647. At that point
our test of (len < MAXWORDLEN-1) will succeed and we will start writing
characters to memory again.
This may enable an attacker to overwrite the heap and thereby corrupt
security-relevant variables. For this reason it has been assigned a
CVE identifier, CVE-2014-3158.
This fixes the bug by ceasing to increment len once it reaches MAXWORDLEN.
Reported-by: Lee Campbell <leecam@google.com>
Signed-off-by: Paul Mackerras <paulus@samba.org> | 1 | getword(f, word, newlinep, filename)
FILE *f;
char *word;
int *newlinep;
char *filename;
{
int c, len, escape;
int quoted, comment;
int value, digit, got, n;
#define isoctal(c) ((c) >= '0' && (c) < '8')
*newlinep = 0;
len = 0;
escape = 0;
comment = 0;
quoted = 0;
/*
* First skip white-space and comments.
*/
for (;;) {
c = getc(f);
if (c == EOF)
break;
/*
* A newline means the end of a comment; backslash-newline
* is ignored. Note that we cannot have escape && comment.
*/
if (c == '\n') {
if (!escape) {
*newlinep = 1;
comment = 0;
} else
escape = 0;
continue;
}
/*
* Ignore characters other than newline in a comment.
*/
if (comment)
continue;
/*
* If this character is escaped, we have a word start.
*/
if (escape)
break;
/*
* If this is the escape character, look at the next character.
*/
if (c == '\\') {
escape = 1;
continue;
}
/*
* If this is the start of a comment, ignore the rest of the line.
*/
if (c == '#') {
comment = 1;
continue;
}
/*
* A non-whitespace character is the start of a word.
*/
if (!isspace(c))
break;
}
/*
* Process characters until the end of the word.
*/
while (c != EOF) {
if (escape) {
/*
* This character is escaped: backslash-newline is ignored,
* various other characters indicate particular values
* as for C backslash-escapes.
*/
escape = 0;
if (c == '\n') {
c = getc(f);
continue;
}
got = 0;
switch (c) {
case 'a':
value = '\a';
break;
case 'b':
value = '\b';
break;
case 'f':
value = '\f';
break;
case 'n':
value = '\n';
break;
case 'r':
value = '\r';
break;
case 's':
value = ' ';
break;
case 't':
value = '\t';
break;
default:
if (isoctal(c)) {
/*
* \ddd octal sequence
*/
value = 0;
for (n = 0; n < 3 && isoctal(c); ++n) {
value = (value << 3) + (c & 07);
c = getc(f);
}
got = 1;
break;
}
if (c == 'x') {
/*
* \x<hex_string> sequence
*/
value = 0;
c = getc(f);
for (n = 0; n < 2 && isxdigit(c); ++n) {
digit = toupper(c) - '0';
if (digit > 10)
digit += '0' + 10 - 'A';
value = (value << 4) + digit;
c = getc (f);
}
got = 1;
break;
}
/*
* Otherwise the character stands for itself.
*/
value = c;
break;
}
/*
* Store the resulting character for the escape sequence.
*/
if (len < MAXWORDLEN-1)
word[len] = value;
++len;
if (!got)
c = getc(f);
continue;
}
/*
* Backslash starts a new escape sequence.
*/
if (c == '\\') {
escape = 1;
c = getc(f);
continue;
}
/*
* Not escaped: check for the start or end of a quoted
* section and see if we've reached the end of the word.
*/
if (quoted) {
if (c == quoted) {
quoted = 0;
c = getc(f);
continue;
}
} else if (c == '"' || c == '\'') {
quoted = c;
c = getc(f);
continue;
} else if (isspace(c) || c == '#') {
ungetc (c, f);
break;
}
/*
* An ordinary character: store it in the word and get another.
*/
if (len < MAXWORDLEN-1)
word[len] = c;
++len;
c = getc(f);
}
/*
* End of the word: check for errors.
*/
if (c == EOF) {
if (ferror(f)) {
if (errno == 0)
errno = EIO;
option_error("Error reading %s: %m", filename);
die(1);
}
/*
* If len is zero, then we didn't find a word before the
* end of the file.
*/
if (len == 0)
return 0;
if (quoted)
option_error("warning: quoted word runs to end of file (%.20s...)",
filename, word);
}
/*
* Warn if the word was too long, and append a terminating null.
*/
if (len >= MAXWORDLEN) {
option_error("warning: word in file %s too long (%.20s...)",
filename, word);
len = MAXWORDLEN - 1;
}
word[len] = 0;
return 1;
#undef isoctal
}
| 136,171,222,581,299,580,000,000,000,000,000,000,000 | options.c | 280,913,819,064,269,670,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-3158 | Integer overflow in the getword function in options.c in pppd in Paul's PPP Package (ppp) before 2.4.7 allows attackers to "access privileged options" via a long word in an options file, which triggers a heap-based buffer overflow that "[corrupts] security-relevant variables." | https://nvd.nist.gov/vuln/detail/CVE-2014-3158 |
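The wrap-around described in the commit message above happens because len keeps growing even when the character is discarded; once it passes INT_MAX, the (len < MAXWORDLEN-1) test succeeds again and writes resume. A sketch of the saturating pattern the commit describes, applied where the escape-sequence value is stored (the ordinary-character branch needs the same change):
/* store the resulting character, but never let len run unbounded */
if (len < MAXWORDLEN-1)
        word[len] = value;
if (len < MAXWORDLEN)
        ++len;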
1,301 | linux | e9c243a5a6de0be8e584c604d353412584b592f8 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/e9c243a5a6de0be8e584c604d353412584b592f8 | futex-prevent-requeue-pi-on-same-futex.patch futex: Forbid uaddr == uaddr2 in futex_requeue(..., requeue_pi=1)
If uaddr == uaddr2, then we have broken the rule of only requeueing from
a non-pi futex to a pi futex with this call. If we attempt this, then
dangling pointers may be left for rt_waiter resulting in an exploitable
condition.
This change brings futex_requeue() in line with futex_wait_requeue_pi()
which performs the same check as per commit 6f7b0a2a5c0f ("futex: Forbid
uaddr == uaddr2 in futex_wait_requeue_pi()")
[ tglx: Compare the resulting keys as well, as uaddrs might be
different depending on the mapping ]
Fixes CVE-2014-3153.
Reported-by: Pinkie Pie
Signed-off-by: Will Drewry <wad@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Darren Hart <dvhart@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | 1 | static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
u32 __user *uaddr2, int nr_wake, int nr_requeue,
u32 *cmpval, int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
if (requeue_pi) {
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
if (refill_pi_state_cache())
return -ENOMEM;
/*
* requeue_pi must wake as many tasks as it can, up to nr_wake
* + nr_requeue, since it acquires the rt_mutex prior to
* returning to userspace, so as to not leave the rt_mutex with
* waiters and no owner. However, second and third wake-ups
* cannot be predicted as they involve race conditions with the
* first wake and a fault while looking up the pi_state. Both
* pthread_cond_signal() and pthread_cond_broadcast() should
* use nr_wake=1.
*/
if (nr_wake != 1)
return -EINVAL;
}
retry:
if (pi_state != NULL) {
/*
* We will have to lookup the pi_state again, so free this one
* to keep the accounting correct.
*/
free_pi_state(pi_state);
pi_state = NULL;
}
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
hb_waiters_inc(hb2);
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
ret = get_user(curval, uaddr1);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
* faults rather in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
* reference to it. If the lock was taken, ret contains the
* vpid of the top waiter task.
*/
if (ret > 0) {
WARN_ON(pi_state);
drop_count++;
task_count++;
/*
* If we acquired the lock, then the user
* space value of uaddr2 should be vpid. It
* cannot be changed by the top waiter as it
* is blocked on hb2 lock if it tries to do
* so. If something fiddled with it behind our
* back the pi state lookup might unearth
* it. So we rather use the known value than
* rereading and handing potential crap to
* lookup_pi_state.
*/
ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL);
}
switch (ret) {
case 0:
break;
case -EFAULT:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
case -EAGAIN:
/* The owner was exiting, try again. */
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
default:
goto out_unlock;
}
}
plist_for_each_entry_safe(this, next, &hb1->chain, list) {
if (task_count - nr_wake >= nr_requeue)
break;
if (!match_futex(&this->key, &key1))
continue;
/*
* FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*
* We should never be requeueing a futex_q with a pi_state,
* which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
(!requeue_pi && this->rt_waiter) ||
this->pi_state) {
ret = -EINVAL;
break;
}
/*
* Wake nr_wake waiters. For requeue_pi, if we acquired the
* lock, we already woke the top_waiter. If not, it will be
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
wake_futex(this);
continue;
}
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
ret = -EINVAL;
break;
}
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
*/
if (requeue_pi) {
/* Prepare the waiter to take the rt_mutex. */
atomic_inc(&pi_state->refcount);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task, 1);
if (ret == 1) {
/* We got the lock. */
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
free_pi_state(pi_state);
goto out_unlock;
}
}
requeue_futex(this, hb1, hb2, &key2);
drop_count++;
}
out_unlock:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
/*
* drop_futex_key_refs() must be called outside the spinlocks. During
* the requeue we moved futex_q's from the hash bucket at key1 to the
* one at key2 and updated their key pointer. We no longer need to
* hold the references to key1.
*/
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
if (pi_state != NULL)
free_pi_state(pi_state);
return ret ? ret : task_count;
}
| 17,926,925,629,239,018,000,000,000,000,000,000,000 | futex.c | 242,837,931,013,533,500,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2014-3153 | The futex_requeue function in kernel/futex.c in the Linux kernel through 3.14.5 does not ensure that calls have two different futex addresses, which allows local users to gain privileges via a crafted FUTEX_REQUEUE command that facilitates unsafe waiter modification. | https://nvd.nist.gov/vuln/detail/CVE-2014-3153 |
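A sketch of the two guards the commit message above describes for the requeue_pi case: reject identical user addresses up front, and reject identical resolved keys as well, since different addresses can map to the same futex. Exact placement inside futex_requeue() is approximate here:
if (requeue_pi) {
        /*
         * requeue_pi requeues from a non-pi futex to a pi futex, so the
         * source and destination words must not be the same.
         */
        if (uaddr1 == uaddr2)
                return -EINVAL;
}

/* ... and once both keys have been resolved via get_futex_key(): */
if (requeue_pi && match_futex(&key1, &key2)) {
        ret = -EINVAL;
        goto out_put_keys;
}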
1,303 | linux | 05ab8f2647e4221cbdb3856dd7d32bd5407316b3 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/05ab8f2647e4221cbdb3856dd7d32bd5407316b3 | filter: prevent nla extensions to peek beyond the end of the message
The BPF_S_ANC_NLATTR and BPF_S_ANC_NLATTR_NEST extensions fail to check
for a minimal message length before testing the supplied offset to be
within the bounds of the message. This allows the subtraction of the nla
header to underflow and therefore -- as the data type is unsigned --
allowing far to big offset and length values for the search of the
netlink attribute.
The remainder calculation for the BPF_S_ANC_NLATTR_NEST extension is
also wrong. It has the minuend and subtrahend mixed up, therefore
calculates a huge length value, allowing to overrun the end of the
message while looking for the netlink attribute.
The following three BPF snippets will trigger the bugs when attached to
a UNIX datagram socket and parsing a message with length 1, 2 or 3.
,-[ PoC for missing size check in BPF_S_ANC_NLATTR ]--
| ld #0x87654321
| ldx #42
| ld #nla
| ret a
`---
,-[ PoC for the same bug in BPF_S_ANC_NLATTR_NEST ]--
| ld #0x87654321
| ldx #42
| ld #nlan
| ret a
`---
,-[ PoC for wrong remainder calculation in BPF_S_ANC_NLATTR_NEST ]--
| ; (needs a fake netlink header at offset 0)
| ld #0
| ldx #42
| ld #nlan
| ret a
`---
Fix the first issue by ensuring the message length fulfills the minimal
size constraints of a nla header. Fix the second bug by getting the math
for the remainder calculation right.
Fixes: 4738c1db15 ("[SKFILTER]: Add SKF_ADF_NLATTR instruction")
Fixes: d214c7537b ("filter: add SKF_AD_NLATTR_NEST to look for nested..")
Cc: Patrick McHardy <kaber@trash.net>
Cc: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Acked-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = (struct nlattr *) &skb->data[A];
if (nla->nla_len > A - skb->len)
return 0;
nla = nla_find_nested(nla, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
| 282,497,948,287,924,030,000,000,000,000,000,000,000 | filter.c | 228,552,301,451,129,950,000,000,000,000,000,000,000 | [
"CWE-189"
] | CVE-2014-3144 | The (1) BPF_S_ANC_NLATTR and (2) BPF_S_ANC_NLATTR_NEST extension implementations in the sk_run_filter function in net/core/filter.c in the Linux kernel through 3.14.3 do not check whether a certain length value is sufficiently large, which allows local users to cause a denial of service (integer underflow and system crash) via crafted BPF instructions. NOTE: the affected code was moved to the __skb_get_nlattr and __skb_get_nlattr_nest functions before the vulnerability was announced. | https://nvd.nist.gov/vuln/detail/CVE-2014-3144 |
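Both defects described in the commit message above are visible in the body: 'A > skb->len - sizeof(struct nlattr)' underflows for messages shorter than a nlattr header, and 'nla->nla_len > A - skb->len' computes the remainder backwards. A sketch with both corrected, following the commit message; the same minimal-length check applies to the plain NLATTR helper as well:
if (skb_is_nonlinear(skb))
        return 0;

/* the message must hold at least one netlink attribute header */
if (skb->len < sizeof(struct nlattr))
        return 0;

if (A > skb->len - sizeof(struct nlattr))
        return 0;

nla = (struct nlattr *) &skb->data[A];

/* bytes remaining after offset A, not the other way around */
if (nla->nla_len > skb->len - A)
        return 0;

nla = nla_find_nested(nla, X);
if (nla)
        return (void *) nla - (void *) skb->data;

return 0;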
1,305 | linux | 57e68e9cd65b4b8eb4045a1e0d0746458502554c | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/57e68e9cd65b4b8eb4045a1e0d0746458502554c | mm: try_to_unmap_cluster() should lock_page() before mlocking
A BUG_ON(!PageLocked) was triggered in mlock_vma_page() by Sasha Levin
fuzzing with trinity. The call site try_to_unmap_cluster() does not lock
the pages other than its check_page parameter (which is already locked).
The BUG_ON in mlock_vma_page() is not documented and its purpose is
somewhat unclear, but apparently it serializes against page migration,
which could otherwise fail to transfer the PG_mlocked flag. This would
not be fatal, as the page would be eventually encountered again, but
NR_MLOCK accounting would become distorted nevertheless. This patch adds
a comment to the BUG_ON in mlock_vma_page() and munlock_vma_page() to that
effect.
The call site try_to_unmap_cluster() is fixed so that for page !=
check_page, trylock_page() is attempted (to avoid possible deadlocks as we
already have check_page locked) and mlock_vma_page() is performed only
upon success. If the page lock cannot be obtained, the page is left
without PG_mlocked, which is again not a problem in the whole unevictable
memory design.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | 1 | static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
struct vm_area_struct *vma, struct page *check_page)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
struct page *page;
unsigned long address;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
unsigned long end;
int ret = SWAP_AGAIN;
int locked_vma = 0;
address = (vma->vm_start + cursor) & CLUSTER_MASK;
end = address + CLUSTER_SIZE;
if (address < vma->vm_start)
address = vma->vm_start;
if (end > vma->vm_end)
end = vma->vm_end;
pmd = mm_find_pmd(mm, address);
if (!pmd)
return ret;
mmun_start = address;
mmun_end = end;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
* If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
* keep the sem while scanning the cluster for mlocking pages.
*/
if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
locked_vma = (vma->vm_flags & VM_LOCKED);
if (!locked_vma)
up_read(&vma->vm_mm->mmap_sem); /* don't need it */
}
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
for (; address < end; pte++, address += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
page = vm_normal_page(vma, address, *pte);
BUG_ON(!page || PageAnon(page));
if (locked_vma) {
mlock_vma_page(page); /* no-op if already mlocked */
if (page == check_page)
ret = SWAP_MLOCK;
continue; /* don't unmap */
}
if (ptep_clear_flush_young_notify(vma, address, pte))
continue;
/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pte));
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
if (page->index != linear_page_index(vma, address)) {
pte_t ptfile = pgoff_to_pte(page->index);
if (pte_soft_dirty(pteval))
pte_file_mksoft_dirty(ptfile);
set_pte_at(mm, address, pte, ptfile);
}
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
set_page_dirty(page);
page_remove_rmap(page);
page_cache_release(page);
dec_mm_counter(mm, MM_FILEPAGES);
(*mapcount)--;
}
pte_unmap_unlock(pte - 1, ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (locked_vma)
up_read(&vma->vm_mm->mmap_sem);
return ret;
}
| 1,665,666,048,709,289,200,000,000,000,000,000,000 | rmap.c | 257,810,476,724,514,140,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2014-3122 | The try_to_unmap_cluster function in mm/rmap.c in the Linux kernel before 3.14.3 does not properly consider which pages must be locked, which allows local users to cause a denial of service (system crash) by triggering a memory-usage pattern that requires removal of page-table mappings. | https://nvd.nist.gov/vuln/detail/CVE-2014-3122 |
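A sketch of the locking change the commit message above describes: check_page is already locked by the caller and can be mlocked directly, while any other page in the cluster is mlocked only if trylock_page() succeeds, since blocking on lock_page() here could deadlock:
if (locked_vma) {
        if (page == check_page) {
                /* the caller already holds this page's lock */
                mlock_vma_page(page);
                ret = SWAP_MLOCK;
        } else if (trylock_page(page)) {
                /* if the lock can't be taken, leave the page alone;
                 * it will be encountered again later */
                mlock_vma_page(page);
                unlock_page(page);
        }
        continue;       /* don't unmap */
}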
1,306 | linux | a03ffcf873fe0f2565386ca8ef832144c42e67fa | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/a03ffcf873fe0f2565386ca8ef832144c42e67fa | net: bpf_jit: fix an off-one bug in x86_64 cond jump target
x86 jump instruction size is 2 or 5 bytes (near/long jump), not 2 or 6
bytes.
In case a conditional jump is followed by a long jump, conditional jump
target is one byte past the start of target instruction.
Signed-off-by: Markus Kötter <nepenthesdev@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | void bpf_jit_compile(struct sk_filter *fp)
{
u8 temp[64];
u8 *prog;
unsigned int proglen, oldproglen = 0;
int ilen, i;
int t_offset, f_offset;
u8 t_op, f_op, seen = 0, pass;
u8 *image = NULL;
u8 *func;
int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
unsigned int cleanup_addr; /* epilogue code offset */
unsigned int *addrs;
const struct sock_filter *filter = fp->insns;
int flen = fp->len;
if (!bpf_jit_enable)
return;
addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
/* Before first pass, make a rough estimation of addrs[]
* each bpf instruction is translated to less than 64 bytes
*/
for (proglen = 0, i = 0; i < flen; i++) {
proglen += 64;
addrs[i] = proglen;
}
cleanup_addr = proglen; /* epilogue address */
for (pass = 0; pass < 10; pass++) {
/* no prologue/epilogue for trivial filters (RET something) */
proglen = 0;
prog = temp;
if (seen) {
EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
/* note : must save %rbx in case bpf_error is hit */
if (seen & (SEEN_XREG | SEEN_DATAREF))
EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
if (seen & SEEN_XREG)
CLEAR_X(); /* make sure we dont leek kernel memory */
/*
* If this filter needs to access skb data,
* loads r9 and r8 with :
* r9 = skb->len - skb->data_len
* r8 = skb->data
*/
if (seen & SEEN_DATAREF) {
if (offsetof(struct sk_buff, len) <= 127)
/* mov off8(%rdi),%r9d */
EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
else {
/* mov off32(%rdi),%r9d */
EMIT3(0x44, 0x8b, 0x8f);
EMIT(offsetof(struct sk_buff, len), 4);
}
if (is_imm8(offsetof(struct sk_buff, data_len)))
/* sub off8(%rdi),%r9d */
EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
else {
EMIT3(0x44, 0x2b, 0x8f);
EMIT(offsetof(struct sk_buff, data_len), 4);
}
if (is_imm8(offsetof(struct sk_buff, data)))
/* mov off8(%rdi),%r8 */
EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
else {
/* mov off32(%rdi),%r8 */
EMIT3(0x4c, 0x8b, 0x87);
EMIT(offsetof(struct sk_buff, data), 4);
}
}
}
switch (filter[0].code) {
case BPF_S_RET_K:
case BPF_S_LD_W_LEN:
case BPF_S_ANC_PROTOCOL:
case BPF_S_ANC_IFINDEX:
case BPF_S_ANC_MARK:
case BPF_S_ANC_RXHASH:
case BPF_S_ANC_CPU:
case BPF_S_ANC_QUEUE:
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
/* first instruction sets A register (or is RET 'constant') */
break;
default:
/* make sure we dont leak kernel information to user */
CLEAR_A(); /* A = 0 */
}
for (i = 0; i < flen; i++) {
unsigned int K = filter[i].k;
switch (filter[i].code) {
case BPF_S_ALU_ADD_X: /* A += X; */
seen |= SEEN_XREG;
EMIT2(0x01, 0xd8); /* add %ebx,%eax */
break;
case BPF_S_ALU_ADD_K: /* A += K; */
if (!K)
break;
if (is_imm8(K))
EMIT3(0x83, 0xc0, K); /* add imm8,%eax */
else
EMIT1_off32(0x05, K); /* add imm32,%eax */
break;
case BPF_S_ALU_SUB_X: /* A -= X; */
seen |= SEEN_XREG;
EMIT2(0x29, 0xd8); /* sub %ebx,%eax */
break;
case BPF_S_ALU_SUB_K: /* A -= K */
if (!K)
break;
if (is_imm8(K))
EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
else
EMIT1_off32(0x2d, K); /* sub imm32,%eax */
break;
case BPF_S_ALU_MUL_X: /* A *= X; */
seen |= SEEN_XREG;
EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */
break;
case BPF_S_ALU_MUL_K: /* A *= K */
if (is_imm8(K))
EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
else {
EMIT2(0x69, 0xc0); /* imul imm32,%eax */
EMIT(K, 4);
}
break;
case BPF_S_ALU_DIV_X: /* A /= X; */
seen |= SEEN_XREG;
EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
if (pc_ret0 != -1)
EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
else {
EMIT_COND_JMP(X86_JNE, 2 + 5);
CLEAR_A();
EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
}
EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
break;
case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
EMIT(K, 4);
EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
break;
case BPF_S_ALU_AND_X:
seen |= SEEN_XREG;
EMIT2(0x21, 0xd8); /* and %ebx,%eax */
break;
case BPF_S_ALU_AND_K:
if (K >= 0xFFFFFF00) {
EMIT2(0x24, K & 0xFF); /* and imm8,%al */
} else if (K >= 0xFFFF0000) {
EMIT2(0x66, 0x25); /* and imm16,%ax */
EMIT2(K, 2);
} else {
EMIT1_off32(0x25, K); /* and imm32,%eax */
}
break;
case BPF_S_ALU_OR_X:
seen |= SEEN_XREG;
EMIT2(0x09, 0xd8); /* or %ebx,%eax */
break;
case BPF_S_ALU_OR_K:
if (is_imm8(K))
EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
else
EMIT1_off32(0x0d, K); /* or imm32,%eax */
break;
case BPF_S_ALU_LSH_X: /* A <<= X; */
seen |= SEEN_XREG;
EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
break;
case BPF_S_ALU_LSH_K:
if (K == 0)
break;
else if (K == 1)
EMIT2(0xd1, 0xe0); /* shl %eax */
else
EMIT3(0xc1, 0xe0, K);
break;
case BPF_S_ALU_RSH_X: /* A >>= X; */
seen |= SEEN_XREG;
EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
break;
case BPF_S_ALU_RSH_K: /* A >>= K; */
if (K == 0)
break;
else if (K == 1)
EMIT2(0xd1, 0xe8); /* shr %eax */
else
EMIT3(0xc1, 0xe8, K);
break;
case BPF_S_ALU_NEG:
EMIT2(0xf7, 0xd8); /* neg %eax */
break;
case BPF_S_RET_K:
if (!K) {
if (pc_ret0 == -1)
pc_ret0 = i;
CLEAR_A();
} else {
EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
}
/* fallinto */
case BPF_S_RET_A:
if (seen) {
if (i != flen - 1) {
EMIT_JMP(cleanup_addr - addrs[i]);
break;
}
if (seen & SEEN_XREG)
EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
EMIT1(0xc9); /* leaveq */
}
EMIT1(0xc3); /* ret */
break;
case BPF_S_MISC_TAX: /* X = A */
seen |= SEEN_XREG;
EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
break;
case BPF_S_MISC_TXA: /* A = X */
seen |= SEEN_XREG;
EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
break;
case BPF_S_LD_IMM: /* A = K */
if (!K)
CLEAR_A();
else
EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
break;
case BPF_S_LDX_IMM: /* X = K */
seen |= SEEN_XREG;
if (!K)
CLEAR_X();
else
EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
break;
case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
seen |= SEEN_MEM;
EMIT3(0x8b, 0x45, 0xf0 - K*4);
break;
case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
seen |= SEEN_XREG | SEEN_MEM;
EMIT3(0x8b, 0x5d, 0xf0 - K*4);
break;
case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
seen |= SEEN_MEM;
EMIT3(0x89, 0x45, 0xf0 - K*4);
break;
case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
seen |= SEEN_XREG | SEEN_MEM;
EMIT3(0x89, 0x5d, 0xf0 - K*4);
break;
case BPF_S_LD_W_LEN: /* A = skb->len; */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
if (is_imm8(offsetof(struct sk_buff, len)))
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, len), 4);
}
break;
case BPF_S_LDX_W_LEN: /* X = skb->len; */
seen |= SEEN_XREG;
if (is_imm8(offsetof(struct sk_buff, len)))
/* mov off8(%rdi),%ebx */
EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
else {
EMIT2(0x8b, 0x9f);
EMIT(offsetof(struct sk_buff, len), 4);
}
break;
case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
if (is_imm8(offsetof(struct sk_buff, protocol))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
} else {
EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
EMIT(offsetof(struct sk_buff, protocol), 4);
}
EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
break;
case BPF_S_ANC_IFINDEX:
if (is_imm8(offsetof(struct sk_buff, dev))) {
/* movq off8(%rdi),%rax */
EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
} else {
EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
EMIT(offsetof(struct sk_buff, dev), 4);
}
EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
EMIT(offsetof(struct net_device, ifindex), 4);
break;
case BPF_S_ANC_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
if (is_imm8(offsetof(struct sk_buff, mark))) {
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
} else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, mark), 4);
}
break;
case BPF_S_ANC_RXHASH:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
if (is_imm8(offsetof(struct sk_buff, rxhash))) {
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
} else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, rxhash), 4);
}
break;
case BPF_S_ANC_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
} else {
EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
EMIT(offsetof(struct sk_buff, queue_mapping), 4);
}
break;
case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
#else
CLEAR_A();
#endif
break;
case BPF_S_LD_W_ABS:
func = sk_load_word;
common_load: seen |= SEEN_DATAREF;
if ((int)K < 0)
goto out;
t_offset = func - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call */
break;
case BPF_S_LD_H_ABS:
func = sk_load_half;
goto common_load;
case BPF_S_LD_B_ABS:
func = sk_load_byte;
goto common_load;
case BPF_S_LDX_B_MSH:
if ((int)K < 0) {
if (pc_ret0 != -1) {
EMIT_JMP(addrs[pc_ret0] - addrs[i]);
break;
}
CLEAR_A();
EMIT_JMP(cleanup_addr - addrs[i]);
break;
}
seen |= SEEN_DATAREF | SEEN_XREG;
t_offset = sk_load_byte_msh - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
break;
case BPF_S_LD_W_IND:
func = sk_load_word_ind;
common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
t_offset = func - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
break;
case BPF_S_LD_H_IND:
func = sk_load_half_ind;
goto common_load_ind;
case BPF_S_LD_B_IND:
func = sk_load_byte_ind;
goto common_load_ind;
case BPF_S_JMP_JA:
t_offset = addrs[i + K] - addrs[i];
EMIT_JMP(t_offset);
break;
COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
t_offset = addrs[i + filter[i].jt] - addrs[i];
/* same targets, can avoid doing the test :) */
if (filter[i].jt == filter[i].jf) {
EMIT_JMP(t_offset);
break;
}
switch (filter[i].code) {
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JEQ_X:
seen |= SEEN_XREG;
EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
break;
case BPF_S_JMP_JSET_X:
seen |= SEEN_XREG;
EMIT2(0x85, 0xd8); /* test %ebx,%eax */
break;
case BPF_S_JMP_JEQ_K:
if (K == 0) {
EMIT2(0x85, 0xc0); /* test %eax,%eax */
break;
}
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGE_K:
if (K <= 127)
EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
else
EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
break;
case BPF_S_JMP_JSET_K:
if (K <= 0xFF)
EMIT2(0xa8, K); /* test imm8,%al */
else if (!(K & 0xFFFF00FF))
EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
else if (K <= 0xFFFF) {
EMIT2(0x66, 0xa9); /* test imm16,%ax */
EMIT(K, 2);
} else {
EMIT1_off32(0xa9, K); /* test imm32,%eax */
}
break;
}
if (filter[i].jt != 0) {
if (filter[i].jf)
t_offset += is_near(f_offset) ? 2 : 6;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);
break;
}
EMIT_COND_JMP(f_op, f_offset);
break;
default:
/* hmm, too complex filter, give up with jit compiler */
goto out;
}
ilen = prog - temp;
if (image) {
if (unlikely(proglen + ilen > oldproglen)) {
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
module_free(NULL, image);
return;
}
memcpy(image + proglen, temp, ilen);
}
proglen += ilen;
addrs[i] = proglen;
prog = temp;
}
/* last bpf instruction is always a RET :
* use it to give the cleanup instruction(s) addr
*/
cleanup_addr = proglen - 1; /* ret */
if (seen)
cleanup_addr -= 1; /* leaveq */
if (seen & SEEN_XREG)
cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
if (image) {
WARN_ON(proglen != oldproglen);
break;
}
if (proglen == oldproglen) {
image = module_alloc(max_t(unsigned int,
proglen,
sizeof(struct work_struct)));
if (!image)
goto out;
}
oldproglen = proglen;
}
if (bpf_jit_enable > 1)
pr_err("flen=%d proglen=%u pass=%d image=%p\n",
flen, proglen, pass, image);
if (image) {
if (bpf_jit_enable > 1)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
16, 1, image, proglen, false);
bpf_flush_icache(image, image + proglen);
fp->bpf_func = (void *)image;
}
out:
kfree(addrs);
return;
}
| 197,306,910,127,583,670,000,000,000,000,000,000,000 | bpf_jit_comp.c | 118,916,419,377,068,630,000,000,000,000,000,000,000 | [
"CWE-189"
] | CVE-2014-2889 | Off-by-one error in the bpf_jit_compile function in arch/x86/net/bpf_jit_comp.c in the Linux kernel before 3.1.8, when BPF JIT is enabled, allows local users to cause a denial of service (system crash) or possibly gain privileges via a long jump after a conditional jump. | https://nvd.nist.gov/vuln/detail/CVE-2014-2889 |
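The off-by-one in the function above is the '? 2 : 6' used when a conditional jump is followed by an unconditional one: a short jmp is 2 bytes, but a long jmp is opcode 0xe9 plus a 4-byte displacement, i.e. 5 bytes, so using 6 lands the conditional-jump target one byte inside the next instruction. A sketch of the corrected emission:
if (filter[i].jt != 0) {
        /* skip the following jmp: 2 bytes if near, else 5 (0xe9 + rel32) */
        if (filter[i].jf)
                t_offset += is_near(f_offset) ? 2 : 5;
        EMIT_COND_JMP(t_op, t_offset);
        if (filter[i].jf)
                EMIT_JMP(f_offset);
        break;
}
EMIT_COND_JMP(f_op, f_offset);
break;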
1,308 | linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b2853fd6c2d0f383dbdf7427e263eb576a633867 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com> | 1 | static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
int offset, ret;
u8 smac[ETH_ALEN];
u8 alt_smac[ETH_ALEN];
u8 *psmac = smac;
u8 *palt_smac = alt_smac;
int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
RDMA_TRANSPORT_IB) &&
(rdma_port_get_link_layer(cm_id->device,
ib_event->param.req_rcvd.port) ==
IB_LINK_LAYER_ETHERNET));
listen_id = cm_id->context;
if (!cma_check_req_qp_type(&listen_id->id, ib_event))
return -EINVAL;
if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
return -ECONNABORTED;
memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
conn_id = cma_new_udp_id(&listen_id->id, ib_event);
event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len =
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
} else {
conn_id = cma_new_conn_id(&listen_id->id, ib_event);
cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
ib_event->private_data, offset);
}
if (!conn_id) {
ret = -ENOMEM;
goto err1;
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
ret = cma_acquire_dev(conn_id, listen_id);
if (ret)
goto err2;
conn_id->cm_id.ib = cm_id;
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
/*
* Protect against the user destroying conn_id from another thread
* until we're done accessing it.
*/
atomic_inc(&conn_id->refcount);
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret)
goto err3;
if (is_iboe) {
if (ib_event->param.req_rcvd.primary_path != NULL)
rdma_addr_find_smac_by_sgid(
&ib_event->param.req_rcvd.primary_path->sgid,
psmac, NULL);
else
psmac = NULL;
if (ib_event->param.req_rcvd.alternate_path != NULL)
rdma_addr_find_smac_by_sgid(
&ib_event->param.req_rcvd.alternate_path->sgid,
palt_smac, NULL);
else
palt_smac = NULL;
}
/*
* Acquire mutex to prevent user executing rdma_destroy_id()
* while we're accessing the cm_id.
*/
mutex_lock(&lock);
if (is_iboe)
ib_update_cm_av(cm_id, psmac, palt_smac);
if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
(conn_id->id.qp_type != IB_QPT_UD))
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
return 0;
err3:
cma_deref_id(conn_id);
/* Destroy the CM ID by returning a non-zero value. */
conn_id->cm_id.ib = NULL;
err2:
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
err1:
mutex_unlock(&listen_id->handler_mutex);
if (conn_id)
rdma_destroy_id(&conn_id->id);
return ret;
}
| 47,485,012,312,962,950,000,000,000,000,000,000,000 | cma.c | 19,353,323,018,740,810,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-2739 | The cma_req_handler function in drivers/infiniband/core/cma.c in the Linux kernel 3.14.x through 3.14.1 attempts to resolve an RDMA over Converged Ethernet (aka RoCE) address that is properly resolved within a different module, which allows remote attackers to cause a denial of service (incorrect pointer dereference and system crash) via crafted network traffic. | https://nvd.nist.gov/vuln/detail/CVE-2014-2739 |
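Note on the CVE-2014-2739 record above: the commit message says the passive-side SMAC resolution block is simply removed, because ib_event->param.req_rcvd is not a valid union member for SIDR (UD) exchanges and RC QPs get their L2 address resolved later, at the RTR transition. A condensed sketch of the handler tail once that block is gone (reconstructed from the description, not the exact upstream diff):

	/* no SMAC lookup here: req_rcvd may be the wrong union member for
	 * UD/SIDR exchanges, and RC address resolution happens at RTR time */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);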
1,310 | linux | 1d147bfa64293b2723c4fec50922168658e613ba | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/1d147bfa64293b2723c4fec50922168658e613ba | mac80211: fix AP powersave TX vs. wakeup race
There is a race between the TX path and the STA wakeup: while
a station is sleeping, mac80211 buffers frames until it wakes
up, then the frames are transmitted. However, the RX and TX
path are concurrent, so the packet indicating wakeup can be
processed while a packet is being transmitted.
This can lead to a situation where the buffered frames list
is emptied on the one side, while a frame is being added on
the other side, as the station is still seen as sleeping in
the TX path.
As a result, the newly added frame will not be send anytime
soon. It might be sent much later (and out of order) when the
station goes to sleep and wakes up the next time.
Additionally, it can lead to the crash below.
Fix all this by synchronising both paths with a new lock.
Both path are not fastpath since they handle PS situations.
In a later patch we'll remove the extra skb queue locks to
reduce locking overhead.
BUG: unable to handle kernel
NULL pointer dereference at 000000b0
IP: [<ff6f1791>] ieee80211_report_used_skb+0x11/0x3e0 [mac80211]
*pde = 00000000
Oops: 0000 [#1] SMP DEBUG_PAGEALLOC
EIP: 0060:[<ff6f1791>] EFLAGS: 00210282 CPU: 1
EIP is at ieee80211_report_used_skb+0x11/0x3e0 [mac80211]
EAX: e5900da0 EBX: 00000000 ECX: 00000001 EDX: 00000000
ESI: e41d00c0 EDI: e5900da0 EBP: ebe458e4 ESP: ebe458b0
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
CR0: 8005003b CR2: 000000b0 CR3: 25a78000 CR4: 000407d0
DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
DR6: ffff0ff0 DR7: 00000400
Process iperf (pid: 3934, ti=ebe44000 task=e757c0b0 task.ti=ebe44000)
iwlwifi 0000:02:00.0: I iwl_pcie_enqueue_hcmd Sending command LQ_CMD (#4e), seq: 0x0903, 92 bytes at 3[3]:9
Stack:
e403b32c ebe458c4 00200002 00200286 e403b338 ebe458cc c10960bb e5900da0
ff76a6ec ebe458d8 00000000 e41d00c0 e5900da0 ebe458f0 ff6f1b75 e403b210
ebe4598c ff723dc1 00000000 ff76a6ec e597c978 e403b758 00000002 00000002
Call Trace:
[<ff6f1b75>] ieee80211_free_txskb+0x15/0x20 [mac80211]
[<ff723dc1>] invoke_tx_handlers+0x1661/0x1780 [mac80211]
[<ff7248a5>] ieee80211_tx+0x75/0x100 [mac80211]
[<ff7249bf>] ieee80211_xmit+0x8f/0xc0 [mac80211]
[<ff72550e>] ieee80211_subif_start_xmit+0x4fe/0xe20 [mac80211]
[<c149ef70>] dev_hard_start_xmit+0x450/0x950
[<c14b9aa9>] sch_direct_xmit+0xa9/0x250
[<c14b9c9b>] __qdisc_run+0x4b/0x150
[<c149f732>] dev_queue_xmit+0x2c2/0xca0
Cc: stable@vger.kernel.org
Reported-by: Yaara Rozenblum <yaara.rozenblum@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Reviewed-by: Stanislaw Gruszka <sgruszka@redhat.com>
[reword commit log, use a separate lock]
Signed-off-by: Johannes Berg <johannes.berg@intel.com> | 1 | ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
struct sta_info *sta = tx->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_local *local = tx->local;
if (unlikely(!sta))
return TX_CONTINUE;
if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
!(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
int ac = skb_get_queue_mapping(tx->skb);
ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
sta->sta.addr, sta->sta.aid, ac);
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
ps_dbg(tx->sdata,
"STA %pM TX buffer for AC %d full - dropping oldest frame\n",
sta->sta.addr, ac);
ieee80211_free_txskb(&local->hw, old);
} else
tx->local->total_ps_buffered++;
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
round_jiffies(jiffies +
STA_INFO_CLEANUP_INTERVAL));
/*
* We queued up some frames, so the TIM bit might
* need to be set, recalculate it.
*/
sta_info_recalc_tim(sta);
return TX_QUEUED;
} else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
ps_dbg(tx->sdata,
"STA %pM in PS mode, but polling/in SP -> send frame\n",
sta->sta.addr);
}
return TX_CONTINUE;
}
| 293,546,470,110,660,300,000,000,000,000,000,000,000 | tx.c | 290,987,740,570,977,330,000,000,000,000,000,000,000 | [
"CWE-362"
] | CVE-2014-2706 | Race condition in the mac80211 subsystem in the Linux kernel before 3.13.7 allows remote attackers to cause a denial of service (system crash) via network traffic that improperly interacts with the WLAN_STA_PS_STA state (aka power-save mode), related to sta_info.c and tx.c. | https://nvd.nist.gov/vuln/detail/CVE-2014-2706 |
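Note on the CVE-2014-2706 record above: the described fix serialises this buffering path against the station wakeup path with a new per-station lock. A minimal sketch of the idea; the ps_lock field name is an assumption here, and the real patch also takes the same lock where the wakeup code splices ps_tx_buf:

		/* sketch: re-check the PS flags under a lock shared with the
		 * wakeup path, so a frame cannot be queued after the buffered
		 * frames have already been flushed */
		spin_lock(&sta->ps_lock);
		if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
			spin_unlock(&sta->ps_lock);
			return TX_CONTINUE;
		}
		skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
		spin_unlock(&sta->ps_lock);

The rest of the buffering bookkeeping from the record (total_ps_buffered, TIM recalculation) stays as it is; only the flag check and the queueing need to sit inside the locked section.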
1,311 | linux | 621b5060e823301d0cba4cb52a7ee3491922d291 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/621b5060e823301d0cba4cb52a7ee3491922d291 | powerpc/tm: Fix crash when forking inside a transaction
When we fork/clone we currently don't copy any of the TM state to the new
thread. This results in a TM bad thing (program check) when the new process is
switched in as the kernel does a tmrechkpt with TEXASR FS not set. Also, since
R1 is from userspace, we trigger the bad kernel stack pointer detection. So we
end up with something like this:
Bad kernel stack pointer 0 at c0000000000404fc
cpu 0x2: Vector: 700 (Program Check) at [c00000003ffefd40]
pc: c0000000000404fc: restore_gprs+0xc0/0x148
lr: 0000000000000000
sp: 0
msr: 9000000100201030
current = 0xc000001dd1417c30
paca = 0xc00000000fe00800 softe: 0 irq_happened: 0x01
pid = 0, comm = swapper/2
WARNING: exception is not recoverable, can't continue
The below fixes this by flushing the TM state before we copy the task_struct to
the clone. To do this we go through the tmreclaim patch, which removes the
checkpointed registers from the CPU and transitions the CPU out of TM suspend
mode. Hence we need to call tmrechkpt after to restore the checkpointed state
and the TM mode for the current task.
To make this fail from userspace is simply:
tbegin
li r0, 2
sc
<boom>
Kudos to Adhemerval Zanella Neto for finding this.
Signed-off-by: Michael Neuling <mikey@neuling.org>
cc: Adhemerval Zanella Neto <azanella@br.ibm.com>
cc: stable@vger.kernel.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> | 1 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
flush_fp_to_thread(src);
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
*dst = *src;
clear_task_ebb(dst);
return 0;
}
| 5,326,801,381,502,656,000,000,000,000,000,000,000 | process.c | 319,797,184,082,093,000,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-2673 | The arch_dup_task_struct function in the Transactional Memory (TM) implementation in arch/powerpc/kernel/process.c in the Linux kernel before 3.13.7 on the powerpc platform does not properly interact with the clone and fork system calls, which allows local users to cause a denial of service (Program Check and system crash) via certain instructions that are executed with the processor in the Transactional state. | https://nvd.nist.gov/vuln/detail/CVE-2014-2673 |
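Note on the CVE-2014-2673 record above: the fix described is to flush the parent's transactional state (reclaim, then recheckpoint) before the structure copy, so the child never inherits half-torn TM state. A shape-only sketch; flush_tm_state_to_thread() below is a hypothetical placeholder for that reclaim/recheckpoint sequence, not a real kernel helper:

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/* hypothetical helper standing in for the tm reclaim + tmrechkpt
	 * sequence the commit message describes */
	flush_tm_state_to_thread(src);

	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);

	*dst = *src;
	clear_task_ebb(dst);
	return 0;
}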
1,312 | linux | 21f8aaee0c62708654988ce092838aa7df4d25d8 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/21f8aaee0c62708654988ce092838aa7df4d25d8 | ath9k: protect tid->sched check
We check tid->sched without a lock taken on ath_tx_aggr_sleep(). That
is race condition which can result of doing list_del(&tid->list) twice
(second time with poisoned list node) and cause crash like shown below:
[424271.637220] BUG: unable to handle kernel paging request at 00100104
[424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k]
...
[424271.639953] Call Trace:
[424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k]
[424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k]
[424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211]
[424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40
[424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211]
[424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0
[424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40
[424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211]
[424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211]
[424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211]
[424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0
[424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211]
[424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k]
[424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211]
[424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k]
Bug report:
https://bugzilla.kernel.org/show_bug.cgi?id=70551
Reported-and-tested-by: Max Sydorenko <maxim.stargazer@gmail.com>
Cc: stable@vger.kernel.org
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com> | 1 | void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_node *an)
{
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
struct ath_txq *txq;
bool buffered;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
if (!tid->sched)
continue;
ac = tid->ac;
txq = ac->txq;
ath_txq_lock(sc, txq);
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
list_del(&tid->list);
if (ac->sched) {
ac->sched = false;
list_del(&ac->list);
}
ath_txq_unlock(sc, txq);
ieee80211_sta_set_buffered(sta, tidno, buffered);
}
}
| 27,558,184,441,602,360,000,000,000,000,000,000,000 | xmit.c | 52,409,064,949,040,600,000,000,000,000,000,000,000 | [
"CWE-362"
] | CVE-2014-2672 | Race condition in the ath_tx_aggr_sleep function in drivers/net/wireless/ath/ath9k/xmit.c in the Linux kernel before 3.13.7 allows remote attackers to cause a denial of service (system crash) via a large amount of network traffic that triggers certain list deletions. | https://nvd.nist.gov/vuln/detail/CVE-2014-2672 |
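Note on the CVE-2014-2672 record above: the race is the unlocked tid->sched test, so the natural fix is to take the queue lock first and test under it, skipping to the next TID if the entry is not scheduled. A sketch of the reordered loop body, using only names from the function above (written from the commit description rather than copied from the patch):

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		if (!tid->sched) {	/* now checked under the queue lock */
			ath_txq_unlock(sc, txq);
			continue;
		}
		buffered = ath_tid_has_buffered(tid);
		tid->sched = false;
		list_del(&tid->list);
		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}
		ath_txq_unlock(sc, txq);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}

This keeps list_del() from ever running on a node that another CPU has already unlinked.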
1,313 | linux | b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | netfilter: nf_conntrack_dccp: fix skb_header_pointer API usages
Some occurences in the netfilter tree use skb_header_pointer() in
the following way ...
struct dccp_hdr _dh, *dh;
...
skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
... where dh itself is a pointer that is being passed as the copy
buffer. Instead, we need to use &_dh as the forth argument so that
we're copying the data into an actual buffer that sits on the stack.
Currently, we probably could overwrite memory on the stack (e.g.
with a possibly mal-formed DCCP packet), but unintentionally, as
we only want the buffer to be placed into _dh variable.
Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | 1 | static int dccp_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo,
u_int8_t pf, unsigned int hooknum)
{
struct dccp_hdr _dh, *dh;
unsigned int dccp_len = skb->len - dataoff;
unsigned int cscov;
const char *msg;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
if (dh == NULL) {
msg = "nf_ct_dccp: short packet ";
goto out_invalid;
}
if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
dh->dccph_doff * 4 > dccp_len) {
msg = "nf_ct_dccp: truncated/malformed packet ";
goto out_invalid;
}
cscov = dccp_len;
if (dh->dccph_cscov) {
cscov = (dh->dccph_cscov - 1) * 4;
if (cscov > dccp_len) {
msg = "nf_ct_dccp: bad checksum coverage ";
goto out_invalid;
}
}
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP,
pf)) {
msg = "nf_ct_dccp: bad checksum ";
goto out_invalid;
}
if (dh->dccph_type >= DCCP_PKT_INVALID) {
msg = "nf_ct_dccp: reserved packet type ";
goto out_invalid;
}
return NF_ACCEPT;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
return -NF_ACCEPT;
}
| 324,117,670,837,559,900,000,000,000,000,000,000,000 | nf_conntrack_proto_dccp.c | 173,212,850,459,003,400,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-2523 | net/netfilter/nf_conntrack_proto_dccp.c in the Linux kernel through 3.13.6 uses a DCCP header pointer incorrectly, which allows remote attackers to cause a denial of service (system crash) or possibly execute arbitrary code via a DCCP packet that triggers a call to the (1) dccp_new, (2) dccp_packet, or (3) dccp_error function. | https://nvd.nist.gov/vuln/detail/CVE-2014-2523 |
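Note on the CVE-2014-2523 record above: the commit message spells the fix out, so a sketch is short: pass the address of the on-stack header, &_dh, as the copy buffer instead of &dh. The same one-argument change applies to dccp_new() and dccp_packet() in the next two records.

	struct dccp_hdr _dh, *dh;

	/* copy the header bytes into _dh on the stack; dh then points at
	 * either the skb data or that stack copy */
	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
	if (dh == NULL) {
		msg = "nf_ct_dccp: short packet ";
		goto out_invalid;
	}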
1,314 | linux | b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | netfilter: nf_conntrack_dccp: fix skb_header_pointer API usages
Some occurences in the netfilter tree use skb_header_pointer() in
the following way ...
struct dccp_hdr _dh, *dh;
...
skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
... where dh itself is a pointer that is being passed as the copy
buffer. Instead, we need to use &_dh as the forth argument so that
we're copying the data into an actual buffer that sits on the stack.
Currently, we probably could overwrite memory on the stack (e.g.
with a possibly mal-formed DCCP packet), but unintentionally, as
we only want the buffer to be placed into _dh variable.
Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | 1 | static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
struct dccp_hdr _dh, *dh;
const char *msg;
u_int8_t state;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
dn = dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "nf_ct_dccp: not picking up existing connection ";
goto out_invalid;
}
case CT_DCCP_REQUEST:
break;
case CT_DCCP_INVALID:
msg = "nf_ct_dccp: invalid state transition ";
goto out_invalid;
}
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
NULL, "%s", msg);
return false;
}
| 71,271,037,971,402,570,000,000,000,000,000,000,000 | nf_conntrack_proto_dccp.c | 173,212,850,459,003,400,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-2523 | net/netfilter/nf_conntrack_proto_dccp.c in the Linux kernel through 3.13.6 uses a DCCP header pointer incorrectly, which allows remote attackers to cause a denial of service (system crash) or possibly execute arbitrary code via a DCCP packet that triggers a call to the (1) dccp_new, (2) dccp_packet, or (3) dccp_error function. | https://nvd.nist.gov/vuln/detail/CVE-2014-2523 |
1,315 | linux | b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b22f5126a24b3b2f15448c3f2a254fc10cbc2b92 | netfilter: nf_conntrack_dccp: fix skb_header_pointer API usages
Some occurences in the netfilter tree use skb_header_pointer() in
the following way ...
struct dccp_hdr _dh, *dh;
...
skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
... where dh itself is a pointer that is being passed as the copy
buffer. Instead, we need to use &_dh as the forth argument so that
we're copying the data into an actual buffer that sits on the stack.
Currently, we probably could overwrite memory on the stack (e.g.
with a possibly mal-formed DCCP packet), but unintentionally, as
we only want the buffer to be placed into _dh variable.
Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | 1 | static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, enum ip_conntrack_info ctinfo,
u_int8_t pf, unsigned int hooknum,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
BUG_ON(dh == NULL);
type = dh->dccph_type;
if (type == DCCP_PKT_RESET &&
!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
/* Tear down connection immediately if only reply is a RESET */
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
spin_lock_bh(&ct->lock);
role = ct->proto.dccp.role[dir];
old_state = ct->proto.dccp.state;
new_state = dccp_state_table[role][type][old_state];
switch (new_state) {
case CT_DCCP_REQUEST:
if (old_state == CT_DCCP_TIMEWAIT &&
role == CT_DCCP_ROLE_SERVER) {
/* Reincarnation in the reverse direction: reopen and
* reverse client/server roles. */
ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER;
}
break;
case CT_DCCP_RESPOND:
if (old_state == CT_DCCP_REQUEST)
ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
break;
case CT_DCCP_PARTOPEN:
if (old_state == CT_DCCP_RESPOND &&
type == DCCP_PKT_ACK &&
dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq)
set_bit(IPS_ASSURED_BIT, &ct->status);
break;
case CT_DCCP_IGNORE:
/*
* Connection tracking might be out of sync, so we ignore
* packets that might establish a new connection and resync
* if the server responds with a valid Response.
*/
if (ct->proto.dccp.last_dir == !dir &&
ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST &&
type == DCCP_PKT_RESPONSE) {
ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
new_state = CT_DCCP_RESPOND;
break;
}
ct->proto.dccp.last_dir = dir;
ct->proto.dccp.last_pkt = type;
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_dccp: invalid packet ignored ");
return NF_ACCEPT;
case CT_DCCP_INVALID:
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_dccp: invalid state transition ");
return -NF_ACCEPT;
}
ct->proto.dccp.last_dir = dir;
ct->proto.dccp.last_pkt = type;
ct->proto.dccp.state = new_state;
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
}
| 192,674,082,710,633,720,000,000,000,000,000,000,000 | nf_conntrack_proto_dccp.c | 173,212,850,459,003,400,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-2523 | net/netfilter/nf_conntrack_proto_dccp.c in the Linux kernel through 3.13.6 uses a DCCP header pointer incorrectly, which allows remote attackers to cause a denial of service (system crash) or possibly execute arbitrary code via a DCCP packet that triggers a call to the (1) dccp_new, (2) dccp_packet, or (3) dccp_error function. | https://nvd.nist.gov/vuln/detail/CVE-2014-2523 |
1,316 | linux | 263b4509ec4d47e0da3e753f85a39ea12d1eff24 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/263b4509ec4d47e0da3e753f85a39ea12d1eff24 | nfs: always make sure page is up-to-date before extending a write to cover the entire page
We should always make sure the cached page is up-to-date when we're
determining whether we can extend a write to cover the full page -- even
if we've received a write delegation from the server.
Commit c7559663 added logic to skip this check if we have a write
delegation, which can lead to data corruption such as the following
scenario if client B receives a write delegation from the NFS server:
Client A:
# echo 123456789 > /mnt/file
Client B:
# echo abcdefghi >> /mnt/file
# cat /mnt/file
0�D0�abcdefghi
Just because we hold a write delegation doesn't mean that we've read in
the entire page contents.
Cc: <stable@vger.kernel.org> # v3.11+
Signed-off-by: Scott Mayhew <smayhew@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com> | 1 | static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
if (file->f_flags & O_DSYNC)
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
(inode->i_flock->fl_start == 0 &&
inode->i_flock->fl_end == OFFSET_MAX &&
inode->i_flock->fl_type != F_RDLCK)))
return 1;
return 0;
}
| 289,835,272,320,192,980,000,000,000,000,000,000,000 | write.c | 168,110,051,995,489,260,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2014-2038 | The nfs_can_extend_write function in fs/nfs/write.c in the Linux kernel before 3.13.3 relies on a write delegation to extend a write operation without a certain up-to-date verification, which allows local users to obtain sensitive information from kernel memory in opportunistic circumstances by writing to a file in an NFS filesystem and then reading the same file. | https://nvd.nist.gov/vuln/detail/CVE-2014-2038 |
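Note on the CVE-2014-2038 record above: per the commit message, the page up-to-date check must run before the delegation shortcut, not only on the non-delegated path. A sketch of the reordered predicate, reconstructed from the description (the upstream patch may differ in detail):

static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	if (file->f_flags & O_DSYNC)
		return 0;
	/* never extend a write over stale cached data, delegation or not */
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (inode->i_flock == NULL ||
	    (inode->i_flock->fl_start == 0 &&
	     inode->i_flock->fl_end == OFFSET_MAX &&
	     inode->i_flock->fl_type != F_RDLCK))
		return 1;
	return 0;
}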
1,323 | linux | e6a623460e5fc960ac3ee9f946d3106233fd28d8 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/e6a623460e5fc960ac3ee9f946d3106233fd28d8 | [media] media-device: fix infoleak in ioctl media_enum_entities()
This fixes CVE-2014-1739.
Signed-off-by: Salva Peiró <speiro@ai2.upv.es>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: stable@vger.kernel.org
Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com> | 1 | static long media_device_enum_entities(struct media_device *mdev,
struct media_entity_desc __user *uent)
{
struct media_entity *ent;
struct media_entity_desc u_ent;
if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
return -EFAULT;
ent = find_entity(mdev, u_ent.id);
if (ent == NULL)
return -EINVAL;
u_ent.id = ent->id;
if (ent->name) {
strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
u_ent.name[sizeof(u_ent.name) - 1] = '\0';
} else {
memset(u_ent.name, 0, sizeof(u_ent.name));
}
u_ent.type = ent->type;
u_ent.revision = ent->revision;
u_ent.flags = ent->flags;
u_ent.group_id = ent->group_id;
u_ent.pads = ent->num_pads;
u_ent.links = ent->num_links - ent->num_backlinks;
memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
return -EFAULT;
return 0;
}
| 291,851,125,023,156,100,000,000,000,000,000,000,000 | media-device.c | 35,810,193,786,496,604,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2014-1739 | The media_device_enum_entities function in drivers/media/media-device.c in the Linux kernel before 3.14.6 does not initialize a certain data structure, which allows local users to obtain sensitive information from kernel memory by leveraging /dev/media0 read access for a MEDIA_IOC_ENUM_ENTITIES ioctl call. | https://nvd.nist.gov/vuln/detail/CVE-2014-1739 |
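Note on the CVE-2014-1739 record above: u_ent is a stack structure, so any padding or field the code never writes is copied to userspace verbatim. The fix named in the commit title is a memset before filling it in; a sketch of the start of the function:

	struct media_entity_desc u_ent;

	memset(&u_ent, 0, sizeof(u_ent));	/* zero padding and unwritten fields */
	if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
		return -EFAULT;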
1,324 | linux | 2145e15e0557a01b9195d1c7199a1b92cb9be81f | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/2145e15e0557a01b9195d1c7199a1b92cb9be81f | floppy: don't write kernel-only members to FDRAWCMD ioctl output
Do not leak kernel-only floppy_raw_cmd structure members to userspace.
This includes the linked-list pointer and the pointer to the allocated
DMA space.
Signed-off-by: Matthew Daley <mattd@bugfuzz.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | 1 | static int raw_cmd_copyout(int cmd, void __user *param,
struct floppy_raw_cmd *ptr)
{
int ret;
while (ptr) {
ret = copy_to_user(param, ptr, sizeof(*ptr));
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) {
if (ptr->length >= 0 &&
ptr->length <= ptr->buffer_length) {
long length = ptr->buffer_length - ptr->length;
ret = fd_copyout(ptr->data, ptr->kernel_data,
length);
if (ret)
return ret;
}
}
ptr = ptr->next;
}
return 0;
}
| 339,071,363,535,589,500,000,000,000,000,000,000,000 | floppy.c | 32,077,231,849,682,140,000,000,000,000,000,000,000 | [
"CWE-264"
] | CVE-2014-1738 | The raw_cmd_copyout function in drivers/block/floppy.c in the Linux kernel through 3.14.3 does not properly restrict access to certain pointers during processing of an FDRAWCMD ioctl call, which allows local users to obtain sensitive information from kernel heap memory by leveraging write access to a /dev/fd device. | https://nvd.nist.gov/vuln/detail/CVE-2014-1738 |
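Note on the CVE-2014-1738 record above: copying the whole struct floppy_raw_cmd exposes the kernel-only members named in the commit message, the ->next list pointer and the ->kernel_data DMA pointer. One way to scrub them, sketched from the description (the local ucmd temporary is illustrative; whether the upstream patch does it exactly this way is an assumption):

	while (ptr) {
		struct floppy_raw_cmd ucmd = *ptr;

		/* never let kernel pointers reach userspace */
		ucmd.next = NULL;
		ucmd.kernel_data = NULL;
		ret = copy_to_user(param, &ucmd, sizeof(ucmd));
		if (ret)
			return -EFAULT;
		param += sizeof(struct floppy_raw_cmd);
		/* FD_RAW_READ data copy-out continues as in the record above */
		ptr = ptr->next;
	}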
1,325 | linux | 2690d97ade05c5325cbf7c72b94b90d265659886 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/2690d97ade05c5325cbf7c72b94b90d265659886 | netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper
Commit 5901b6be885e attempted to introduce IPv6 support into
IRC NAT helper. By doing so, the following code seemed to be removed
by accident:
ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
sprintf(buffer, "%u %u", ip, port);
pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &ip, port);
This leads to the fact that buffer[] was left uninitialized and
contained some stack value. When we call nf_nat_mangle_tcp_packet(),
we call strlen(buffer) on excatly this uninitialized buffer. If we
are unlucky and the skb has enough tailroom, we overwrite resp. leak
contents with values that sit on our stack into the packet and send
that out to the receiver.
Since the rather informal DCC spec [1] does not seem to specify
IPv6 support right now, we log such occurences so that admins can
act accordingly, and drop the packet. I've looked into XChat source,
and IPv6 is not supported there: addresses are in u32 and print
via %u format string.
Therefore, restore old behaviour as in IPv4, use snprintf(). The
IRC helper does not support IPv6 by now. By this, we can safely use
strlen(buffer) in nf_nat_mangle_tcp_packet() and prevent a buffer
overflow. Also simplify some code as we now have ct variable anyway.
[1] http://www.irchelp.org/irchelp/rfc/ctcpspec.html
Fixes: 5901b6be885e ("netfilter: nf_nat: support IPv6 in IRC NAT helper")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Harald Welte <laforge@gnumonks.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | 1 | static unsigned int help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp)
{
char buffer[sizeof("4294967296 65635")];
u_int16_t port;
unsigned int ret;
/* Reply comes from server. */
exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
exp->dir = IP_CT_DIR_REPLY;
exp->expectfn = nf_nat_follow_master;
/* Try to get same port: if not, try to change it. */
for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
int ret;
exp->tuple.dst.u.tcp.port = htons(port);
ret = nf_ct_expect_related(exp);
if (ret == 0)
break;
else if (ret != -EBUSY) {
port = 0;
break;
}
}
if (port == 0) {
nf_ct_helper_log(skb, exp->master, "all ports in use");
return NF_DROP;
}
ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
protoff, matchoff, matchlen, buffer,
strlen(buffer));
if (ret != NF_ACCEPT) {
nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
nf_ct_unexpect_related(exp);
}
return ret;
}
| 59,351,750,141,192,130,000,000,000,000,000,000,000 | nf_nat_irc.c | 209,901,964,006,613,200,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-1690 | The help function in net/netfilter/nf_nat_irc.c in the Linux kernel before 3.12.8 allows remote attackers to obtain sensitive information from kernel memory by establishing an IRC DCC session in which incorrect packet data is transmitted during use of the NAT mangle feature. | https://nvd.nist.gov/vuln/detail/CVE-2014-1690 |
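Note on the CVE-2014-1690 record above: the commit message quotes the lines that were lost, so restoring them means formatting the reply address and the chosen port into buffer before it is handed to nf_nat_mangle_tcp_packet(), which calls strlen() on it. A sketch along those lines (IPv4 only, snprintf as the message suggests):

	char buffer[sizeof("4294967296 65635")];
	u_int32_t ip;
	u_int16_t port;

	/* ... port selection loop as in the record above ... */

	ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
	snprintf(buffer, sizeof(buffer), "%u %u", ip, port);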
1,326 | linux | 8e3fbf870481eb53b2d3a322d1fc395ad8b367ed | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/8e3fbf870481eb53b2d3a322d1fc395ad8b367ed | hamradio/yam: fix info leak in ioctl
The yam_ioctl() code fails to initialise the cmd field
of the struct yamdrv_ioctl_cfg. Add an explicit memset(0)
before filling the structure to avoid the 4-byte info leak.
Signed-off-by: Salva Peiró <speiro@ai2.upv.es>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct yam_port *yp = netdev_priv(dev);
struct yamdrv_ioctl_cfg yi;
struct yamdrv_ioctl_mcs *ym;
int ioctl_cmd;
if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int)))
return -EFAULT;
if (yp->magic != YAM_MAGIC)
return -EINVAL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd != SIOCDEVPRIVATE)
return -EINVAL;
switch (ioctl_cmd) {
case SIOCYAMRESERVED:
return -EINVAL; /* unused */
case SIOCYAMSMCS:
if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
return -ENOBUFS;
if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
kfree(ym);
return -EFAULT;
}
if (ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
/* setting predef as 0 for loading userdefined mcs data */
add_mcs(ym->bits, ym->bitrate, 0);
kfree(ym);
break;
case SIOCYAMSCFG:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if (yi.cfg.mask & YAM_IOBASE) {
yp->iobase = yi.cfg.iobase;
dev->base_addr = yi.cfg.iobase;
}
if (yi.cfg.mask & YAM_IRQ) {
if (yi.cfg.irq > 15)
return -EINVAL;
yp->irq = yi.cfg.irq;
dev->irq = yi.cfg.irq;
}
if (yi.cfg.mask & YAM_BITRATE) {
if (yi.cfg.bitrate > YAM_MAXBITRATE)
return -EINVAL;
yp->bitrate = yi.cfg.bitrate;
}
if (yi.cfg.mask & YAM_BAUDRATE) {
if (yi.cfg.baudrate > YAM_MAXBAUDRATE)
return -EINVAL;
yp->baudrate = yi.cfg.baudrate;
}
if (yi.cfg.mask & YAM_MODE) {
if (yi.cfg.mode > YAM_MAXMODE)
return -EINVAL;
yp->dupmode = yi.cfg.mode;
}
if (yi.cfg.mask & YAM_HOLDDLY) {
if (yi.cfg.holddly > YAM_MAXHOLDDLY)
return -EINVAL;
yp->holdd = yi.cfg.holddly;
}
if (yi.cfg.mask & YAM_TXDELAY) {
if (yi.cfg.txdelay > YAM_MAXTXDELAY)
return -EINVAL;
yp->txd = yi.cfg.txdelay;
}
if (yi.cfg.mask & YAM_TXTAIL) {
if (yi.cfg.txtail > YAM_MAXTXTAIL)
return -EINVAL;
yp->txtail = yi.cfg.txtail;
}
if (yi.cfg.mask & YAM_PERSIST) {
if (yi.cfg.persist > YAM_MAXPERSIST)
return -EINVAL;
yp->pers = yi.cfg.persist;
}
if (yi.cfg.mask & YAM_SLOTTIME) {
if (yi.cfg.slottime > YAM_MAXSLOTTIME)
return -EINVAL;
yp->slot = yi.cfg.slottime;
yp->slotcnt = yp->slot / 10;
}
break;
case SIOCYAMGCFG:
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
yi.cfg.bitrate = yp->bitrate;
yi.cfg.baudrate = yp->baudrate;
yi.cfg.mode = yp->dupmode;
yi.cfg.txdelay = yp->txd;
yi.cfg.holddly = yp->holdd;
yi.cfg.txtail = yp->txtail;
yi.cfg.persist = yp->pers;
yi.cfg.slottime = yp->slot;
if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
break;
default:
return -EINVAL;
}
return 0;
}
| 11,306,301,792,634,982,000,000,000,000,000,000,000 | None | null | [
"CWE-399"
] | CVE-2014-1446 | The yam_ioctl function in drivers/net/hamradio/yam.c in the Linux kernel before 3.12.8 does not initialize a certain structure member, which allows local users to obtain sensitive information from kernel memory by leveraging the CAP_NET_ADMIN capability for an SIOCYAMGCFG ioctl call. | https://nvd.nist.gov/vuln/detail/CVE-2014-1446 |
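Note on the CVE-2014-1446 record above: the SIOCYAMGCFG branch fills yi field by field and then copies the whole structure out, so compiler padding and the unused cmd member leave the kernel uninitialised. The one-line fix described is a memset first; a sketch of the branch:

	case SIOCYAMGCFG:
		memset(&yi, 0, sizeof(yi));	/* clear padding and the cmd field */
		yi.cfg.mask = 0xffffffff;
		yi.cfg.iobase = yp->iobase;
		yi.cfg.irq = yp->irq;
		yi.cfg.bitrate = yp->bitrate;
		yi.cfg.baudrate = yp->baudrate;
		yi.cfg.mode = yp->dupmode;
		yi.cfg.txdelay = yp->txd;
		yi.cfg.holddly = yp->holdd;
		yi.cfg.txtail = yp->txtail;
		yi.cfg.persist = yp->pers;
		yi.cfg.slottime = yp->slot;
		if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
			return -EFAULT;
		break;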
1,327 | linux | 2b13d06c9584b4eb773f1e80bbaedab9a1c344e1 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/2b13d06c9584b4eb773f1e80bbaedab9a1c344e1 | wanxl: fix info leak in ioctl
The wanxl_ioctl() code fails to initialize the two padding bytes of
struct sync_serial_settings after the ->loopback member. Add an explicit
memset(0) before filling the structure to avoid the info leak.
Signed-off-by: Salva Peiró <speiro@ai2.upv.es>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings line;
port_t *port = dev_to_port(dev);
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
if (ifr->ifr_settings.size < size) {
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
line.clock_type = get_status(port)->clocking;
line.clock_rate = 0;
line.loopback = 0;
if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
size))
return -EFAULT;
if (line.clock_type != CLOCK_EXT &&
line.clock_type != CLOCK_TXFROMRX)
return -EINVAL; /* No such clock setting */
if (line.loopback != 0)
return -EINVAL;
get_status(port)->clocking = line.clock_type;
return 0;
default:
return hdlc_ioctl(dev, ifr, cmd);
}
}
| 289,977,000,113,180,630,000,000,000,000,000,000,000 | wanxl.c | 1,534,528,475,873,357,800,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-1445 | The wanxl_ioctl function in drivers/net/wan/wanxl.c in the Linux kernel before 3.11.7 does not properly initialize a certain data structure, which allows local users to obtain sensitive information from kernel memory via an ioctl call. | https://nvd.nist.gov/vuln/detail/CVE-2014-1445 |
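Note on the CVE-2014-1445 record above: same pattern as the yam record, except here the two padding bytes after line.loopback are what leak, so the described fix is to zero the structure before filling it. The farsync record that follows (fst_get_iface, CVE-2014-1444) gets the identical one-line treatment for its local sync variable. A sketch of the wanxl branch:

	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));	/* covers the trailing padding */
		line.clock_type = get_status(port)->clocking;
		line.clock_rate = 0;
		line.loopback = 0;
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;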
1,328 | linux | 96b340406724d87e4621284ebac5e059d67b2194 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/96b340406724d87e4621284ebac5e059d67b2194 | farsync: fix info leak in ioctl
The fst_get_iface() code fails to initialize the two padding bytes of
struct sync_serial_settings after the ->loopback member. Add an explicit
memset(0) before filling the structure to avoid the info leak.
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
struct ifreq *ifr)
{
sync_serial_settings sync;
int i;
/* First check what line type is set, we'll default to reporting X.21
* if nothing is set as IF_IFACE_SYNC_SERIAL implies it can't be
* changed
*/
switch (port->hwif) {
case E1:
ifr->ifr_settings.type = IF_IFACE_E1;
break;
case T1:
ifr->ifr_settings.type = IF_IFACE_T1;
break;
case V35:
ifr->ifr_settings.type = IF_IFACE_V35;
break;
case V24:
ifr->ifr_settings.type = IF_IFACE_V24;
break;
case X21D:
ifr->ifr_settings.type = IF_IFACE_X21D;
break;
case X21:
default:
ifr->ifr_settings.type = IF_IFACE_X21;
break;
}
if (ifr->ifr_settings.size == 0) {
return 0; /* only type requested */
}
if (ifr->ifr_settings.size < sizeof (sync)) {
return -ENOMEM;
}
i = port->index;
sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
/* Lucky card and linux use same encoding here */
sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
return -EFAULT;
}
ifr->ifr_settings.size = sizeof (sync);
return 0;
}
| 123,961,501,830,758,970,000,000,000,000,000,000,000 | None | null | [
"CWE-399"
] | CVE-2014-1444 | The fst_get_iface function in drivers/net/wan/farsync.c in the Linux kernel before 3.11.7 does not properly initialize a certain data structure, which allows local users to obtain sensitive information from kernel memory by leveraging the CAP_NET_ADMIN capability for an SIOCWANDEV ioctl call. | https://nvd.nist.gov/vuln/detail/CVE-2014-1444 |
1,329 | FreeRDP | e2745807c4c3e0a590c0f69a9b655dc74ebaa03e | https://github.com/FreeRDP/FreeRDP | https://github.com/sidhpurwala-huzaifa/FreeRDP/commit/e2745807c4c3e0a590c0f69a9b655dc74ebaa03e | None | 1 | BOOL license_read_scope_list(wStream* s, SCOPE_LIST* scopeList)
{
UINT32 i;
UINT32 scopeCount;
if (Stream_GetRemainingLength(s) < 4)
return FALSE;
Stream_Read_UINT32(s, scopeCount); /* ScopeCount (4 bytes) */
scopeList->count = scopeCount;
scopeList->array = (LICENSE_BLOB*) malloc(sizeof(LICENSE_BLOB) * scopeCount);
/* ScopeArray */
for (i = 0; i < scopeCount; i++)
{
scopeList->array[i].type = BB_SCOPE_BLOB;
if (!license_read_binary_blob(s, &scopeList->array[i]))
return FALSE;
}
return TRUE;
}
| 136,158,675,070,583,430,000,000,000,000,000,000,000 | None | null | [
"CWE-189"
] | CVE-2014-0791 | Integer overflow in the license_read_scope_list function in libfreerdp/core/license.c in FreeRDP through 1.0.2 allows remote RDP servers to cause a denial of service (application crash) or possibly have unspecified other impact via a large ScopeCount value in a Scope List in a Server License Request packet. | https://nvd.nist.gov/vuln/detail/CVE-2014-0791 |
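Note on the CVE-2014-0791 record above: scopeCount comes straight off the wire and feeds both the multiplication in malloc() and the loop bound, so a huge value can wrap the allocation size and the loop then writes past the buffer. A defensive sketch in the spirit of the fix; the exact bound and the use of calloc() here are assumptions, not the literal upstream patch:

	Stream_Read_UINT32(s, scopeCount); /* ScopeCount (4 bytes) */

	/* every scope blob needs at least 4 bytes of stream data, so a count
	 * beyond the remaining length / 4 cannot be satisfied and would also
	 * make the allocation size below wrap */
	if (scopeCount > Stream_GetRemainingLength(s) / 4)
		return FALSE;

	scopeList->count = scopeCount;
	scopeList->array = (LICENSE_BLOB*) calloc(scopeCount, sizeof(LICENSE_BLOB));
	if (!scopeList->array)
		return FALSE;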
1,330 | file | f97486ef5dc3e8735440edc4fc8808c63e1a3ef0 | https://github.com/file/file | https://github.com/file/file/commit/f97486ef5dc3e8735440edc4fc8808c63e1a3ef0 | CVE-2014-0207: Prevent 0 element vectors and vectors longer than the number
of properties from accessing random memory. | 1 | cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements; j++, i++) {
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
| 203,768,158,432,833,200,000,000,000,000,000,000,000 | cdf.c | 252,409,831,567,122,700,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-0238 | The cdf_read_property_info function in cdf.c in the Fileinfo component in PHP before 5.4.29 and 5.5.x before 5.5.13 allows remote attackers to cause a denial of service (infinite loop or out-of-bounds memory access) via a vector that (1) has zero length or (2) is too long. | https://nvd.nist.gov/vuln/detail/CVE-2014-0238 |
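Note on the CVE-2014-0238 record above: the commit message names the two bad shapes. With a zero-element vector the string loop body never runs, i is then decremented, and the outer loop re-parses the same property forever; a vector declaring more elements than the section holds walks out of bounds. A sketch of the guard at the point where nelements is read, reconstructed from that description:

		if (inp[i].pi_type & CDF_VECTOR) {
			nelements = CDF_GETUINT32(q, 1);
			if (nelements == 0) {
				DPRINTF(("CDF_VECTOR with nelements == 0\n"));
				goto out;
			}
			o = 2;
		} else {
			nelements = 1;
			o = 1;
		}

The string cases additionally need an upper bound on nelements (for example against the declared property count) before the realloc and the copy loop run.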
1,331 | file | b8acc83781d5a24cc5101e525d15efe0482c280d | https://github.com/file/file | https://github.com/file/file/commit/b8acc83781d5a24cc5101e525d15efe0482c280d | Remove loop that kept reading the same offset (Jan Kaluza) | 1 | cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t i, maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE2(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
for (i = 0; i < CDF_TOLE4(si->si_count); i++) {
if (i >= CDF_LOOP_LIMIT) {
DPRINTF(("Unpack summary info loop limit"));
errno = EFTYPE;
return -1;
}
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset),
info, count, &maxcount) == -1) {
return -1;
}
}
return 0;
}
| 256,057,506,035,934,500,000,000,000,000,000,000,000 | cdf.c | 252,409,831,567,122,700,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2014-0237 | The cdf_unpack_summary_info function in cdf.c in the Fileinfo component in PHP before 5.4.29 and 5.5.x before 5.5.13 allows remote attackers to cause a denial of service (performance degradation) by triggering many file_printf calls. | https://nvd.nist.gov/vuln/detail/CVE-2014-0237 |
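Note on the CVE-2014-0237 record above: the loop calls cdf_read_property_info() si_count times with the same sd->sd_offset every time, so each pass re-parses identical data and grows *info, which is the performance degradation the CVE text describes. The commit title says the loop is simply removed; a sketch of the tail once it is:

	*count = 0;
	maxcount = 0;
	*info = NULL;
	/* sd->sd_offset never changes across iterations, so one call suffices */
	if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset),
	    info, count, &maxcount) == -1)
		return -1;
	return 0;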
1,332 | file | 6d209c1c489457397a5763bca4b28e43aac90391 | https://github.com/file/file | https://github.com/file/file/commit/6d209c1c489457397a5763bca4b28e43aac90391 | Apply patches from file-CVE-2012-1571.patch
From Francisco Alonso Espejo:
file < 5.18/git version can be made to crash when checking some
corrupt CDF files (Using an invalid cdf_read_short_sector size)
The problem I found here, is that in most situations (if
h_short_sec_size_p2 > 8) because the blocksize is 512 and normal
values are 06 which means reading 64 bytes.As long as the check
for the block size copy is not checked properly (there's an assert
that makes wrong/invalid assumptions) | 1 | cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
}
| 17,449,660,697,413,268,000,000,000,000,000,000,000 | cdf.c | 252,409,831,567,122,700,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-0207 | The cdf_read_short_sector function in cdf.c in file before 5.19, as used in the Fileinfo component in PHP before 5.4.30 and 5.5.x before 5.5.14, allows remote attackers to cause a denial of service (assertion failure and application exit) via a crafted CDF file. | https://nvd.nist.gov/vuln/detail/CVE-2014-0207 |
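Note on the CVE-2014-0207 record above: assert(ss == len) is the crash vector, since with assertions enabled a corrupt header aborts the process outright and with NDEBUG the mismatch is silently ignored. A sketch that turns it into a runtime failure and also bounds the end of the copy rather than only its start (that second tightening is an assumption about the patch):

	size_t ss = CDF_SHORT_SEC_SIZE(h);
	size_t pos = CDF_SHORT_SEC_POS(h, id);

	if (ss != len || pos + len > CDF_SEC_SIZE(h) * sst->sst_len) {
		DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
		    SIZE_T_FORMAT "u\n",
		    pos + len, CDF_SEC_SIZE(h) * sst->sst_len));
		return -1;
	}
	(void)memcpy(((char *)buf) + offs,
	    ((const char *)sst->sst_tab) + pos, len);
	return len;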
1,335 | linux | 7ada876a8703f23befbb20a7465a702ee39b1704 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/7ada876a8703f23befbb20a7465a702ee39b1704 | futex: Fix errors in nested key ref-counting
futex_wait() is leaking key references due to futex_wait_setup()
acquiring an additional reference via the queue_lock() routine. The
nested key ref-counting has been masking bugs and complicating code
analysis. queue_lock() is only called with a previously ref-counted
key, so remove the additional ref-counting from the queue_(un)lock()
functions.
Also futex_wait_requeue_pi() drops one key reference too many in
unqueue_me_pi(). Remove the key reference handling from
unqueue_me_pi(). This was paired with a queue_lock() in
futex_lock_pi(), so the count remains unchanged.
Document remaining nested key ref-counting sites.
Signed-off-by: Darren Hart <dvhart@linux.intel.com>
Reported-and-tested-by: Matthieu Fertré<matthieu.fertre@kerlabs.com>
Reported-by: Louis Rilling<louis.rilling@kerlabs.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: John Kacur <jkacur@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <4CBB17A8.70401@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@kernel.org | 1 | static int futex_wait(u32 __user *uaddr, int fshared,
u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct restart_block *restart;
struct futex_hash_bucket *hb;
struct futex_q q;
int ret;
if (!bitset)
return -EINVAL;
q.pi_state = NULL;
q.bitset = bitset;
q.rt_waiter = NULL;
q.requeue_pi_key = NULL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
retry:
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out;
/* queue_me and wait for wakeup, timeout, or a signal. */
futex_wait_queue_me(hb, &q, to);
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
if (!unqueue_me(&q))
goto out_put_key;
ret = -ETIMEDOUT;
if (to && !to->task)
goto out_put_key;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
if (!signal_pending(current)) {
put_futex_key(fshared, &q.key);
goto retry;
}
ret = -ERESTARTSYS;
if (!abs_time)
goto out_put_key;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
restart->futex.uaddr = (u32 *)uaddr;
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
restart->futex.flags = FLAGS_HAS_TIMEOUT;
if (fshared)
restart->futex.flags |= FLAGS_SHARED;
if (clockrt)
restart->futex.flags |= FLAGS_CLOCKRT;
ret = -ERESTART_RESTARTBLOCK;
out_put_key:
put_futex_key(fshared, &q.key);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
| 173,721,295,716,039,500,000,000,000,000,000,000,000 | futex.c | 310,482,782,767,060,830,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2014-0205 | The futex_wait function in kernel/futex.c in the Linux kernel before 2.6.37 does not properly maintain a certain reference count during requeue operations, which allows local users to cause a denial of service (use-after-free and system crash) or possibly gain privileges via a crafted application that triggers a zero count. | https://nvd.nist.gov/vuln/detail/CVE-2014-0205 |
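Note on the CVE-2014-0205 record above: the commit message places the leak in queue_lock() taking a key reference the caller already holds via futex_wait_setup(), so futex_wait() above ends up one reference short on the put side. A sketch of queue_lock() without the nested reference; the surrounding hash/lock code follows the usual futex pattern and is reproduced from memory, so treat the details as approximate:

static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);
	/* no get_futex_key_refs() here: the caller already holds a reference
	 * on q->key from get_futex_key()/futex_wait_setup() */
	q->lock_ptr = &hb->lock;
	spin_lock(&hb->lock);
	return hb;
}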