Each record below carries eight fields, listed here with dtype and the observed value range across the dataset:

func       string    length 0 .. 484k
target     int64     0 .. 1
cwe        sequence  (list of CWE identifiers, possibly empty)
project    string    length 2 .. 29
commit_id  string    length 40 .. 40
hash       float64   1,215,700,430,453,689,100,000,000 .. 340,281,914,521,452,260,000,000,000,000
size       int64     1 .. 24k
message    string    length 0 .. 13.3k
int _gnutls_ciphertext2compressed(gnutls_session_t session, opaque * compress_data, int compress_size, gnutls_datum_t ciphertext, uint8 type) { uint8 MAC[MAX_HASH_SIZE]; uint16 c_length; uint8 pad; int length; mac_hd_t td; uint16 blocksize; int ret, i, pad_failed = 0; uint8 major, minor; gnutls_protocol_t ver; int hash_size = _gnutls_hash_get_algo_len(session->security_parameters. read_mac_algorithm); ver = gnutls_protocol_get_version(session); minor = _gnutls_version_get_minor(ver); major = _gnutls_version_get_major(ver); blocksize = _gnutls_cipher_get_block_size(session->security_parameters. read_bulk_cipher_algorithm); /* initialize MAC */ td = mac_init(session->security_parameters.read_mac_algorithm, session->connection_state.read_mac_secret.data, session->connection_state.read_mac_secret.size, ver); if (td == GNUTLS_MAC_FAILED && session->security_parameters.read_mac_algorithm != GNUTLS_MAC_NULL) { gnutls_assert(); return GNUTLS_E_INTERNAL_ERROR; } /* actual decryption (inplace) */ switch (_gnutls_cipher_is_block (session->security_parameters.read_bulk_cipher_algorithm)) { case CIPHER_STREAM: if ((ret = _gnutls_cipher_decrypt(session->connection_state. read_cipher_state, ciphertext.data, ciphertext.size)) < 0) { gnutls_assert(); return ret; } length = ciphertext.size - hash_size; break; case CIPHER_BLOCK: if ((ciphertext.size < blocksize) || (ciphertext.size % blocksize != 0)) { gnutls_assert(); return GNUTLS_E_DECRYPTION_FAILED; } if ((ret = _gnutls_cipher_decrypt(session->connection_state. read_cipher_state, ciphertext.data, ciphertext.size)) < 0) { gnutls_assert(); return ret; } /* ignore the IV in TLS 1.1. */ if (session->security_parameters.version >= GNUTLS_TLS1_1) { ciphertext.size -= blocksize; ciphertext.data += blocksize; if (ciphertext.size == 0) { gnutls_assert(); return GNUTLS_E_DECRYPTION_FAILED; } } pad = ciphertext.data[ciphertext.size - 1] + 1; /* pad */ length = ciphertext.size - hash_size - pad; if (pad > ciphertext.size - hash_size) { gnutls_assert(); /* We do not fail here. We check below for the * the pad_failed. If zero means success. */ pad_failed = GNUTLS_E_DECRYPTION_FAILED; } /* Check the pading bytes (TLS 1.x) */ if (ver >= GNUTLS_TLS1) for (i = 2; i < pad; i++) { if (ciphertext.data[ciphertext.size - i] != ciphertext.data[ciphertext.size - 1]) pad_failed = GNUTLS_E_DECRYPTION_FAILED; } break; default: gnutls_assert(); return GNUTLS_E_INTERNAL_ERROR; } if (length < 0) length = 0; c_length = _gnutls_conv_uint16((uint16) length); /* Pass the type, version, length and compressed through * MAC. */ if (td != GNUTLS_MAC_FAILED) { _gnutls_hmac(td, UINT64DATA(session->connection_state. read_sequence_number), 8); _gnutls_hmac(td, &type, 1); if (ver >= GNUTLS_TLS1) { /* TLS 1.x */ _gnutls_hmac(td, &major, 1); _gnutls_hmac(td, &minor, 1); } _gnutls_hmac(td, &c_length, 2); if (length > 0) _gnutls_hmac(td, ciphertext.data, length); mac_deinit(td, MAC, ver); } /* This one was introduced to avoid a timing attack against the TLS * 1.0 protocol. */ if (pad_failed != 0) return pad_failed; /* HMAC was not the same. */ if (memcmp(MAC, &ciphertext.data[length], hash_size) != 0) { gnutls_assert(); return GNUTLS_E_DECRYPTION_FAILED; } /* copy the decrypted stuff to compress_data. */ if (compress_size < length) { gnutls_assert(); return GNUTLS_E_INTERNAL_ERROR; } memcpy(compress_data, ciphertext.data, length); return length; }
target: 1 | cwe: [] | project: gnutls | commit_id: 7ad6162573ba79a4392c63b453ad0220ca6c5ace | hash: 73,008,646,937,836,650,000,000,000,000,000,000,000 | size: 157
message:
added an extra check while checking the padding.
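The message above is terse; as a hedged illustration of the kind of extra check it describes (an assumption about its shape, not the verbatim gnutls patch), the decryption path would reject records too short to hold both the MAC and the padding before length = ciphertext.size - hash_size - pad is computed with unsigned operands:

    /* Hypothetical sketch, not the committed diff: refuse a record that
     * cannot contain hash_size MAC bytes plus pad padding bytes, so the
     * unsigned subtraction below cannot wrap around. */
    if (ciphertext.size < (unsigned) (hash_size + pad)) {
        gnutls_assert();
        pad_failed = GNUTLS_E_DECRYPTION_FAILED;
    }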
static char *make_filename_safe(const char *filename TSRMLS_DC) { if (*filename && strncmp(filename, ":memory:", sizeof(":memory:")-1)) { char *fullpath = expand_filepath(filename, NULL TSRMLS_CC); if (!fullpath) { return NULL; } if (PG(safe_mode) && (!php_checkuid(fullpath, NULL, CHECKUID_CHECK_FILE_AND_DIR))) { efree(fullpath); return NULL; } if (php_check_open_basedir(fullpath TSRMLS_CC)) { efree(fullpath); return NULL; } return fullpath; } return estrdup(filename); }
target: 1 | cwe: [ "CWE-264" ] | project: php-src | commit_id: 055ecbc62878e86287d742c7246c21606cee8183 | hash: 211,824,207,069,112,530,000,000,000,000,000,000,000 | size: 22
message:
Improve check for :memory: pseudo-filename in SQlite
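The function above only compares the ":memory:" prefix (sizeof(":memory:") - 1 bytes), so a name like ":memory:foo" skips the safe-mode and open-basedir checks without being a real in-memory database. A plausible form of the improved check (an assumption; the committed diff may differ) matches the pseudo-filename exactly by including the terminating NUL in the comparison:

    /* Illustrative sketch: comparing sizeof(":memory:") bytes includes
     * the NUL terminator, so only the exact string ":memory:" bypasses
     * the filesystem checks. */
    if (*filename && strncmp(filename, ":memory:", sizeof(":memory:"))) {
        /* ... expand and validate the path as above ... */
    }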
unpack_Z_stream(int fd_in, int fd_out) { IF_DESKTOP(long long total_written = 0;) IF_DESKTOP(long long) int retval = -1; unsigned char *stackp; long code; int finchar; long oldcode; long incode; int inbits; int posbits; int outpos; int insize; int bitmask; long free_ent; long maxcode; long maxmaxcode; int n_bits; int rsize = 0; unsigned char *inbuf; /* were eating insane amounts of stack - */ unsigned char *outbuf; /* bad for some embedded targets */ unsigned char *htab; unsigned short *codetab; /* Hmm, these were statics - why?! */ /* user settable max # bits/code */ int maxbits; /* = BITS; */ /* block compress mode -C compatible with 2.0 */ int block_mode; /* = BLOCK_MODE; */ inbuf = xzalloc(IBUFSIZ + 64); outbuf = xzalloc(OBUFSIZ + 2048); htab = xzalloc(HSIZE); /* wsn't zeroed out before, maybe can xmalloc? */ codetab = xzalloc(HSIZE * sizeof(codetab[0])); insize = 0; /* xread isn't good here, we have to return - caller may want * to do some cleanup (e.g. delete incomplete unpacked file etc) */ if (full_read(fd_in, inbuf, 1) != 1) { bb_error_msg("short read"); goto err; } maxbits = inbuf[0] & BIT_MASK; block_mode = inbuf[0] & BLOCK_MODE; maxmaxcode = MAXCODE(maxbits); if (maxbits > BITS) { bb_error_msg("compressed with %d bits, can only handle " BITS_STR" bits", maxbits); goto err; } n_bits = INIT_BITS; maxcode = MAXCODE(INIT_BITS) - 1; bitmask = (1 << INIT_BITS) - 1; oldcode = -1; finchar = 0; outpos = 0; posbits = 0 << 3; free_ent = ((block_mode) ? FIRST : 256); /* As above, initialize the first 256 entries in the table. */ /*clear_tab_prefixof(); - done by xzalloc */ for (code = 255; code >= 0; --code) { tab_suffixof(code) = (unsigned char) code; } do { resetbuf: { int i; int e; int o; o = posbits >> 3; e = insize - o; for (i = 0; i < e; ++i) inbuf[i] = inbuf[i + o]; insize = e; posbits = 0; } if (insize < (int) (IBUFSIZ + 64) - IBUFSIZ) { rsize = safe_read(fd_in, inbuf + insize, IBUFSIZ); //error check?? insize += rsize; } inbits = ((rsize > 0) ? (insize - insize % n_bits) << 3 : (insize << 3) - (n_bits - 1)); while (inbits > posbits) { if (free_ent > maxcode) { posbits = ((posbits - 1) + ((n_bits << 3) - (posbits - 1 + (n_bits << 3)) % (n_bits << 3))); ++n_bits; if (n_bits == maxbits) { maxcode = maxmaxcode; } else { maxcode = MAXCODE(n_bits) - 1; } bitmask = (1 << n_bits) - 1; goto resetbuf; } { unsigned char *p = &inbuf[posbits >> 3]; code = ((((long) (p[0])) | ((long) (p[1]) << 8) | ((long) (p[2]) << 16)) >> (posbits & 0x7)) & bitmask; } posbits += n_bits; if (oldcode == -1) { oldcode = code; finchar = (int) oldcode; outbuf[outpos++] = (unsigned char) finchar; continue; } if (code == CLEAR && block_mode) { clear_tab_prefixof(); free_ent = FIRST - 1; posbits = ((posbits - 1) + ((n_bits << 3) - (posbits - 1 + (n_bits << 3)) % (n_bits << 3))); n_bits = INIT_BITS; maxcode = MAXCODE(INIT_BITS) - 1; bitmask = (1 << INIT_BITS) - 1; goto resetbuf; } incode = code; stackp = de_stack; /* Special case for KwKwK string. 
*/ if (code >= free_ent) { if (code > free_ent) { unsigned char *p; posbits -= n_bits; p = &inbuf[posbits >> 3]; bb_error_msg ("insize:%d posbits:%d inbuf:%02X %02X %02X %02X %02X (%d)", insize, posbits, p[-1], p[0], p[1], p[2], p[3], (posbits & 07)); bb_error_msg("corrupted data"); goto err; } *--stackp = (unsigned char) finchar; code = oldcode; } /* Generate output characters in reverse order */ while ((long) code >= (long) 256) { *--stackp = tab_suffixof(code); code = tab_prefixof(code); } finchar = tab_suffixof(code); *--stackp = (unsigned char) finchar; /* And put them out in forward order */ { int i; i = de_stack - stackp; if (outpos + i >= OBUFSIZ) { do { if (i > OBUFSIZ - outpos) { i = OBUFSIZ - outpos; } if (i > 0) { memcpy(outbuf + outpos, stackp, i); outpos += i; } if (outpos >= OBUFSIZ) { full_write(fd_out, outbuf, outpos); //error check?? IF_DESKTOP(total_written += outpos;) outpos = 0; } stackp += i; i = de_stack - stackp; } while (i > 0); } else { memcpy(outbuf + outpos, stackp, i); outpos += i; } } /* Generate the new entry. */ code = free_ent; if (code < maxmaxcode) { tab_prefixof(code) = (unsigned short) oldcode; tab_suffixof(code) = (unsigned char) finchar; free_ent = code + 1; } /* Remember previous code. */ oldcode = incode; } } while (rsize > 0); if (outpos > 0) { full_write(fd_out, outbuf, outpos); //error check?? IF_DESKTOP(total_written += outpos;) } retval = IF_DESKTOP(total_written) + 0; err: free(inbuf); free(outbuf); free(htab); free(codetab); return retval; }
target: 1 | cwe: [] | project: busybox | commit_id: 251fc70e9722f931eec23a34030d05ba5f747b0e | hash: 21,401,706,257,394,040,000,000,000,000,000,000,000 | size: 232
message:
uncompress: fix buffer underrun by corrupted input Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
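In the corrupted-data branch above, the diagnostic rewinds posbits and then reads p[-1], which lands before the start of inbuf when the bad code appears early in the stream. A hedged sketch of one way to avoid that underrun (illustrative; the committed busybox fix may differ in detail):

    /* Hypothetical guard: only dereference the byte before p when it
     * actually lies inside inbuf. */
    unsigned char *p = &inbuf[posbits >> 3];
    if (p > inbuf)
        bb_error_msg("insize:%d posbits:%d prev byte:%02X",
                insize, posbits, p[-1]);
    bb_error_msg("corrupted data");
    goto err;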
static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h) { int sx, sy; int dx, dy; int width, height; int depth; int notify = 0; depth = s->get_bpp((VGAState *)s) / 8; s->get_resolution((VGAState *)s, &width, &height); /* extra x, y */ sx = (src % (width * depth)) / depth; sy = (src / (width * depth)); dx = (dst % (width *depth)) / depth; dy = (dst / (width * depth)); /* normalize width */ w /= depth; /* if we're doing a backward copy, we have to adjust our x/y to be the upper left corner (instead of the lower right corner) */ if (s->cirrus_blt_dstpitch < 0) { sx -= (s->cirrus_blt_width / depth) - 1; dx -= (s->cirrus_blt_width / depth) - 1; sy -= s->cirrus_blt_height - 1; dy -= s->cirrus_blt_height - 1; } /* are we in the visible portion of memory? */ if (sx >= 0 && sy >= 0 && dx >= 0 && dy >= 0 && (sx + w) <= width && (sy + h) <= height && (dx + w) <= width && (dy + h) <= height) { notify = 1; } /* make to sure only copy if it's a plain copy ROP */ if (*s->cirrus_rop != cirrus_bitblt_rop_fwd_src && *s->cirrus_rop != cirrus_bitblt_rop_bkwd_src) notify = 0; /* we have to flush all pending changes so that the copy is generated at the appropriate moment in time */ if (notify) vga_hw_update(); (*s->cirrus_rop) (s, s->vram_ptr + s->cirrus_blt_dstaddr, s->vram_ptr + s->cirrus_blt_srcaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, s->cirrus_blt_width, s->cirrus_blt_height); if (notify) s->ds->dpy_copy(s->ds, sx, sy, dx, dy, s->cirrus_blt_width / depth, s->cirrus_blt_height); /* we don't have to notify the display that this portion has changed since dpy_copy implies this */ if (!notify) cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 135,590,882,627,853,660,000,000,000,000,000,000,000 | size: 66
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
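The ten qemu rows that follow all stem from this one commit: blit parameters taken from guest-programmable registers are used to address video RAM with no validation. The patched copy routine that appears later in this set (the CVE-2008-4539 row) shows the shape of the fix; condensed from that code:

    /* Condensed from the patched cirrus_bitblt_videotovideo_copy shown
     * further down: reject blits whose regions are not validated as
     * safe, and mask addresses to the video-RAM aperture before use. */
    if (BLTUNSAFE(s))
        return 0;
    (*s->cirrus_rop) (s,
              s->vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask),
              s->vram_ptr + (s->cirrus_blt_srcaddr & s->cirrus_addr_mask),
              s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch,
              s->cirrus_blt_width, s->cirrus_blt_height);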
glue(cirrus_bitblt_rop_fwd_, ROP_NAME)(CirrusVGAState *s, uint8_t *dst,const uint8_t *src, int dstpitch,int srcpitch, int bltwidth,int bltheight) { int x,y; dstpitch -= bltwidth; srcpitch -= bltwidth; for (y = 0; y < bltheight; y++) { for (x = 0; x < bltwidth; x++) { ROP_OP(*dst, *src); dst++; src++; } dst += dstpitch; src += srcpitch; } }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 27,696,392,987,383,564,000,000,000,000,000,000,000 | size: 18
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s) { if (s->ds->dpy_copy) { cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->start_addr, s->cirrus_blt_srcaddr - s->start_addr, s->cirrus_blt_width, s->cirrus_blt_height); } else { (*s->cirrus_rop) (s, s->vram_ptr + s->cirrus_blt_dstaddr, s->vram_ptr + s->cirrus_blt_srcaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); } return 1; }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 331,046,290,845,234,400,000,000,000,000,000,000,000 | size: 19
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static void cirrus_mem_writeb_mode4and5_8bpp(CirrusVGAState * s, unsigned mode, unsigned offset, uint32_t mem_value) { int x; unsigned val = mem_value; uint8_t *dst; dst = s->vram_ptr + offset; for (x = 0; x < 8; x++) { if (val & 0x80) { *dst = s->cirrus_shadow_gr1; } else if (mode == 5) { *dst = s->cirrus_shadow_gr0; } val <<= 1; dst++; } cpu_physical_memory_set_dirty(s->vram_offset + offset); cpu_physical_memory_set_dirty(s->vram_offset + offset + 7); }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 50,418,188,797,463,430,000,000,000,000,000,000,000 | size: 22
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static int cirrus_bitblt_common_patterncopy(CirrusVGAState * s, const uint8_t * src) { uint8_t *dst; dst = s->vram_ptr + s->cirrus_blt_dstaddr; (*s->cirrus_rop) (s, dst, src, s->cirrus_blt_dstpitch, 0, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); return 1; }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 176,630,228,639,235,540,000,000,000,000,000,000,000 | size: 14
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static void cirrus_invalidate_region(CirrusVGAState * s, int off_begin, int off_pitch, int bytesperline, int lines) { int y; int off_cur; int off_cur_end; for (y = 0; y < lines; y++) { off_cur = off_begin; off_cur_end = off_cur + bytesperline; off_cur &= TARGET_PAGE_MASK; while (off_cur < off_cur_end) { cpu_physical_memory_set_dirty(s->vram_offset + off_cur); off_cur += TARGET_PAGE_SIZE; } off_begin += off_pitch; } }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 335,738,960,224,286,050,000,000,000,000,000,000,000 | size: 19
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static int cirrus_bitblt_videotovideo_patterncopy(CirrusVGAState * s) { return cirrus_bitblt_common_patterncopy(s, s->vram_ptr + (s->cirrus_blt_srcaddr & ~7)); }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 58,384,268,221,957,850,000,000,000,000,000,000,000 | size: 6
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop) { cirrus_fill_t rop_func; rop_func = cirrus_fill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; rop_func(s, s->vram_ptr + s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_bitblt_reset(s); return 1; }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 149,842,811,940,934,740,000,000,000,000,000,000,000 | size: 14
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static void cirrus_bitblt_cputovideo_next(CirrusVGAState * s) { int copy_count; uint8_t *end_ptr; if (s->cirrus_srccounter > 0) { if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) { cirrus_bitblt_common_patterncopy(s, s->cirrus_bltbuf); the_end: s->cirrus_srccounter = 0; cirrus_bitblt_reset(s); } else { /* at least one scan line */ do { (*s->cirrus_rop)(s, s->vram_ptr + s->cirrus_blt_dstaddr, s->cirrus_bltbuf, 0, 0, s->cirrus_blt_width, 1); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, 0, s->cirrus_blt_width, 1); s->cirrus_blt_dstaddr += s->cirrus_blt_dstpitch; s->cirrus_srccounter -= s->cirrus_blt_srcpitch; if (s->cirrus_srccounter <= 0) goto the_end; /* more bytes than needed can be transfered because of word alignment, so we keep them for the next line */ /* XXX: keep alignment to speed up transfer */ end_ptr = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; copy_count = s->cirrus_srcptr_end - end_ptr; memmove(s->cirrus_bltbuf, end_ptr, copy_count); s->cirrus_srcptr = s->cirrus_bltbuf + copy_count; s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; } while (s->cirrus_srcptr >= s->cirrus_srcptr_end); } } }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 246,597,330,759,913,640,000,000,000,000,000,000,000 | size: 34
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static void cirrus_mem_writeb_mode4and5_16bpp(CirrusVGAState * s, unsigned mode, unsigned offset, uint32_t mem_value) { int x; unsigned val = mem_value; uint8_t *dst; dst = s->vram_ptr + offset; for (x = 0; x < 8; x++) { if (val & 0x80) { *dst = s->cirrus_shadow_gr1; *(dst + 1) = s->gr[0x11]; } else if (mode == 5) { *dst = s->cirrus_shadow_gr0; *(dst + 1) = s->gr[0x10]; } val <<= 1; dst += 2; } cpu_physical_memory_set_dirty(s->vram_offset + offset); cpu_physical_memory_set_dirty(s->vram_offset + offset + 15); }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | hash: 241,662,835,431,977,260,000,000,000,000,000,000,000 | size: 24
message:
CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow I have just noticed that patch for CVE-2007-1320 has never been applied to the QEMU CVS. Please find it below. | Multiple heap-based buffer overflows in the cirrus_invalidate_region | function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and | possibly other products, might allow local users to execute arbitrary | code via unspecified vectors related to "attempting to mark | non-existent regions as dirty," aka the "bitblt" heap overflow. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162
static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s) { if (s->ds->dpy_copy) { cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->start_addr, s->cirrus_blt_srcaddr - s->start_addr, s->cirrus_blt_width, s->cirrus_blt_height); } else { if (BLTUNSAFE(s)) return 0; (*s->cirrus_rop) (s, s->vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask), s->vram_ptr + (s->cirrus_blt_srcaddr & s->cirrus_addr_mask), s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); } return 1; }
target: 1 | cwe: [ "CWE-787" ] | project: qemu | commit_id: 65d35a09979e63541afc5bfc595b9f1b1b4ae069 | hash: 128,516,004,862,549,760,000,000,000,000,000,000,000 | size: 25
message:
CVE-2008-4539: fix a heap overflow in Cirrus emulation The code in hw/cirrus_vga.c has changed a lot between CVE-2007-1320 has been announced and the patch has been applied. As a consequence it has wrongly applied and QEMU is still vulnerable to this bug if using VNC. (noticed by Jan Niehusmann) Signed-off-by: Aurelien Jarno <aurelien@aurel32.net> git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5587 c046a42c-6fe2-441c-8c8c-71466251a162
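As this message notes, the earlier fix missed the dpy_copy branch, so with VNC the blit still reaches cirrus_do_copy() unchecked. A sketch of the missing guard (its exact placement in the committed diff is an assumption):

    /* Hypothetical placement: validate the blit before either branch,
     * so the dpy_copy/VNC path is covered as well as the direct ROP. */
    static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s)
    {
        if (BLTUNSAFE(s))
            return 0;
        if (s->ds->dpy_copy) {
            cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->start_addr,
                           s->cirrus_blt_srcaddr - s->start_addr,
                           s->cirrus_blt_width, s->cirrus_blt_height);
        }
        /* ... direct-ROP branch as above ... */
        return 1;
    }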
asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name, char __user * type, unsigned long flags, void __user * data) { unsigned long type_page; unsigned long data_page; unsigned long dev_page; char *dir_page; int retval; retval = copy_mount_options (type, &type_page); if (retval < 0) goto out; dir_page = getname(dir_name); retval = PTR_ERR(dir_page); if (IS_ERR(dir_page)) goto out1; retval = copy_mount_options (dev_name, &dev_page); if (retval < 0) goto out2; retval = copy_mount_options (data, &data_page); if (retval < 0) goto out3; retval = -EINVAL; if (type_page) { if (!strcmp((char *)type_page, SMBFS_NAME)) { do_smb_super_data_conv((void *)data_page); } else if (!strcmp((char *)type_page, NCPFS_NAME)) { do_ncp_super_data_conv((void *)data_page); } else if (!strcmp((char *)type_page, NFS4_NAME)) { if (do_nfs4_super_data_conv((void *) data_page)) goto out4; } } lock_kernel(); retval = do_mount((char*)dev_page, dir_page, (char*)type_page, flags, (void*)data_page); unlock_kernel(); out4: free_page(data_page); out3: free_page(dev_page); out2: putname(dir_page); out1: free_page(type_page); out: return retval; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: 822191a2fa1584a29c3224ab328507adcaeac1ab | hash: 269,348,981,982,883,740,000,000,000,000,000,000,000 | size: 56
message:
[PATCH] skip data conversion in compat_sys_mount when data_page is NULL OpenVZ Linux kernel team has found a problem with mounting in compat mode. Simple command "mount -t smbfs ..." on Fedora Core 5 distro in 32-bit mode leads to oops: Unable to handle kernel NULL pointer dereference at 0000000000000000 RIP: compat_sys_mount+0xd6/0x290 Process mount (pid: 14656, veid=300, threadinfo ffff810034d30000, task ffff810034c86bc0) Call Trace: ia32_sysret+0x0/0xa The problem is that data_page pointer can be NULL, so we should skip data conversion in this case. Signed-off-by: Andrey Mirkin <amirkin@openvz.org> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
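The repair the message describes is a one-line guard: the filesystem-specific conversion is skipped when no mount data was supplied, so data_page is never dereferenced while NULL:

    /* Convert smbfs/ncpfs/nfs4 mount data only when both a type and a
     * data page are present; a NULL data_page goes straight through to
     * do_mount(). */
    if (type_page && data_page) {
        if (!strcmp((char *)type_page, SMBFS_NAME)) {
            do_smb_super_data_conv((void *)data_page);
        } else if (!strcmp((char *)type_page, NCPFS_NAME)) {
            do_ncp_super_data_conv((void *)data_page);
        } else if (!strcmp((char *)type_page, NFS4_NAME)) {
            if (do_nfs4_super_data_conv((void *)data_page))
                goto out4;
        }
    }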
unsigned short atalk_checksum(struct ddpehdr *ddp, int len) { unsigned long sum = 0; /* Assume unsigned long is >16 bits */ unsigned char *data = (unsigned char *)ddp; len -= 4; /* skip header 4 bytes */ data += 4; /* This ought to be unwrapped neatly. I'll trust gcc for now */ while (len--) { sum += *data; sum <<= 1; if (sum & 0x10000) { sum++; sum &= 0xFFFF; } data++; } /* Use 0xFFFF for 0. 0 itself means none */ return sum ? htons((unsigned short)sum) : 0xFFFF; }
target: 1 | cwe: [] | project: history | commit_id: 7ab442d7e0a76402c12553ee256f756097cae2d2 | hash: 21,680,921,567,039,845,000,000,000,000,000,000,000 | size: 21
message:
[DDP]: Convert to new protocol interface. Convert ddp to the new protocol interface which means it has to handle fragmented skb's. The only big change is in the checksum routine which has to do more work (like skb_checksum). Minor speedup is folding the carry to avoid a branch. Tested against a 2.4 system and by running both code over a range of packets.
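The message calls out a "minor speedup" from "folding the carry to avoid a branch". One branchless way to write the wrap in the loop above (an illustrative sketch; the committed helper may be expressed differently):

    /* End-around carry fold: whatever is shifted past bit 15 is added
     * back into the low 16 bits, replacing the if (sum & 0x10000) test. */
    while (len--) {
        sum += *data++;
        sum <<= 1;
        sum = (sum & 0xffff) + (sum >> 16);
    }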
static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { /* Expand any short form frames */ if (skb->mac.raw[2] == 1) { struct ddpehdr *ddp; /* Find our address */ struct atalk_addr *ap = atalk_find_dev_addr(dev); if (!ap || skb->len < sizeof(struct ddpshdr)) goto freeit; /* * The push leaves us with a ddephdr not an shdr, and * handily the port bytes in the right place preset. */ skb_push(skb, sizeof(*ddp) - 4); /* FIXME: use skb->cb to be able to use shared skbs */ ddp = (struct ddpehdr *)skb->data; /* Now fill in the long header */ /* * These two first. The mac overlays the new source/dest * network information so we MUST copy these before * we write the network numbers ! */ ddp->deh_dnode = skb->mac.raw[0]; /* From physical header */ ddp->deh_snode = skb->mac.raw[1]; /* From physical header */ ddp->deh_dnet = ap->s_net; /* Network number */ ddp->deh_snet = ap->s_net; ddp->deh_sum = 0; /* No checksum */ /* * Not sure about this bit... */ ddp->deh_len = skb->len; ddp->deh_hops = DDP_MAXHOPS; /* Non routable, so force a drop if we slip up later */ /* Mend the byte order */ *((__u16 *)ddp) = htons(*((__u16 *)ddp)); } skb->h.raw = skb->data; return atalk_rcv(skb, dev, pt); freeit: kfree_skb(skb); return 0; }
target: 1 | cwe: [] | project: history | commit_id: 7ab442d7e0a76402c12553ee256f756097cae2d2 | hash: 19,552,567,024,474,732,000,000,000,000,000,000,000 | size: 50
message:
[DDP]: Convert to new protocol interface. Convert ddp to the new protocol interface which means it has to handle fragmented skb's. The only big change is in the checksum routine which has to do more work (like skb_checksum). Minor speedup is folding the carry to avoid a branch. Tested against a 2.4 system and by running both code over a range of packets.
static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { struct ddpehdr *ddp = ddp_hdr(skb); struct sock *sock; struct atalk_iface *atif; struct sockaddr_at tosat; int origlen; struct ddpebits ddphv; /* Size check */ if (skb->len < sizeof(*ddp)) goto freeit; /* * Fix up the length field [Ok this is horrible but otherwise * I end up with unions of bit fields and messy bit field order * compiler/endian dependencies..] * * FIXME: This is a write to a shared object. Granted it * happens to be safe BUT.. (Its safe as user space will not * run until we put it back) */ *((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp)); /* Trim buffer in case of stray trailing data */ origlen = skb->len; skb_trim(skb, min_t(unsigned int, skb->len, ddphv.deh_len)); /* * Size check to see if ddp->deh_len was crap * (Otherwise we'll detonate most spectacularly * in the middle of recvmsg()). */ if (skb->len < sizeof(*ddp)) goto freeit; /* * Any checksums. Note we don't do htons() on this == is assumed to be * valid for net byte orders all over the networking code... */ if (ddp->deh_sum && atalk_checksum(ddp, ddphv.deh_len) != ddp->deh_sum) /* Not a valid AppleTalk frame - dustbin time */ goto freeit; /* Check the packet is aimed at us */ if (!ddp->deh_dnet) /* Net 0 is 'this network' */ atif = atalk_find_anynet(ddp->deh_dnode, dev); else atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode); /* Not ours, so we route the packet via the correct AppleTalk iface */ if (!atif) { atalk_route_packet(skb, dev, ddp, &ddphv, origlen); goto out; } /* if IP over DDP is not selected this code will be optimized out */ if (is_ip_over_ddp(skb)) return handle_ip_over_ddp(skb); /* * Which socket - atalk_search_socket() looks for a *full match* * of the <net, node, port> tuple. */ tosat.sat_addr.s_net = ddp->deh_dnet; tosat.sat_addr.s_node = ddp->deh_dnode; tosat.sat_port = ddp->deh_dport; sock = atalk_search_socket(&tosat, atif); if (!sock) /* But not one of our sockets */ goto freeit; /* Queue packet (standard) */ skb->sk = sock; if (sock_queue_rcv_skb(sock, skb) < 0) goto freeit; out: return 0; freeit: kfree_skb(skb); goto out; }
target: 1 | cwe: [] | project: history | commit_id: 7ab442d7e0a76402c12553ee256f756097cae2d2 | hash: 72,319,509,895,917,340,000,000,000,000,000,000,000 | size: 84
message:
[DDP]: Convert to new protocol interface. Convert ddp to the new protocol interface which means it has to handle fragmented skb's. The only big change is in the checksum routine which has to do more work (like skb_checksum). Minor speedup is folding the carry to avoid a branch. Tested against a 2.4 system and by running both code over a range of packets.
static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, int len) { struct sock *sk = sock->sk; struct atalk_sock *at = at_sk(sk); struct sockaddr_at *usat = (struct sockaddr_at *)msg->msg_name; int flags = msg->msg_flags; int loopback = 0; struct sockaddr_at local_satalk, gsat; struct sk_buff *skb; struct net_device *dev; struct ddpehdr *ddp; int size; struct atalk_route *rt; int err; if (flags & ~MSG_DONTWAIT) return -EINVAL; if (len > DDP_MAXSZ) return -EMSGSIZE; if (usat) { if (sk->sk_zapped) if (atalk_autobind(sk) < 0) return -EBUSY; if (msg->msg_namelen < sizeof(*usat) || usat->sat_family != AF_APPLETALK) return -EINVAL; /* netatalk doesn't implement this check */ if (usat->sat_addr.s_node == ATADDR_BCAST && !sock_flag(sk, SOCK_BROADCAST)) { printk(KERN_INFO "SO_BROADCAST: Fix your netatalk as " "it will break before 2.2\n"); #if 0 return -EPERM; #endif } } else { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; usat = &local_satalk; usat->sat_family = AF_APPLETALK; usat->sat_port = at->dest_port; usat->sat_addr.s_node = at->dest_node; usat->sat_addr.s_net = at->dest_net; } /* Build a packet */ SOCK_DEBUG(sk, "SK %p: Got address.\n", sk); /* For headers */ size = sizeof(struct ddpehdr) + len + ddp_dl->header_length; if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) { rt = atrtr_find(&usat->sat_addr); if (!rt) return -ENETUNREACH; dev = rt->dev; } else { struct atalk_addr at_hint; at_hint.s_node = 0; at_hint.s_net = at->src_net; rt = atrtr_find(&at_hint); if (!rt) return -ENETUNREACH; dev = rt->dev; } SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n", sk, size, dev->name); size += dev->hard_header_len; skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); if (!skb) return err; skb->sk = sk; skb_reserve(skb, ddp_dl->header_length); skb_reserve(skb, dev->hard_header_len); skb->dev = dev; SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk); ddp = (struct ddpehdr *)skb_put(skb, sizeof(struct ddpehdr)); ddp->deh_pad = 0; ddp->deh_hops = 0; ddp->deh_len = len + sizeof(*ddp); /* * Fix up the length field [Ok this is horrible but otherwise * I end up with unions of bit fields and messy bit field order * compiler/endian dependencies.. 
*/ *((__u16 *)ddp) = ntohs(*((__u16 *)ddp)); ddp->deh_dnet = usat->sat_addr.s_net; ddp->deh_snet = at->src_net; ddp->deh_dnode = usat->sat_addr.s_node; ddp->deh_snode = at->src_node; ddp->deh_dport = usat->sat_port; ddp->deh_sport = at->src_port; SOCK_DEBUG(sk, "SK %p: Copy user data (%d bytes).\n", sk, len); err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (err) { kfree_skb(skb); return -EFAULT; } if (sk->sk_no_check == 1) ddp->deh_sum = 0; else ddp->deh_sum = atalk_checksum(ddp, len + sizeof(*ddp)); /* * Loopback broadcast packets to non gateway targets (ie routes * to group we are in) */ if (ddp->deh_dnode == ATADDR_BCAST && !(rt->flags & RTF_GATEWAY) && !(dev->flags & IFF_LOOPBACK)) { struct sk_buff *skb2 = skb_copy(skb, GFP_KERNEL); if (skb2) { loopback = 1; SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk); if (aarp_send_ddp(dev, skb2, &usat->sat_addr, NULL) == -1) kfree_skb(skb2); /* else queued/sent above in the aarp queue */ } } if (dev->flags & IFF_LOOPBACK || loopback) { SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk); /* loop back */ skb_orphan(skb); ddp_dl->request(ddp_dl, skb, dev->dev_addr); } else { SOCK_DEBUG(sk, "SK %p: send out.\n", sk); if (rt->flags & RTF_GATEWAY) { gsat.sat_addr = rt->gateway; usat = &gsat; } if (aarp_send_ddp(dev, skb, &usat->sat_addr, NULL) == -1) kfree_skb(skb); /* else queued/sent above in the aarp queue */ } SOCK_DEBUG(sk, "SK %p: Done write (%d).\n", sk, len); return len; }
target: 1 | cwe: [] | project: history | commit_id: 7ab442d7e0a76402c12553ee256f756097cae2d2 | hash: 153,761,947,041,868,060,000,000,000,000,000,000,000 | size: 159
message:
[DDP]: Convert to new protocol interface. Convert ddp to the new protocol interface which means it has to handle fragmented skb's. The only big change is in the checksum routine which has to do more work (like skb_checksum). Minor speedup is folding the carry to avoid a branch. Tested against a 2.4 system and by running both code over a range of packets.
static int fat_ioctl_filldir(void *__buf, const char *name, int name_len, loff_t offset, u64 ino, unsigned int d_type) { struct fat_ioctl_filldir_callback *buf = __buf; struct dirent __user *d1 = buf->dirent; struct dirent __user *d2 = d1 + 1; if (buf->result) return -EINVAL; buf->result++; if (name != NULL) { /* dirent has only short name */ if (name_len >= sizeof(d1->d_name)) name_len = sizeof(d1->d_name) - 1; if (put_user(0, d2->d_name) || put_user(0, &d2->d_reclen) || copy_to_user(d1->d_name, name, name_len) || put_user(0, d1->d_name + name_len) || put_user(name_len, &d1->d_reclen)) goto efault; } else { /* dirent has short and long name */ const char *longname = buf->longname; int long_len = buf->long_len; const char *shortname = buf->shortname; int short_len = buf->short_len; if (long_len >= sizeof(d1->d_name)) long_len = sizeof(d1->d_name) - 1; if (short_len >= sizeof(d1->d_name)) short_len = sizeof(d1->d_name) - 1; if (copy_to_user(d2->d_name, longname, long_len) || put_user(0, d2->d_name + long_len) || put_user(long_len, &d2->d_reclen) || put_user(ino, &d2->d_ino) || put_user(offset, &d2->d_off) || copy_to_user(d1->d_name, shortname, short_len) || put_user(0, d1->d_name + short_len) || put_user(short_len, &d1->d_reclen)) goto efault; } return 0; efault: buf->result = -EFAULT; return -EFAULT; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: c483bab099cb89e92b7cad94a52fcdaf37e56657 | hash: 187,708,325,882,921,460,000,000,000,000,000,000,000 | size: 49
message:
fat: fix VFAT compat ioctls on 64-bit systems If you compile and run the below test case in an msdos or vfat directory on an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct followed by a SIGSEGV. The patch fixes this. Reported and initial fix by Bart Oldeman #include <sys/types.h> #include <sys/ioctl.h> #include <dirent.h> #include <stdio.h> #include <unistd.h> #include <fcntl.h> struct kernel_dirent { long d_ino; long d_off; unsigned short d_reclen; char d_name[256]; /* We must not include limits.h! */ }; #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct kernel_dirent [2]) #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2]) int main(void) { int fd = open(".", O_RDONLY); struct kernel_dirent de[2]; while (1) { int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de); if (i == -1) break; if (de[0].d_reclen == 0) break; printf("SFN: reclen=%2d off=%d ino=%d, %-12s", de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name); if (de[1].d_reclen) printf("\tLFN: reclen=%2d off=%d ino=%d, %s", de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name); printf("\n"); } return 0; } Signed-off-by: Bart Oldeman <bartoldeman@users.sourceforge.net> Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static int fat_dir_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { struct fat_ioctl_filldir_callback buf; struct dirent __user *d1; int ret, short_only, both; switch (cmd) { case VFAT_IOCTL_READDIR_SHORT: short_only = 1; both = 0; break; case VFAT_IOCTL_READDIR_BOTH: short_only = 0; both = 1; break; default: return fat_generic_ioctl(inode, filp, cmd, arg); } d1 = (struct dirent __user *)arg; if (!access_ok(VERIFY_WRITE, d1, sizeof(struct dirent[2]))) return -EFAULT; /* * Yes, we don't need this put_user() absolutely. However old * code didn't return the right value. So, app use this value, * in order to check whether it is EOF. */ if (put_user(0, &d1->d_reclen)) return -EFAULT; buf.dirent = d1; buf.result = 0; mutex_lock(&inode->i_mutex); ret = -ENOENT; if (!IS_DEADDIR(inode)) { ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir, short_only, both); } mutex_unlock(&inode->i_mutex); if (ret >= 0) ret = buf.result; return ret; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: c483bab099cb89e92b7cad94a52fcdaf37e56657 | hash: 113,627,596,617,655,060,000,000,000,000,000,000,000 | size: 44
message:
fat: fix VFAT compat ioctls on 64-bit systems If you compile and run the below test case in an msdos or vfat directory on an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct followed by a SIGSEGV. The patch fixes this. Reported and initial fix by Bart Oldeman #include <sys/types.h> #include <sys/ioctl.h> #include <dirent.h> #include <stdio.h> #include <unistd.h> #include <fcntl.h> struct kernel_dirent { long d_ino; long d_off; unsigned short d_reclen; char d_name[256]; /* We must not include limits.h! */ }; #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct kernel_dirent [2]) #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2]) int main(void) { int fd = open(".", O_RDONLY); struct kernel_dirent de[2]; while (1) { int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de); if (i == -1) break; if (de[0].d_reclen == 0) break; printf("SFN: reclen=%2d off=%d ino=%d, %-12s", de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name); if (de[1].d_reclen) printf("\tLFN: reclen=%2d off=%d ino=%d, %s", de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name); printf("\n"); } return 0; } Signed-off-by: Bart Oldeman <bartoldeman@users.sourceforge.net> Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static long fat_compat_dir_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct compat_dirent __user *p = compat_ptr(arg); int ret; mm_segment_t oldfs = get_fs(); struct dirent d[2]; switch (cmd) { case VFAT_IOCTL_READDIR_BOTH32: cmd = VFAT_IOCTL_READDIR_BOTH; break; case VFAT_IOCTL_READDIR_SHORT32: cmd = VFAT_IOCTL_READDIR_SHORT; break; default: return -ENOIOCTLCMD; } set_fs(KERNEL_DS); lock_kernel(); ret = fat_dir_ioctl(file->f_path.dentry->d_inode, file, cmd, (unsigned long) &d); unlock_kernel(); set_fs(oldfs); if (ret >= 0) { ret |= fat_compat_put_dirent32(&d[0], p); ret |= fat_compat_put_dirent32(&d[1], p + 1); } return ret; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: c483bab099cb89e92b7cad94a52fcdaf37e56657 | hash: 291,589,362,780,422,180,000,000,000,000,000,000,000 | size: 31
message:
fat: fix VFAT compat ioctls on 64-bit systems If you compile and run the below test case in an msdos or vfat directory on an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct followed by a SIGSEGV. The patch fixes this. Reported and initial fix by Bart Oldeman #include <sys/types.h> #include <sys/ioctl.h> #include <dirent.h> #include <stdio.h> #include <unistd.h> #include <fcntl.h> struct kernel_dirent { long d_ino; long d_off; unsigned short d_reclen; char d_name[256]; /* We must not include limits.h! */ }; #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct kernel_dirent [2]) #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2]) int main(void) { int fd = open(".", O_RDONLY); struct kernel_dirent de[2]; while (1) { int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de); if (i == -1) break; if (de[0].d_reclen == 0) break; printf("SFN: reclen=%2d off=%d ino=%d, %-12s", de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name); if (de[1].d_reclen) printf("\tLFN: reclen=%2d off=%d ino=%d, %s", de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name); printf("\n"); } return 0; } Signed-off-by: Bart Oldeman <bartoldeman@users.sourceforge.net> Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static long fat_compat_put_dirent32(struct dirent *d, struct compat_dirent __user *d32) { if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent))) return -EFAULT; __put_user(d->d_ino, &d32->d_ino); __put_user(d->d_off, &d32->d_off); __put_user(d->d_reclen, &d32->d_reclen); if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen)) return -EFAULT; return 0; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: c483bab099cb89e92b7cad94a52fcdaf37e56657 | hash: 106,659,780,049,225,780,000,000,000,000,000,000,000 | size: 14
message:
fat: fix VFAT compat ioctls on 64-bit systems If you compile and run the below test case in an msdos or vfat directory on an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct followed by a SIGSEGV. The patch fixes this. Reported and initial fix by Bart Oldeman #include <sys/types.h> #include <sys/ioctl.h> #include <dirent.h> #include <stdio.h> #include <unistd.h> #include <fcntl.h> struct kernel_dirent { long d_ino; long d_off; unsigned short d_reclen; char d_name[256]; /* We must not include limits.h! */ }; #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct kernel_dirent [2]) #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2]) int main(void) { int fd = open(".", O_RDONLY); struct kernel_dirent de[2]; while (1) { int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de); if (i == -1) break; if (de[0].d_reclen == 0) break; printf("SFN: reclen=%2d off=%d ino=%d, %-12s", de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name); if (de[1].d_reclen) printf("\tLFN: reclen=%2d off=%d ino=%d, %s", de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name); printf("\n"); } return 0; } Signed-off-by: Bart Oldeman <bartoldeman@users.sourceforge.net> Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
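The root cause in these four rows is a layout mismatch: on a 64-bit kernel, struct dirent holds 64-bit d_ino/d_off, while the 32-bit test program in the message declares them as 32-bit longs, so the bytes the ioctl writes do not line up with what the -m32 process reads. A small stand-alone illustration of the size difference (the field widths are assumptions for illustration, not the kernel's exact definitions):

    #include <stdio.h>

    /* Mimics a 64-bit kernel dirent vs. the layout a -m32 process
     * expects; only meant to show why the raw bytes cannot be copied
     * across the boundary unconverted. */
    struct dirent64 {
        long long d_ino;            /* 8 bytes on a 64-bit kernel */
        long long d_off;
        unsigned short d_reclen;
        char d_name[256];
    };

    struct dirent32 {
        int d_ino;                  /* 4 bytes as seen by -m32 code */
        int d_off;
        unsigned short d_reclen;
        char d_name[256];
    };

    int main(void)
    {
        printf("64-bit layout: %zu bytes\n", sizeof(struct dirent64));
        printf("32-bit layout: %zu bytes\n", sizeof(struct dirent32));
        return 0;
    }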
static int vfat_ioctl32(unsigned fd, unsigned cmd, unsigned long arg) { struct compat_dirent __user *p = compat_ptr(arg); int ret; mm_segment_t oldfs = get_fs(); struct dirent d[2]; switch(cmd) { case VFAT_IOCTL_READDIR_BOTH32: cmd = VFAT_IOCTL_READDIR_BOTH; break; case VFAT_IOCTL_READDIR_SHORT32: cmd = VFAT_IOCTL_READDIR_SHORT; break; } set_fs(KERNEL_DS); ret = sys_ioctl(fd,cmd,(unsigned long)&d); set_fs(oldfs); if (ret >= 0) { ret |= put_dirent32(&d[0], p); ret |= put_dirent32(&d[1], p + 1); } return ret; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: 188f83dfe0eeecd1427d0d255cc97dbf7ef6b4b7 | hash: 111,898,724,151,179,410,000,000,000,000,000,000,000 | size: 26
message:
[PATCH] BLOCK: Move the msdos device ioctl compat stuff to the msdos driver [try #6] Move the msdos device ioctl compat stuff from fs/compat_ioctl.c to the msdos driver so that the msdos header file doesn't need to be included. Signed-Off-By: David Howells <dhowells@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
put_dirent32 (struct dirent *d, struct compat_dirent __user *d32) { if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent))) return -EFAULT; __put_user(d->d_ino, &d32->d_ino); __put_user(d->d_off, &d32->d_off); __put_user(d->d_reclen, &d32->d_reclen); if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen)) return -EFAULT; return 0; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: 188f83dfe0eeecd1427d0d255cc97dbf7ef6b4b7 | hash: 270,045,642,335,722,150,000,000,000,000,000,000,000 | size: 13
message:
[PATCH] BLOCK: Move the msdos device ioctl compat stuff to the msdos driver [try #6] Move the msdos device ioctl compat stuff from fs/compat_ioctl.c to the msdos driver so that the msdos header file doesn't need to be included. Signed-Off-By: David Howells <dhowells@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->rip; seg = regs->cs & 0xffff; /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if (seg & LDT_SEGMENT) { u32 *desc; unsigned long base; down(&child->mm->context.sem); desc = child->mm->context.ldt + (seg & ~7); base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000); /* 16-bit code segment? */ if (!((desc[1] >> 22) & 1)) addr &= 0xffff; addr += base; up(&child->mm->context.sem); } return addr; }
target: 1 | cwe: [ "CWE-20" ] | project: linux-2.6 | commit_id: 29eb51101c02df517ca64ec472d7501127ad1da8 | hash: 1,508,545,436,095,858,700,000,000,000,000,000,000 | size: 29
message:
Handle bogus %cs selector in single-step instruction decoding The code for LDT segment selectors was not robust in the face of a bogus selector set in %cs via ptrace before the single-step was done. Signed-off-by: Roland McGrath <roland@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->eip; seg = regs->xcs & 0xffff; if (regs->eflags & VM_MASK) { addr = (addr & 0xffff) + (seg << 4); return addr; } /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if (seg & LDT_SEGMENT) { u32 *desc; unsigned long base; down(&child->mm->context.sem); desc = child->mm->context.ldt + (seg & ~7); base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000); /* 16-bit code segment? */ if (!((desc[1] >> 22) & 1)) addr &= 0xffff; addr += base; up(&child->mm->context.sem); } return addr; }
target: 1 | cwe: [ "CWE-20" ] | project: linux-2.6 | commit_id: 29eb51101c02df517ca64ec472d7501127ad1da8 | hash: 106,350,190,623,619,750,000,000,000,000,000,000,000 | size: 33
message:
Handle bogus %cs selector in single-step instruction decoding The code for LDT segment selectors was not robust in the face of a bogus selector set in %cs via ptrace before the single-step was done. Signed-off-by: Roland McGrath <roland@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
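These two rows are the x86-64 and i386 variants of the same routine. Per the message, the LDT branch trusted whatever selector ptrace had planted in %cs. A hedged sketch of the kind of bounds check the fix calls for before the descriptor is read (illustrative, not the verbatim patch):

    down(&child->mm->context.sem);
    /* Hypothetical guard: a selector set via ptrace can index past the
     * task's LDT, so validate it before dereferencing the entry. */
    if ((seg >> 3) >= child->mm->context.size) {
        addr = -1L;     /* bogus selector: no linear address */
    } else {
        desc = child->mm->context.ldt + (seg & ~7);
        /* ... decode base and the 16-bit-segment flag as above ... */
    }
    up(&child->mm->context.sem);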
pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) { struct mm_struct *mm = task->mm; struct vm_area_struct *vma = NULL; unsigned long size; void *smpl_buf; /* * the fixed header + requested size and align to page boundary */ size = PAGE_ALIGN(rsize); DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size)); /* * check requested size to avoid Denial-of-service attacks * XXX: may have to refine this test * Check against address space limit. * * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) * return -ENOMEM; */ if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) return -ENOMEM; /* * We do the easy to undo allocations first. * * pfm_rvmalloc(), clears the buffer, so there is no leak */ smpl_buf = pfm_rvmalloc(size); if (smpl_buf == NULL) { DPRINT(("Can't allocate sampling buffer\n")); return -ENOMEM; } DPRINT(("smpl_buf @%p\n", smpl_buf)); /* allocate vma */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { DPRINT(("Cannot allocate vma\n")); goto error_kmem; } /* * partially initialize the vma for the sampling buffer */ vma->vm_mm = mm; vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ /* * Now we have everything we need and we can initialize * and connect all the data structures */ ctx->ctx_smpl_hdr = smpl_buf; ctx->ctx_smpl_size = size; /* aligned size */ /* * Let's do the difficult operations next. * * now we atomically find some area in the address space and * remap the buffer in it. */ down_write(&task->mm->mmap_sem); /* find some free area in address space, must have mmap sem held */ vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0); if (vma->vm_start == 0UL) { DPRINT(("Cannot find unmapped area for size %ld\n", size)); up_write(&task->mm->mmap_sem); goto error; } vma->vm_end = vma->vm_start + size; vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start)); /* can only be applied to current task, need to have the mm semaphore held when called */ if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) { DPRINT(("Can't remap buffer\n")); up_write(&task->mm->mmap_sem); goto error; } /* * now insert the vma in the vm list for the process, must be * done with mmap lock held */ insert_vm_struct(mm, vma); mm->total_vm += size >> PAGE_SHIFT; vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, vma_pages(vma)); up_write(&task->mm->mmap_sem); /* * keep track of user level virtual address */ ctx->ctx_smpl_vaddr = (void *)vma->vm_start; *(unsigned long *)user_vaddr = vma->vm_start; return 0; error: kmem_cache_free(vm_area_cachep, vma); error_kmem: pfm_rvfree(smpl_buf, size); return -ENOMEM; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: 41d5e5d73ecef4ef56b7b4cde962929a712689b4 | hash: 87,803,820,803,317,590,000,000,000,000,000,000,000 | size: 114
message:
[IA64] permon use-after-free fix Perfmon associates vmalloc()ed memory with a file descriptor, and installs a vma mapping that memory. Unfortunately, the vm_file field is not filled in, so processes with mappings to that memory do not prevent the file from being closed and the memory freed. This results in use-after-free bugs and multiple freeing of pages, etc. I saw this bug on an Altix on SLES9. Haven't reproduced upstream but it looks like the same issue is there. Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Stephane Eranian <eranian@hpl.hp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Tony Luck <tony.luck@intel.com>
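The message pinpoints the defect: the vma built in pfm_smpl_buffer_alloc() above never sets vm_file, so mappings of the sampling buffer do not pin the perfmon file. A sketch of the repair at the point the vma is initialized, assuming the struct file is plumbed down from pfm_context_create() (both the plumbing and the placement are assumptions):

    /* Take a reference on the perfmon file and attach it to the vma, so
     * the mapping keeps the file - and therefore the buffer - alive. */
    vma->vm_mm = mm;
    get_file(filp);
    vma->vm_file = filp;
    vma->vm_flags = VM_READ | VM_MAYREAD | VM_RESERVED;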
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { pfarg_context_t *req = (pfarg_context_t *)arg; struct file *filp; int ctx_flags; int ret; /* let's check the arguments first */ ret = pfarg_is_sane(current, req); if (ret < 0) return ret; ctx_flags = req->ctx_flags; ret = -ENOMEM; ctx = pfm_context_alloc(); if (!ctx) goto error; ret = pfm_alloc_fd(&filp); if (ret < 0) goto error_file; req->ctx_fd = ctx->ctx_fd = ret; /* * attach context to file */ filp->private_data = ctx; /* * does the user want to sample? */ if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) { ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req); if (ret) goto buffer_error; } /* * init context protection lock */ spin_lock_init(&ctx->ctx_lock); /* * context is unloaded */ ctx->ctx_state = PFM_CTX_UNLOADED; /* * initialization of context's flags */ ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0; ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0; ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */ ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0; /* * will move to set properties * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0; */ /* * init restart semaphore to locked */ init_completion(&ctx->ctx_restart_done); /* * activation is used in SMP only */ ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; SET_LAST_CPU(ctx, -1); /* * initialize notification message queue */ ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; init_waitqueue_head(&ctx->ctx_msgq_wait); init_waitqueue_head(&ctx->ctx_zombieq); DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n", ctx, ctx_flags, ctx->ctx_fl_system, ctx->ctx_fl_block, ctx->ctx_fl_excl_idle, ctx->ctx_fl_no_msg, ctx->ctx_fd)); /* * initialize soft PMU state */ pfm_reset_pmu_state(ctx); return 0; buffer_error: pfm_free_fd(ctx->ctx_fd, filp); if (ctx->ctx_buf_fmt) { pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs); } error_file: pfm_context_free(ctx); error: return ret; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: 41d5e5d73ecef4ef56b7b4cde962929a712689b4 | hash: 13,520,750,174,224,364,000,000,000,000,000,000,000 | size: 104
message:
[IA64] permon use-after-free fix Perfmon associates vmalloc()ed memory with a file descriptor, and installs a vma mapping that memory. Unfortunately, the vm_file field is not filled in, so processes with mappings to that memory do not prevent the file from being closed and the memory freed. This results in use-after-free bugs and multiple freeing of pages, etc. I saw this bug on an Altix on SLES9. Haven't reproduced upstream but it looks like the same issue is there. Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Stephane Eranian <eranian@hpl.hp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Tony Luck <tony.luck@intel.com>
pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags, unsigned int cpu, pfarg_context_t *arg) { pfm_buffer_fmt_t *fmt = NULL; unsigned long size = 0UL; void *uaddr = NULL; void *fmt_arg = NULL; int ret = 0; #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1) /* invoke and lock buffer format, if found */ fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id); if (fmt == NULL) { DPRINT(("[%d] cannot find buffer format\n", task->pid)); return -EINVAL; } /* * buffer argument MUST be contiguous to pfarg_context_t */ if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg); ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret)); if (ret) goto error; /* link buffer format and context */ ctx->ctx_buf_fmt = fmt; /* * check if buffer format wants to use perfmon buffer allocation/mapping service */ ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size); if (ret) goto error; if (size) { /* * buffer is always remapped into the caller's address space */ ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr); if (ret) goto error; /* keep track of user address of buffer */ arg->ctx_smpl_vaddr = uaddr; } ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg); error: return ret; }
target: 1 | cwe: [] | project: linux-2.6 | commit_id: 41d5e5d73ecef4ef56b7b4cde962929a712689b4 | hash: 46,202,394,531,124,020,000,000,000,000,000,000,000 | size: 52
message:
[IA64] permon use-after-free fix Perfmon associates vmalloc()ed memory with a file descriptor, and installs a vma mapping that memory. Unfortunately, the vm_file field is not filled in, so processes with mappings to that memory do not prevent the file from being closed and the memory freed. This results in use-after-free bugs and multiple freeing of pages, etc. I saw this bug on an Altix on SLES9. Haven't reproduced upstream but it looks like the same issue is there. Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Stephane Eranian <eranian@hpl.hp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Tony Luck <tony.luck@intel.com>
static int __init snd_mem_init(void) { #ifdef CONFIG_PROC_FS snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL); if (snd_mem_proc) { snd_mem_proc->read_proc = snd_mem_proc_read; #ifdef CONFIG_PCI snd_mem_proc->write_proc = snd_mem_proc_write; #endif } #endif return 0; }
1
[]
linux-2.6
ccec6e2c4a74adf76ed4e2478091a311b1806212
6,222,875,397,067,194,000,000,000,000,000,000,000
13
Convert snd-page-alloc proc file to use seq_file Use seq_file for the proc file read/write of snd-page-alloc module. This automatically fixes bugs in the old proc code. Signed-off-by: Takashi Iwai <tiwai@suse.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static int snd_mem_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = 0; long pages = snd_allocated_pages >> (PAGE_SHIFT-12); struct snd_mem_list *mem; int devno; static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" }; mutex_lock(&list_mutex); len += snprintf(page + len, count - len, "pages : %li bytes (%li pages per %likB)\n", pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); devno = 0; list_for_each_entry(mem, &mem_list_head, list) { devno++; len += snprintf(page + len, count - len, "buffer %d : ID %08x : type %s\n", devno, mem->id, types[mem->buffer.dev.type]); len += snprintf(page + len, count - len, " addr = 0x%lx, size = %d bytes\n", (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes); } mutex_unlock(&list_mutex); return len; }
1
[]
linux-2.6
ccec6e2c4a74adf76ed4e2478091a311b1806212
177,539,223,428,479,200,000,000,000,000,000,000,000
26
Convert snd-page-alloc proc file to use seq_file Use seq_file for the proc file read/write of snd-page-alloc module. This automatically fixes bugs in the old proc code. Signed-off-by: Takashi Iwai <tiwai@suse.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
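The snprintf-into-a-page arithmetic above ("page + len, count - len") is exactly what seq_file makes unnecessary. A minimal sketch of the conversion the message describes, using the stock single_open() idiom (names and the printed value are illustrative, not lifted from the patch):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int snd_mem_seq_show(struct seq_file *m, void *v)
{
	/* seq_printf() manages the buffer and file offsets itself, so the
	 * truncation and offset bugs of the old read_proc disappear. */
	seq_printf(m, "pages : %li bytes\n", 0L /* placeholder value */);
	return 0;
}

static int snd_mem_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, snd_mem_seq_show, NULL);
}

static const struct file_operations snd_mem_proc_fops = {
	.open    = snd_mem_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};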
static int snd_mem_proc_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char buf[128]; char *token, *p; if (count > ARRAY_SIZE(buf) - 1) count = ARRAY_SIZE(buf) - 1; if (copy_from_user(buf, buffer, count)) return -EFAULT; buf[ARRAY_SIZE(buf) - 1] = '\0'; p = buf; token = gettoken(&p); if (! token || *token == '#') return (int)count; if (strcmp(token, "add") == 0) { char *endp; int vendor, device, size, buffers; long mask; int i, alloced; struct pci_dev *pci; if ((token = gettoken(&p)) == NULL || (vendor = simple_strtol(token, NULL, 0)) <= 0 || (token = gettoken(&p)) == NULL || (device = simple_strtol(token, NULL, 0)) <= 0 || (token = gettoken(&p)) == NULL || (mask = simple_strtol(token, NULL, 0)) < 0 || (token = gettoken(&p)) == NULL || (size = memparse(token, &endp)) < 64*1024 || size > 16*1024*1024 /* too big */ || (token = gettoken(&p)) == NULL || (buffers = simple_strtol(token, NULL, 0)) <= 0 || buffers > 4) { printk(KERN_ERR "snd-page-alloc: invalid proc write format\n"); return (int)count; } vendor &= 0xffff; device &= 0xffff; alloced = 0; pci = NULL; while ((pci = pci_get_device(vendor, device, pci)) != NULL) { if (mask > 0 && mask < 0xffffffff) { if (pci_set_dma_mask(pci, mask) < 0 || pci_set_consistent_dma_mask(pci, mask) < 0) { printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device); return (int)count; } } for (i = 0; i < buffers; i++) { struct snd_dma_buffer dmab; memset(&dmab, 0, sizeof(dmab)); if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), size, &dmab) < 0) { printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); pci_dev_put(pci); return (int)count; } snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci)); } alloced++; } if (! alloced) { for (i = 0; i < buffers; i++) { struct snd_dma_buffer dmab; memset(&dmab, 0, sizeof(dmab)); /* FIXME: We can allocate only in ZONE_DMA * without a device pointer! */ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL, size, &dmab) < 0) { printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); break; } snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device)); } } } else if (strcmp(token, "erase") == 0) /* FIXME: need for releasing each buffer chunk? */ free_all_reserved_pages(); else printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n"); return (int)count; }
1
[]
linux-2.6
ccec6e2c4a74adf76ed4e2478091a311b1806212
309,883,721,051,093,140,000,000,000,000,000,000,000
86
Convert snd-page-alloc proc file to use seq_file Use seq_file for the proc file read/write of snd-page-alloc module. This automatically fixes bugs in the old proc code. Signed-off-by: Takashi Iwai <tiwai@suse.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static int putreg(struct task_struct *child, unsigned long regno, unsigned long value) { unsigned long tmp; /* Some code in the 64bit emulation may not be 64bit clean. Don't take any chances. */ if (test_tsk_thread_flag(child, TIF_IA32)) value &= 0xffffffff; switch (regno) { case offsetof(struct user_regs_struct,fs): if (value && (value & 3) != 3) return -EIO; child->thread.fsindex = value & 0xffff; return 0; case offsetof(struct user_regs_struct,gs): if (value && (value & 3) != 3) return -EIO; child->thread.gsindex = value & 0xffff; return 0; case offsetof(struct user_regs_struct,ds): if (value && (value & 3) != 3) return -EIO; child->thread.ds = value & 0xffff; return 0; case offsetof(struct user_regs_struct,es): if (value && (value & 3) != 3) return -EIO; child->thread.es = value & 0xffff; return 0; case offsetof(struct user_regs_struct,ss): if ((value & 3) != 3) return -EIO; value &= 0xffff; return 0; case offsetof(struct user_regs_struct,fs_base): if (value >= TASK_SIZE_OF(child)) return -EIO; child->thread.fs = value; return 0; case offsetof(struct user_regs_struct,gs_base): if (value >= TASK_SIZE_OF(child)) return -EIO; child->thread.gs = value; return 0; case offsetof(struct user_regs_struct, eflags): value &= FLAG_MASK; tmp = get_stack_long(child, EFL_OFFSET); tmp &= ~FLAG_MASK; value |= tmp; break; case offsetof(struct user_regs_struct,cs): if ((value & 3) != 3) return -EIO; value &= 0xffff; break; } put_stack_long(child, regno - sizeof(struct pt_regs), value); return 0; }
1
[ "CWE-264" ]
linux-2.6
176df2457ef6207156ca1a40991c54ca01fef567
118,245,962,164,453,330,000,000,000,000,000,000,000
60
x86_64: Zero extend all registers after ptrace in 32bit entry path. Strictly it's only needed for eax. It actually does a little more than strictly needed -- the other registers are already zero extended. Also remove the now unnecessary and non-functional compat task check in ptrace. This is CVE-2007-4573. Found by Wojciech Purczynski. Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
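The class of bug is easiest to see outside the kernel: a bounds check that looks only at the low 32 bits of a register while the full 64-bit value indexes the syscall table. A userspace toy in C, not the actual entry-path assembly:

#include <stdint.h>
#include <stdio.h>

static long table[16];

/* Buggy shape: the check sees the low half, the index uses all 64 bits,
 * so high bits planted via ptrace on a 32-bit task escape the check. */
static long lookup(uint64_t rax)
{
	if ((uint32_t)rax >= 16)   /* comparison on the low 32 bits only */
		return -1;
	return table[rax];         /* indexing with the full 64-bit value */
}

int main(void)
{
	uint64_t evil = 0x100000000ULL | 3; /* low half passes the check */
	printf("%ld\n", lookup(evil));      /* out-of-bounds read, may crash */
	return 0;
}

The fix corresponds to zero-extending the register, i.e. indexing with (uint32_t)rax as well.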
static int pwc_video_close(struct inode *inode, struct file *file) { struct video_device *vdev = file->private_data; struct pwc_device *pdev; int i; PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev); pdev = (struct pwc_device *)vdev->priv; if (pdev->vopen == 0) PWC_DEBUG_MODULE("video_close() called on closed device?\n"); /* Dump statistics, but only if a reasonable amount of frames were processed (to prevent endless log-entries in case of snap-shot programs) */ if (pdev->vframe_count > 20) PWC_DEBUG_MODULE("Closing video device: %d frames received, dumped %d frames, %d frames with errors.\n", pdev->vframe_count, pdev->vframes_dumped, pdev->vframes_error); if (DEVICE_USE_CODEC1(pdev->type)) pwc_dec1_exit(); else pwc_dec23_exit(); pwc_isoc_cleanup(pdev); pwc_free_buffers(pdev); /* Turn off LEDS and power down camera, but only when not unplugged */ if (pdev->error_status != EPIPE) { /* Turn LEDs off */ if (pwc_set_leds(pdev, 0, 0) < 0) PWC_DEBUG_MODULE("Failed to set LED on/off time.\n"); if (power_save) { i = pwc_camera_power(pdev, 0); if (i < 0) PWC_ERROR("Failed to power down camera (%d)\n", i); } } pdev->vopen--; PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen); return 0; }
1
[ "CWE-399" ]
linux-2.6
85237f202d46d55c1bffe0c5b1aa3ddc0f1dce4d
124,682,724,723,247,590,000,000,000,000,000,000,000
42
USB: fix DoS in pwc USB video driver The pwc driver has a disconnect method that waits for user space to close the device. This opens up an opportunity for a DoS attack, blocking the USB subsystem and making khubd's task busy wait in kernel space. This patch shifts freeing resources to close if an opened device is disconnected. Signed-off-by: Oliver Neukum <oneukum@suse.de> CC: stable <stable@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
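Sketched, the pattern this message describes: disconnect() only marks the device gone and wakes any waiters, and whichever of disconnect() or the last close() runs second does the freeing. The 'unplugged' flag and the free helper are assumptions for illustration, not lifted from the patch:

/* Sketch, assuming struct pwc_device grows an 'unplugged' flag and a
 * pwc_free_resources() helper (both hypothetical names); vopen and
 * frameq are the fields visible in the records above. */
static void sketch_disconnect(struct pwc_device *pdev)
{
	pdev->unplugged = 1;                  /* assumed flag */
	wake_up_interruptible(&pdev->frameq); /* unblock readers */
	if (pdev->vopen == 0)
		pwc_free_resources(pdev);     /* nobody has it open: free now */
	/* else: the last pwc_video_close() sees 'unplugged' and frees,
	 * instead of disconnect() busy-waiting on pdev->vopen */
}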
static void usb_pwc_disconnect(struct usb_interface *intf) { struct pwc_device *pdev; int hint; lock_kernel(); pdev = usb_get_intfdata (intf); usb_set_intfdata (intf, NULL); if (pdev == NULL) { PWC_ERROR("pwc_disconnect() Called without private pointer.\n"); goto disconnect_out; } if (pdev->udev == NULL) { PWC_ERROR("pwc_disconnect() already called for %p\n", pdev); goto disconnect_out; } if (pdev->udev != interface_to_usbdev(intf)) { PWC_ERROR("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n"); goto disconnect_out; } /* We got unplugged; this is signalled by an EPIPE error code */ if (pdev->vopen) { PWC_INFO("Disconnected while webcam is in use!\n"); pdev->error_status = EPIPE; } /* Alert waiting processes */ wake_up_interruptible(&pdev->frameq); /* Wait until device is closed */ while (pdev->vopen) schedule(); /* Device is now closed, so we can safely unregister it */ PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n"); pwc_remove_sysfs_files(pdev->vdev); video_unregister_device(pdev->vdev); /* Free memory (don't set pdev to 0 just yet) */ kfree(pdev); disconnect_out: /* search device_hint[] table if we occupy a slot, by any chance */ for (hint = 0; hint < MAX_DEV_HINTS; hint++) if (device_hint[hint].pdev == pdev) device_hint[hint].pdev = NULL; unlock_kernel(); }
1
[ "CWE-399" ]
linux-2.6
85237f202d46d55c1bffe0c5b1aa3ddc0f1dce4d
73,412,740,084,702,270,000,000,000,000,000,000,000
48
USB: fix DoS in pwc USB video driver The pwc driver has a disconnect method that waits for user space to close the device. This opens up an opportunity for a DoS attack, blocking the USB subsystem and making khubd's task busy wait in kernel space. This patch shifts freeing resources to close if an opened device is disconnected. Signed-off-by: Oliver Neukum <oneukum@suse.de> CC: stable <stable@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
static int return_EIO(void) { return -EIO; }
1
[]
linux-2.6
be6aab0e9fa6d3c6d75aa1e38ac972d8b4ee82b8
96,018,639,668,712,650,000,000,000,000,000,000,000
4
[PATCH] fix memory corruption from misinterpreted bad_inode_ops return values CVE-2006-5753 is for a case where an inode can be marked bad, switching the ops to bad_inode_ops, which are all connected as: static int return_EIO(void) { return -EIO; } #define EIO_ERROR ((void *) (return_EIO)) static struct inode_operations bad_inode_ops = { .create = bad_inode_create ...etc... The problem here is that the void cast causes return types to not be promoted, and for ops such as listxattr which expect more than 32 bits of return value, the 32-bit -EIO is interpreted as a large positive 64-bit number, i.e. 0x00000000fffffffa instead of 0xfffffffa. This goes particularly badly when the return value is taken as a number of bytes to copy into, say, a user's buffer for example... I originally had coded up the fix by creating a return_EIO_<TYPE> macro for each return type, like this: static int return_EIO_int(void) { return -EIO; } #define EIO_ERROR_INT ((void *) (return_EIO_int)) static struct inode_operations bad_inode_ops = { .create = EIO_ERROR_INT, ...etc... but Al felt that it was probably better to create an EIO-returner for each actual op signature. Since so few ops share a signature, I just went ahead & created an EIO function for each individual file & inode op that returns a value. Signed-off-by: Eric Sandeen <sandeen@redhat.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
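The effect is easy to reproduce in userspace: call an int-returning function through a pointer whose type promises a 64-bit ssize_t. This is undefined behaviour in C, which is exactly the commit's point; on common 64-bit ABIs the upper half of the return register is whatever happened to be there:

#include <stdio.h>
#include <sys/types.h>

static int return_EIO(void) { return -5; /* -EIO on Linux */ }

int main(void)
{
	/* The mismatched cast, as in the old bad_inode_ops table. */
	ssize_t (*listxattr)(void) = (ssize_t (*)(void))return_EIO;
	ssize_t ret = listxattr(); /* upper 32 bits are undefined */
	printf("ret = %zd (0x%zx)\n", ret, (size_t)ret);
	return 0;
}

The fix the message settles on, one correctly-typed EIO returner per operation signature, makes every call site return a genuine 64-bit -EIO.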
void make_bad_inode(struct inode * inode) { remove_inode_hash(inode); inode->i_mode = S_IFREG; inode->i_atime = inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); inode->i_op = &bad_inode_ops; inode->i_fop = &bad_file_ops; }
1
[]
linux-2.6
be6aab0e9fa6d3c6d75aa1e38ac972d8b4ee82b8
324,712,412,023,376,660,000,000,000,000,000,000,000
10
[PATCH] fix memory corruption from misinterpreted bad_inode_ops return values CVE-2006-5753 is for a case where an inode can be marked bad, switching the ops to bad_inode_ops, which are all connected as: static int return_EIO(void) { return -EIO; } #define EIO_ERROR ((void *) (return_EIO)) static struct inode_operations bad_inode_ops = { .create = bad_inode_create ...etc... The problem here is that the void cast causes return types to not be promoted, and for ops such as listxattr which expect more than 32 bits of return value, the 32-bit -EIO is interpreted as a large positive 64-bit number, i.e. 0x00000000fffffffa instead of 0xfffffffa. This goes particularly badly when the return value is taken as a number of bytes to copy into, say, a user's buffer for example... I originally had coded up the fix by creating a return_EIO_<TYPE> macro for each return type, like this: static int return_EIO_int(void) { return -EIO; } #define EIO_ERROR_INT ((void *) (return_EIO_int)) static struct inode_operations bad_inode_ops = { .create = EIO_ERROR_INT, ...etc... but Al felt that it was probably better to create an EIO-returner for each actual op signature. Since so few ops share a signature, I just went ahead & created an EIO function for each individual file & inode op that returns a value. Signed-off-by: Eric Sandeen <sandeen@redhat.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
int is_bad_inode(struct inode * inode) { return (inode->i_op == &bad_inode_ops); }
1
[]
linux-2.6
be6aab0e9fa6d3c6d75aa1e38ac972d8b4ee82b8
274,069,347,086,903,100,000,000,000,000,000,000,000
4
[PATCH] fix memory corruption from misinterpreted bad_inode_ops return values CVE-2006-5753 is for a case where an inode can be marked bad, switching the ops to bad_inode_ops, which are all connected as: static int return_EIO(void) { return -EIO; } #define EIO_ERROR ((void *) (return_EIO)) static struct inode_operations bad_inode_ops = { .create = bad_inode_create ...etc... The problem here is that the void cast causes return types to not be promoted, and for ops such as listxattr which expect more than 32 bits of return value, the 32-bit -EIO is interpreted as a large positive 64-bit number, i.e. 0x00000000fffffffa instead of 0xfffffffa. This goes particularly badly when the return value is taken as a number of bytes to copy into, say, a user's buffer for example... I originally had coded up the fix by creating a return_EIO_<TYPE> macro for each return type, like this: static int return_EIO_int(void) { return -EIO; } #define EIO_ERROR_INT ((void *) (return_EIO_int)) static struct inode_operations bad_inode_ops = { .create = EIO_ERROR_INT, ...etc... but Al felt that it was probably better to create an EIO-returner for each actual op signature. Since so few ops share a signature, I just went ahead & created an EIO function for each individual file & inode op that returns a value. Signed-off-by: Eric Sandeen <sandeen@redhat.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
void hash_preload(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap) { unsigned long vsid; void *pgdir; pte_t *ptep; cpumask_t mask; unsigned long flags; int local = 0; /* We don't want huge pages prefaulted for now */ if (unlikely(in_hugepage_area(mm->context, ea))) return; DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx," " trap=%lx\n", mm, mm->pgd, ea, access, trap); /* Get PTE, VSID, access mask */ pgdir = mm->pgd; if (pgdir == NULL) return; ptep = find_linux_pte(pgdir, ea); if (!ptep) return; vsid = get_vsid(mm->context.id, ea); /* Hash it in */ local_irq_save(flags); mask = cpumask_of_cpu(smp_processor_id()); if (cpus_equal(mm->cpu_vm_mask, mask)) local = 1; #ifndef CONFIG_PPC_64K_PAGES __hash_page_4K(ea, access, vsid, ptep, trap, local); #else if (mmu_ci_restrictions) { /* If this PTE is non-cacheable, switch to 4k */ if (mm->context.user_psize == MMU_PAGE_64K && (pte_val(*ptep) & _PAGE_NO_CACHE)) { mm->context.user_psize = MMU_PAGE_4K; mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp; get_paca()->context = mm->context; slb_flush_and_rebolt(); #ifdef CONFIG_SPE_BASE spu_flush_all_slbs(mm); #endif } } if (mm->context.user_psize == MMU_PAGE_64K) __hash_page_64K(ea, access, vsid, ptep, trap, local); else __hash_page_4K(ea, access, vsid, ptep, trap, local); #endif /* CONFIG_PPC_64K_PAGES */ local_irq_restore(flags); }
1
[ "CWE-200" ]
linux-2.6
721151d004dcf01a71b12bb6b893f9160284cf6e
283,712,393,711,387,440,000,000,000,000,000,000,000
56
[POWERPC] Allow drivers to map individual 4k pages to userspace Some drivers have resources that they want to be able to map into userspace that are 4k in size. On a kernel configured with 64k pages we currently end up mapping the 4k we want plus another 60k of physical address space, which could contain anything. This can introduce security problems, for example in the case of an infiniband adaptor where the other 60k could contain registers that some other program is using for its communications. This patch adds a new function, remap_4k_pfn, which drivers can use to map a single 4k page to userspace regardless of whether the kernel is using a 4k or a 64k page size. Like remap_pfn_range, it would typically be called in a driver's mmap function. It only maps a single 4k page, which on a 64k page kernel appears replicated 16 times throughout a 64k page. On a 4k page kernel it reduces to a call to remap_pfn_range. The way this works on a 64k kernel is that a new bit, _PAGE_4K_PFN, gets set on the linux PTE. This alters the way that __hash_page_4K computes the real address to put in the HPTE. The RPN field of the linux PTE becomes the 4k RPN directly rather than being interpreted as a 64k RPN. Since the RPN field is 32 bits, this means that physical addresses being mapped with remap_4k_pfn have to be below 2^44, i.e. 0x100000000000. The patch also factors out the code in arch/powerpc/mm/hash_utils_64.c that deals with demoting a process to use 4k pages into one function that gets called in the various different places where we need to do that. There were some discrepancies between exactly what was done in the various places, such as a call to spu_flush_all_slbs in one case but not in others. Signed-off-by: Paul Mackerras <paulus@samba.org>
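Per the message, a driver calls remap_4k_pfn() from its mmap handler exactly where it would otherwise call remap_pfn_range(). A hedged sketch (driver name and PFN are placeholders, and the pgprot handling is the usual MMIO idiom rather than anything from this patch):

#include <linux/fs.h>
#include <linux/mm.h>

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x1000; /* hypothetical 4k register page */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* Maps a single 4k page even on a 64k-page kernel, so the adjacent
	 * 60k of device registers never reaches userspace. */
	return remap_4k_pfn(vma, vma->vm_start, pfn, vma->vm_page_prot);
}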
int hash_page(unsigned long ea, unsigned long access, unsigned long trap) { void *pgdir; unsigned long vsid; struct mm_struct *mm; pte_t *ptep; cpumask_t tmp; int rc, user_region = 0, local = 0; int psize; DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", ea, access, trap); if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { DBG_LOW(" out of pgtable range !\n"); return 1; } /* Get region & vsid */ switch (REGION_ID(ea)) { case USER_REGION_ID: user_region = 1; mm = current->mm; if (! mm) { DBG_LOW(" user region with no mm !\n"); return 1; } vsid = get_vsid(mm->context.id, ea); psize = mm->context.user_psize; break; case VMALLOC_REGION_ID: mm = &init_mm; vsid = get_kernel_vsid(ea); if (ea < VMALLOC_END) psize = mmu_vmalloc_psize; else psize = mmu_io_psize; break; default: /* Not a valid range * Send the problem up to do_page_fault */ return 1; } DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); /* Get pgdir */ pgdir = mm->pgd; if (pgdir == NULL) return 1; /* Check CPU locality */ tmp = cpumask_of_cpu(smp_processor_id()); if (user_region && cpus_equal(mm->cpu_vm_mask, tmp)) local = 1; /* Handle hugepage regions */ if (unlikely(in_hugepage_area(mm->context, ea))) { DBG_LOW(" -> huge page !\n"); return hash_huge_page(mm, access, ea, vsid, local, trap); } /* Get PTE and page size from page tables */ ptep = find_linux_pte(pgdir, ea); if (ptep == NULL || !pte_present(*ptep)) { DBG_LOW(" no PTE !\n"); return 1; } #ifndef CONFIG_PPC_64K_PAGES DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep)); #else DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep), pte_val(*(ptep + PTRS_PER_PTE))); #endif /* Pre-check access permissions (will be re-checked atomically * in __hash_page_XX but this pre-check is a fast path */ if (access & ~pte_val(*ptep)) { DBG_LOW(" no access !\n"); return 1; } /* Do actual hashing */ #ifndef CONFIG_PPC_64K_PAGES rc = __hash_page_4K(ea, access, vsid, ptep, trap, local); #else if (mmu_ci_restrictions) { /* If this PTE is non-cacheable, switch to 4k */ if (psize == MMU_PAGE_64K && (pte_val(*ptep) & _PAGE_NO_CACHE)) { if (user_region) { psize = MMU_PAGE_4K; mm->context.user_psize = MMU_PAGE_4K; mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp; } else if (ea < VMALLOC_END) { /* * some driver did a non-cacheable mapping * in vmalloc space, so switch vmalloc * to 4k pages */ printk(KERN_ALERT "Reducing vmalloc segment " "to 4kB pages because of " "non-cacheable mapping\n"); psize = mmu_vmalloc_psize = MMU_PAGE_4K; } #ifdef CONFIG_SPE_BASE spu_flush_all_slbs(mm); #endif } if (user_region) { if (psize != get_paca()->context.user_psize) { get_paca()->context = mm->context; slb_flush_and_rebolt(); } } else if (get_paca()->vmalloc_sllp != mmu_psize_defs[mmu_vmalloc_psize].sllp) { get_paca()->vmalloc_sllp = mmu_psize_defs[mmu_vmalloc_psize].sllp; slb_flush_and_rebolt(); } } if (psize == MMU_PAGE_64K) rc = __hash_page_64K(ea, access, vsid, ptep, trap, local); else rc = __hash_page_4K(ea, access, vsid, ptep, trap, local); #endif /* CONFIG_PPC_64K_PAGES */ #ifndef CONFIG_PPC_64K_PAGES DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); #else DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep), pte_val(*(ptep + PTRS_PER_PTE))); #endif DBG_LOW(" -> rc=%d\n", rc); return rc; }
1
[ "CWE-200" ]
linux-2.6
721151d004dcf01a71b12bb6b893f9160284cf6e
9,811,447,350,898,023,000,000,000,000,000,000,000
138
[POWERPC] Allow drivers to map individual 4k pages to userspace Some drivers have resources that they want to be able to map into userspace that are 4k in size. On a kernel configured with 64k pages we currently end up mapping the 4k we want plus another 60k of physical address space, which could contain anything. This can introduce security problems, for example in the case of an infiniband adaptor where the other 60k could contain registers that some other program is using for its communications. This patch adds a new function, remap_4k_pfn, which drivers can use to map a single 4k page to userspace regardless of whether the kernel is using a 4k or a 64k page size. Like remap_pfn_range, it would typically be called in a driver's mmap function. It only maps a single 4k page, which on a 64k page kernel appears replicated 16 times throughout a 64k page. On a 4k page kernel it reduces to a call to remap_pfn_range. The way this works on a 64k kernel is that a new bit, _PAGE_4K_PFN, gets set on the linux PTE. This alters the way that __hash_page_4K computes the real address to put in the HPTE. The RPN field of the linux PTE becomes the 4k RPN directly rather than being interpreted as a 64k RPN. Since the RPN field is 32 bits, this means that physical addresses being mapped with remap_4k_pfn have to be below 2^44, i.e. 0x100000000000. The patch also factors out the code in arch/powerpc/mm/hash_utils_64.c that deals with demoting a process to use 4k pages into one function that gets called in the various different places where we need to do that. There were some discrepancies between exactly what was done in the various places, such as a call to spu_flush_all_slbs in one case but not in others. Signed-off-by: Paul Mackerras <paulus@samba.org>
int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct net_device *dev = ieee->dev; struct ieee80211_hdr_4addr *hdr; size_t hdrlen; u16 fc, type, stype, sc; struct net_device_stats *stats; unsigned int frag; u8 *payload; u16 ethertype; #ifdef NOT_YET struct net_device *wds = NULL; struct sk_buff *skb2 = NULL; int frame_authorized = 0; int from_assoc_ap = 0; void *sta = NULL; #endif u8 dst[ETH_ALEN]; u8 src[ETH_ALEN]; struct ieee80211_crypt_data *crypt = NULL; int keyidx = 0; int can_be_decrypted = 0; hdr = (struct ieee80211_hdr_4addr *)skb->data; stats = &ieee->stats; if (skb->len < 10) { printk(KERN_INFO "%s: SKB length < 10\n", dev->name); goto rx_dropped; } fc = le16_to_cpu(hdr->frame_ctl); type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); sc = le16_to_cpu(hdr->seq_ctl); frag = WLAN_GET_SEQ_FRAG(sc); hdrlen = ieee80211_get_hdrlen(fc); /* Put this code here so that we avoid duplicating it in all * Rx paths. - Jean II */ #ifdef CONFIG_WIRELESS_EXT #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ /* If spy monitoring on */ if (ieee->spy_data.spy_number > 0) { struct iw_quality wstats; wstats.updated = 0; if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { wstats.level = rx_stats->rssi; wstats.updated |= IW_QUAL_LEVEL_UPDATED; } else wstats.updated |= IW_QUAL_LEVEL_INVALID; if (rx_stats->mask & IEEE80211_STATMASK_NOISE) { wstats.noise = rx_stats->noise; wstats.updated |= IW_QUAL_NOISE_UPDATED; } else wstats.updated |= IW_QUAL_NOISE_INVALID; if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) { wstats.qual = rx_stats->signal; wstats.updated |= IW_QUAL_QUAL_UPDATED; } else wstats.updated |= IW_QUAL_QUAL_INVALID; /* Update spy records */ wireless_spy_update(ieee->dev, hdr->addr2, &wstats); } #endif /* IW_WIRELESS_SPY */ #endif /* CONFIG_WIRELESS_EXT */ #ifdef NOT_YET hostap_update_rx_stats(local->ap, hdr, rx_stats); #endif if (ieee->iw_mode == IW_MODE_MONITOR) { stats->rx_packets++; stats->rx_bytes += skb->len; ieee80211_monitor_rx(ieee, skb, rx_stats); return 1; } can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr2)) ? ieee->host_mc_decrypt : ieee->host_decrypt; if (can_be_decrypted) { if (skb->len >= hdrlen + 3) { /* Top two-bits of byte 3 are the key index */ keyidx = skb->data[hdrlen + 3] >> 6; } /* ieee->crypt[] is WEP_KEY (4) in length. Given that keyidx * is only allowed 2-bits of storage, no value of keyidx can * be provided via above code that would result in keyidx * being out of range */ crypt = ieee->crypt[keyidx]; #ifdef NOT_YET sta = NULL; /* Use station specific key to override default keys if the * receiver address is a unicast address ("individual RA"). If * bcrx_sta_key parameter is set, station specific key is used * even with broad/multicast targets (this is against IEEE * 802.11, but makes it easier to use different keys with * stations that do not support WEP key mapping). */ if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key) (void)hostap_handle_sta_crypto(local, hdr, &crypt, &sta); #endif /* allow NULL decrypt to indicate an station specific override * for default encryption */ if (crypt && (crypt->ops == NULL || crypt->ops->decrypt_mpdu == NULL)) crypt = NULL; if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) { /* This seems to be triggered by some (multicast?) * frames from other than current BSS, so just drop the * frames silently instead of filling system log with * these reports. */ IEEE80211_DEBUG_DROP("Decryption failed (not set)" " (SA=" MAC_FMT ")\n", MAC_ARG(hdr->addr2)); ieee->ieee_stats.rx_discards_undecryptable++; goto rx_dropped; } } #ifdef NOT_YET if (type != WLAN_FC_TYPE_DATA) { if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH && fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt && (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) { printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " "from " MAC_FMT "\n", dev->name, MAC_ARG(hdr->addr2)); /* TODO: could inform hostapd about this so that it * could send auth failure report */ goto rx_dropped; } if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype)) goto rx_dropped; else goto rx_exit; } #endif /* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.29) */ if (sc == ieee->prev_seq_ctl) goto rx_dropped; else ieee->prev_seq_ctl = sc; /* Data frame - extract src/dst addresses */ if (skb->len < IEEE80211_3ADDR_LEN) goto rx_dropped; switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_FROMDS: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr3, ETH_ALEN); break; case IEEE80211_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); break; case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: if (skb->len < IEEE80211_4ADDR_LEN) goto rx_dropped; memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr4, ETH_ALEN); break; case 0: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); break; } #ifdef NOT_YET if (hostap_rx_frame_wds(ieee, hdr, fc, &wds)) goto rx_dropped; if (wds) { skb->dev = dev = wds; stats = hostap_get_stats(dev); } if (ieee->iw_mode == IW_MODE_MASTER && !wds && (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && ieee->stadev && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) { /* Frame from BSSID of the AP for which we are a client */ skb->dev = dev = ieee->stadev; stats = hostap_get_stats(dev); from_assoc_ap = 1; } #endif dev->last_rx = jiffies; #ifdef NOT_YET if ((ieee->iw_mode == IW_MODE_MASTER || ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats, wds != NULL)) { case AP_RX_CONTINUE_NOT_AUTHORIZED: frame_authorized = 0; break; case AP_RX_CONTINUE: frame_authorized = 1; break; case AP_RX_DROP: goto rx_dropped; case AP_RX_EXIT: goto rx_exit; } } #endif /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() before being dropped here. */ stype &= ~IEEE80211_STYPE_QOS_DATA; if (stype != IEEE80211_STYPE_DATA && stype != IEEE80211_STYPE_DATA_CFACK && stype != IEEE80211_STYPE_DATA_CFPOLL && stype != IEEE80211_STYPE_DATA_CFACKPOLL) { if (stype != IEEE80211_STYPE_NULLFUNC) IEEE80211_DEBUG_DROP("RX: dropped data frame " "with no data (type=0x%02x, " "subtype=0x%02x, len=%d)\n", type, stype, skb->len); goto rx_dropped; } /* skb: hdr + (possibly fragmented, possibly encrypted) payload */ if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) goto rx_dropped; hdr = (struct ieee80211_hdr_4addr *)skb->data; /* skb: hdr + (possibly fragmented) plaintext payload */ // PR: FIXME: hostap has additional conditions in the "if" below: // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) { int flen; struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); if (!frag_skb) { IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG, "Rx cannot get skb from fragment " "cache (morefrag=%d seq=%u frag=%u)\n", (fc & IEEE80211_FCTL_MOREFRAGS) != 0, WLAN_GET_SEQ_SEQ(sc), frag); goto rx_dropped; } flen = skb->len; if (frag != 0) flen -= hdrlen; if (frag_skb->tail + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); ieee80211_frag_cache_invalidate(ieee, hdr); goto rx_dropped; } if (frag == 0) { /* copy first fragment (including full headers) into * beginning of the fragment cache skb */ skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen); } else { /* append frame payload to the end of the fragment * cache skb */ skb_copy_from_linear_data_offset(skb, hdrlen, skb_put(frag_skb, flen), flen); } dev_kfree_skb_any(skb); skb = NULL; if (fc & IEEE80211_FCTL_MOREFRAGS) { /* more fragments expected - leave the skb in fragment * cache for now; it will be delivered to upper layers * after all fragments have been received */ goto rx_exit; } /* this was the last fragment and the frame will be * delivered, so remove skb from fragment cache */ skb = frag_skb; hdr = (struct ieee80211_hdr_4addr *)skb->data; ieee80211_frag_cache_invalidate(ieee, hdr); } /* skb: hdr + (possible reassembled) full MSDU payload; possibly still * encrypted/authenticated */ if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) goto rx_dropped; hdr = (struct ieee80211_hdr_4addr *)skb->data; if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { if ( /*ieee->ieee802_1x && */ ieee80211_is_eapol_frame(ieee, skb)) { /* pass unencrypted EAPOL frames even if encryption is * configured */ } else { IEEE80211_DEBUG_DROP("encryption configured, but RX " "frame not encrypted (SA=" MAC_FMT ")\n", MAC_ARG(hdr->addr2)); goto rx_dropped; } } if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep && !ieee80211_is_eapol_frame(ieee, skb)) { IEEE80211_DEBUG_DROP("dropped unencrypted RX data " "frame from " MAC_FMT " (drop_unencrypted=1)\n", MAC_ARG(hdr->addr2)); goto rx_dropped; } /* If the frame was decrypted in hardware, we may need to strip off * any security data (IV, ICV, etc) that was left behind */ if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) && ieee->host_strip_iv_icv) { int trimlen = 0; /* Top two-bits of byte 3 are the key index */ if (skb->len >= hdrlen + 3) keyidx = skb->data[hdrlen + 3] >> 6; /* To strip off any security data which appears before the * payload, we simply increase hdrlen (as the header gets * chopped off immediately below). For the security data which * appears after the payload, we use skb_trim. */ switch (ieee->sec.encode_alg[keyidx]) { case SEC_ALG_WEP: /* 4 byte IV */ hdrlen += 4; /* 4 byte ICV */ trimlen = 4; break; case SEC_ALG_TKIP: /* 4 byte IV, 4 byte ExtIV */ hdrlen += 8; /* 8 byte MIC, 4 byte ICV */ trimlen = 12; break; case SEC_ALG_CCMP: /* 8 byte CCMP header */ hdrlen += 8; /* 8 byte MIC */ trimlen = 8; break; } if (skb->len < trimlen) goto rx_dropped; __skb_trim(skb, skb->len - trimlen); if (skb->len < hdrlen) goto rx_dropped; } /* skb: hdr + (possible reassembled) full plaintext payload */ payload = skb->data + hdrlen; ethertype = (payload[6] << 8) | payload[7]; #ifdef NOT_YET /* If IEEE 802.1X is used, check whether the port is authorized to send * the received frame. */ if (ieee->ieee802_1x && ieee->iw_mode == IW_MODE_MASTER) { if (ethertype == ETH_P_PAE) { printk(KERN_DEBUG "%s: RX: IEEE 802.1X frame\n", dev->name); if (ieee->hostapd && ieee->apdev) { /* Send IEEE 802.1X frames to the user * space daemon for processing */ prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT); ieee->apdevstats.rx_packets++; ieee->apdevstats.rx_bytes += skb->len; goto rx_exit; } } else if (!frame_authorized) { printk(KERN_DEBUG "%s: dropped frame from " "unauthorized port (IEEE 802.1X): " "ethertype=0x%04x\n", dev->name, ethertype); goto rx_dropped; } } #endif /* convert hdr + possible LLC headers into Ethernet header */ if (skb->len - hdrlen >= 8 && ((memcmp(payload, rfc1042_header, SNAP_SIZE) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(payload, bridge_tunnel_header, SNAP_SIZE) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(skb, hdrlen + SNAP_SIZE); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } else { u16 len; /* Leave Ethernet header part of hdr and full payload */ skb_pull(skb, hdrlen); len = htons(skb->len); memcpy(skb_push(skb, 2), &len, 2); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } #ifdef NOT_YET if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) { /* Non-standard frame: get addr4 from its bogus location after * the payload */ skb_copy_to_linear_data_offset(skb, ETH_ALEN, skb->data + skb->len - ETH_ALEN, ETH_ALEN); skb_trim(skb, skb->len - ETH_ALEN); } #endif stats->rx_packets++; stats->rx_bytes += skb->len; #ifdef NOT_YET if (ieee->iw_mode == IW_MODE_MASTER && !wds && ieee->ap->bridge_packets) { if (dst[0] & 0x01) { /* copy multicast frame both to the higher layers and * to the wireless media */ ieee->ap->bridged_multicast++; skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2 == NULL) printk(KERN_DEBUG "%s: skb_clone failed for " "multicast frame\n", dev->name); } else if (hostap_is_sta_assoc(ieee->ap, dst)) { /* send frame directly to the associated STA using * wireless media and not passing to higher layers */ ieee->ap->bridged_unicast++; skb2 = skb; skb = NULL; } } if (skb2 != NULL) { /* send to wireless media */ skb2->dev = dev; skb2->protocol = __constant_htons(ETH_P_802_3); skb_reset_mac_header(skb2); skb_reset_network_header(skb2); /* skb2->network_header += ETH_HLEN; */ dev_queue_xmit(skb2); } #endif if (skb) { skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ if (netif_rx(skb) == NET_RX_DROP) { /* netif_rx always succeeds, but it might drop * the packet. If it drops the packet, we log that * in our stats. */ IEEE80211_DEBUG_DROP ("RX: netif_rx dropped the packet\n"); stats->rx_dropped++; } } rx_exit: #ifdef NOT_YET if (sta) hostap_handle_sta_release(sta); #endif return 1; rx_dropped: stats->rx_dropped++; /* Returning 0 indicates to caller that we have not handled the SKB-- * so it is still allocated and can be used again by underlying * hardware as a DMA target */ return 0; }
1
[ "CWE-189" ]
linux-2.6
04045f98e0457aba7d4e6736f37eed189c48a5f7
308,579,844,544,821,100,000,000,000,000,000,000,000
502
[IEEE80211]: avoid integer underflow for runt rx frames Reported by Chris Evans <scarybeasts@gmail.com>: > The summary is that an evil 80211 frame can crash out a victim's > machine. It only applies to drivers using the 80211 wireless code, and > only then to certain drivers (and even then depends on a card's > firmware not dropping a dubious packet). I must confess I'm not > keeping track of Linux wireless support, and the different protocol > stacks etc. > > Details are as follows: > > ieee80211_rx() does not explicitly check that "skb->len >= hdrlen". > There are other skb->len checks, but not enough to prevent a subtle > off-by-two error if the frame has the IEEE80211_STYPE_QOS_DATA flag > set. > > This leads to integer underflow and crash here: > > if (frag != 0) > flen -= hdrlen; > > (flen is subsequently used as a memcpy length parameter). How about this? Signed-off-by: John W. Linville <linville@tuxdriver.com> Signed-off-by: David S. Miller <davem@davemloft.net>
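Reduced to its arithmetic, the guard the message asks for validates the frame length against the (QoS-adjusted) header length before any subtraction, so flen can never go negative. A self-contained sketch of the shape of the check; in the driver, hdrlen comes from ieee80211_get_hdrlen(fc) and is two bytes larger for QoS data frames:

static int payload_len(unsigned int skb_len, unsigned int hdrlen)
{
	if (skb_len < hdrlen)
		return -1;              /* runt frame: drop, don't underflow */
	return skb_len - hdrlen;        /* safe payload length */
}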
static int do_dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct dccp_sock *dp; int val, len; if (get_user(len, optlen)) return -EFAULT; if (len < sizeof(int)) return -EINVAL; dp = dccp_sk(sk); switch (optname) { case DCCP_SOCKOPT_PACKET_SIZE: DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); return 0; case DCCP_SOCKOPT_SERVICE: return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); case DCCP_SOCKOPT_SEND_CSCOV: val = dp->dccps_pcslen; break; case DCCP_SOCKOPT_RECV_CSCOV: val = dp->dccps_pcrlen; break; case 128 ... 191: return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, len, (u32 __user *)optval, optlen); case 192 ... 255: return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname, len, (u32 __user *)optval, optlen); default: return -ENOPROTOOPT; } if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; }
1
[]
linux-2.6
39ebc0276bada8bb70e067cb6d0eb71839c0fb08
329,364,480,182,313,500,000,000,000,000,000,000,000
42
[DCCP] getsockopt: Fix DCCP_SOCKOPT_[SEND,RECV]_CSCOV We were only checking if there was enough space to put the int, but left len as specified by the (malicious) user, sigh; fix it by setting len to sizeof(val) and transferring just one int worth of data, the one asked for. Also check for negative len values. Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> Signed-off-by: David S. Miller <davem@davemloft.net>
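A sketch of the two spots the message describes changing, with variable names as in the function above: the length check is done as a signed comparison so negative values are rejected, and len is forced to sizeof(val) instead of trusting the caller:

	/* near the top of do_dccp_getsockopt(): */
	if (len < (int)sizeof(int))  /* signed compare also rejects len < 0 */
		return -EINVAL;

	/* at the tail, before copying out: */
	len = sizeof(val);           /* copy exactly one int, no more */
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;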
static int jpc_qcx_getcompparms(jpc_qcxcp_t *compparms, jpc_cstate_t *cstate, jas_stream_t *in, uint_fast16_t len) { uint_fast8_t tmp; int n; int i; /* Eliminate compiler warning about unused variables. */ cstate = 0; n = 0; if (jpc_getuint8(in, &tmp)) { return -1; } ++n; compparms->qntsty = tmp & 0x1f; compparms->numguard = (tmp >> 5) & 7; switch (compparms->qntsty) { case JPC_QCX_SIQNT: compparms->numstepsizes = 1; break; case JPC_QCX_NOQNT: compparms->numstepsizes = (len - n); break; case JPC_QCX_SEQNT: /* XXX - this is a hack */ compparms->numstepsizes = (len - n) / 2; break; } if (compparms->numstepsizes > 0) { compparms->stepsizes = jas_alloc2(compparms->numstepsizes, sizeof(uint_fast16_t)); assert(compparms->stepsizes); for (i = 0; i < compparms->numstepsizes; ++i) { if (compparms->qntsty == JPC_QCX_NOQNT) { if (jpc_getuint8(in, &tmp)) { return -1; } compparms->stepsizes[i] = JPC_QCX_EXPN(tmp >> 3); } else { if (jpc_getuint16(in, &compparms->stepsizes[i])) { return -1; } } } } else { compparms->stepsizes = 0; } if (jas_stream_error(in) || jas_stream_eof(in)) { jpc_qcx_destroycompparms(compparms); return -1; } return 0; }
1
[]
jasper
4031ca321d8cb5798c316ab39c7a5dc88a61fdd7
254,403,990,052,538,500,000,000,000,000,000,000,000
54
Incorporated changes from patch jasper-1.900.3-libjasper-stepsizes-overflow.patch
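The record does not carry the patch body, but the overflow is visible above: numstepsizes is derived from the attacker-controlled segment length, then used to size an allocation that is only checked with assert(). A plausible shape for the bound such a patch adds, inserted after the switch that computes numstepsizes (the exact limit is an assumption; JPC_MAXRLVLS is JasPer's cap on resolution levels, with up to three bands each):

	if (compparms->numstepsizes > 3 * JPC_MAXRLVLS) {
		jas_eprintf("too many step sizes in QCD/QCC segment\n");
		return -1;
	}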
isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) { isdn_net_dev *p = isdn_net_findif(cfg->name); ulong features; int i; int drvidx; int chidx; char drvid[25]; if (p) { isdn_net_local *lp = p->local; /* See if any registered driver supports the features we want */ features = ((1 << cfg->l2_proto) << ISDN_FEATURE_L2_SHIFT) | ((1 << cfg->l3_proto) << ISDN_FEATURE_L3_SHIFT); for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (dev->drv[i]) if ((dev->drv[i]->interface->features & features) == features) break; if (i == ISDN_MAX_DRIVERS) { printk(KERN_WARNING "isdn_net: No driver with selected features\n"); return -ENODEV; } if (lp->p_encap != cfg->p_encap){ #ifdef CONFIG_ISDN_X25 struct concap_proto * cprot = p -> cprot; #endif if (isdn_net_device_started(p)) { printk(KERN_WARNING "%s: cannot change encap when if is up\n", p->dev->name); return -EBUSY; } #ifdef CONFIG_ISDN_X25 if( cprot && cprot -> pops ) cprot -> pops -> proto_del ( cprot ); p -> cprot = NULL; lp -> dops = NULL; /* ... , prepare for configuration of new one ... */ switch ( cfg -> p_encap ){ case ISDN_NET_ENCAP_X25IFACE: lp -> dops = &isdn_concap_reliable_dl_dops; } /* ... and allocate new one ... */ p -> cprot = isdn_concap_new( cfg -> p_encap ); /* p -> cprot == NULL now if p_encap is not supported by means of the concap_proto mechanism */ /* the protocol is not configured yet; this will happen later when isdn_net_reset() is called */ #endif } switch ( cfg->p_encap ) { case ISDN_NET_ENCAP_SYNCPPP: #ifndef CONFIG_ISDN_PPP printk(KERN_WARNING "%s: SyncPPP support not configured\n", p->dev->name); return -EINVAL; #else p->dev->type = ARPHRD_PPP; /* change ARP type */ p->dev->addr_len = 0; p->dev->do_ioctl = isdn_ppp_dev_ioctl; #endif break; case ISDN_NET_ENCAP_X25IFACE: #ifndef CONFIG_ISDN_X25 printk(KERN_WARNING "%s: isdn-x25 support not configured\n", p->dev->name); return -EINVAL; #else p->dev->type = ARPHRD_X25; /* change ARP type */ p->dev->addr_len = 0; #endif break; case ISDN_NET_ENCAP_CISCOHDLCK: p->dev->do_ioctl = isdn_ciscohdlck_dev_ioctl; break; default: if( cfg->p_encap >= 0 && cfg->p_encap <= ISDN_NET_ENCAP_MAX_ENCAP ) break; printk(KERN_WARNING "%s: encapsulation protocol %d not supported\n", p->dev->name, cfg->p_encap); return -EINVAL; } if (strlen(cfg->drvid)) { /* A bind has been requested ... */ char *c, *e; drvidx = -1; chidx = -1; strcpy(drvid, cfg->drvid); if ((c = strchr(drvid, ','))) { /* The channel-number is appended to the driver-Id with a comma */ chidx = (int) simple_strtoul(c + 1, &e, 10); if (e == c) chidx = -1; *c = '\0'; } for (i = 0; i < ISDN_MAX_DRIVERS; i++) /* Lookup driver-Id in array */ if (!(strcmp(dev->drvid[i], drvid))) { drvidx = i; break; } if ((drvidx == -1) || (chidx == -1)) /* Either driver-Id or channel-number invalid */ return -ENODEV; } else { /* Parameters are valid, so get them */ drvidx = lp->pre_device; chidx = lp->pre_channel; } if (cfg->exclusive > 0) { unsigned long flags; /* If binding is exclusive, try to grab the channel */ spin_lock_irqsave(&dev->lock, flags); if ((i = isdn_get_free_channel(ISDN_USAGE_NET, lp->l2_proto, lp->l3_proto, drvidx, chidx, lp->msn)) < 0) { /* Grab failed, because desired channel is in use */ lp->exclusive = -1; spin_unlock_irqrestore(&dev->lock, flags); return -EBUSY; } /* All went ok, so update isdninfo */ dev->usage[i] = ISDN_USAGE_EXCLUSIVE; isdn_info_update(); spin_unlock_irqrestore(&dev->lock, flags); lp->exclusive = i; } else { /* Non-exclusive binding or unbind. */ lp->exclusive = -1; if ((lp->pre_device != -1) && (cfg->exclusive == -1)) { isdn_unexclusive_channel(lp->pre_device, lp->pre_channel); isdn_free_channel(lp->pre_device, lp->pre_channel, ISDN_USAGE_NET); drvidx = -1; chidx = -1; } } strcpy(lp->msn, cfg->eaz); lp->pre_device = drvidx; lp->pre_channel = chidx; lp->onhtime = cfg->onhtime; lp->charge = cfg->charge; lp->l2_proto = cfg->l2_proto; lp->l3_proto = cfg->l3_proto; lp->cbdelay = cfg->cbdelay; lp->dialmax = cfg->dialmax; lp->triggercps = cfg->triggercps; lp->slavedelay = cfg->slavedelay * HZ; lp->pppbind = cfg->pppbind; lp->dialtimeout = cfg->dialtimeout >= 0 ? cfg->dialtimeout * HZ : -1; lp->dialwait = cfg->dialwait * HZ; if (cfg->secure) lp->flags |= ISDN_NET_SECURE; else lp->flags &= ~ISDN_NET_SECURE; if (cfg->cbhup) lp->flags |= ISDN_NET_CBHUP; else lp->flags &= ~ISDN_NET_CBHUP; switch (cfg->callback) { case 0: lp->flags &= ~(ISDN_NET_CALLBACK | ISDN_NET_CBOUT); break; case 1: lp->flags |= ISDN_NET_CALLBACK; lp->flags &= ~ISDN_NET_CBOUT; break; case 2: lp->flags |= ISDN_NET_CBOUT; lp->flags &= ~ISDN_NET_CALLBACK; break; } lp->flags &= ~ISDN_NET_DIALMODE_MASK; /* first all bits off */ if (cfg->dialmode && !(cfg->dialmode & ISDN_NET_DIALMODE_MASK)) { /* old isdnctrl version, where only 0 or 1 is given */ printk(KERN_WARNING "Old isdnctrl version detected! Please update.\n"); lp->flags |= ISDN_NET_DM_OFF; /* turn on `off' bit */ } else { lp->flags |= cfg->dialmode; /* turn on selected bits */ } if (cfg->chargehup) lp->hupflags |= ISDN_CHARGEHUP; else lp->hupflags &= ~ISDN_CHARGEHUP; if (cfg->ihup) lp->hupflags |= ISDN_INHUP; else lp->hupflags &= ~ISDN_INHUP; if (cfg->chargeint > 10) { lp->hupflags |= ISDN_CHARGEHUP | ISDN_HAVECHARGE | ISDN_MANCHARGE; lp->chargeint = cfg->chargeint * HZ; } if (cfg->p_encap != lp->p_encap) { if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) { p->dev->header_ops = NULL; p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } else { p->dev->header_ops = &isdn_header_ops; if (cfg->p_encap == ISDN_NET_ENCAP_ETHER) p->dev->flags = IFF_BROADCAST | IFF_MULTICAST; else p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } } lp->p_encap = cfg->p_encap; return 0; } return -ENODEV; }
1
[ "CWE-119" ]
linux-2.6
0f13864e5b24d9cbe18d125d41bfa4b726a82e40
80,796,293,720,356,790,000,000,000,000,000,000,000
215
isdn: avoid copying overly-long strings Addresses http://bugzilla.kernel.org/show_bug.cgi?id=9416 Signed-off-by: Karsten Keil <kkeil@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
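The title gives the cure even though the diff is not in the record: every unbounded strcpy() from the user-controlled ioctl structure becomes a length-bounded copy. The pattern, applied to the copies visible in this and the next two isdn records (destination sizes via sizeof, as strlcpy expects):

	/* in isdn_net_setcfg(): */
	strlcpy(drvid, cfg->drvid, sizeof(drvid));
	strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn));
	/* in isdn_net_addphone(): */
	strlcpy(n->num, phone->phone, sizeof(n->num));
	/* in isdn_net_find_icall(): */
	strlcpy(nr, setup->phone, sizeof(nr));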
isdn_net_addphone(isdn_net_ioctl_phone * phone) { isdn_net_dev *p = isdn_net_findif(phone->name); isdn_net_phone *n; if (p) { if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL))) return -ENOMEM; strcpy(n->num, phone->phone); n->next = p->local->phone[phone->outgoing & 1]; p->local->phone[phone->outgoing & 1] = n; return 0; } return -ENODEV; }
1
[ "CWE-119" ]
linux-2.6
0f13864e5b24d9cbe18d125d41bfa4b726a82e40
242,762,251,649,869,600,000,000,000,000,000,000,000
15
isdn: avoid copying overly-long strings Addresses http://bugzilla.kernel.org/show_bug.cgi?id=9416 Signed-off-by: Karsten Keil <kkeil@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup) { char *eaz; int si1; int si2; int ematch; int wret; int swapped; int sidx = 0; u_long flags; isdn_net_dev *p; isdn_net_phone *n; char nr[32]; char *my_eaz; /* Search name in netdev-chain */ if (!setup->phone[0]) { nr[0] = '0'; nr[1] = '\0'; printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n"); } else strcpy(nr, setup->phone); si1 = (int) setup->si1; si2 = (int) setup->si2; if (!setup->eazmsn[0]) { printk(KERN_WARNING "isdn_net: Incoming call without CPN, assuming '0'\n"); eaz = "0"; } else eaz = setup->eazmsn; if (dev->net_verbose > 1) printk(KERN_INFO "isdn_net: call from %s,%d,%d -> %s\n", nr, si1, si2, eaz); /* Accept DATA and VOICE calls at this stage * local eaz is checked later for allowed call types */ if ((si1 != 7) && (si1 != 1)) { if (dev->net_verbose > 1) printk(KERN_INFO "isdn_net: Service-Indicator not 1 or 7, ignored\n"); return 0; } n = (isdn_net_phone *) 0; p = dev->netdev; ematch = wret = swapped = 0; #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: di=%d ch=%d idx=%d usg=%d\n", di, ch, idx, dev->usage[idx]); #endif while (p) { int matchret; isdn_net_local *lp = p->local; /* If last check has triggered as binding-swap, revert it */ switch (swapped) { case 2: isdn_net_swap_usage(idx, sidx); /* fall through */ case 1: isdn_net_swapbind(di); break; } swapped = 0; /* check acceptable call types for DOV */ my_eaz = isdn_map_eaz2msn(lp->msn, di); if (si1 == 1) { /* it's a DOV call, check if we allow it */ if (*my_eaz == 'v' || *my_eaz == 'V' || *my_eaz == 'b' || *my_eaz == 'B') my_eaz++; /* skip to allow a match */ else my_eaz = NULL; /* force non match */ } else { /* it's a DATA call, check if we allow it */ if (*my_eaz == 'b' || *my_eaz == 'B') my_eaz++; /* skip to allow a match */ } if (my_eaz) matchret = isdn_msncmp(eaz, my_eaz); else matchret = 1; if (!matchret) ematch = 1; /* Remember if more numbers eventually can match */ if (matchret > wret) wret = matchret; #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: if='%s', l.msn=%s, l.flags=%d, l.dstate=%d\n", p->dev->name, lp->msn, lp->flags, lp->dialstate); #endif if ((!matchret) && /* EAZ is matching */ (((!(lp->flags & ISDN_NET_CONNECTED)) && /* but not connected */ (USG_NONE(dev->usage[idx]))) || /* and ch. unused or */ ((((lp->dialstate == 4) || (lp->dialstate == 12)) && /* if dialing */ (!(lp->flags & ISDN_NET_CALLBACK))) /* but no callback */ ))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match1, pdev=%d pch=%d\n", lp->pre_device, lp->pre_channel); #endif if (dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) { if ((lp->pre_channel != ch) || (lp->pre_device != di)) { /* Here we got a problem: * If using an ICN-Card, an incoming call is always signaled on * on the first channel of the card, if both channels are * down. However this channel may be bound exclusive. If the * second channel is free, this call should be accepted. * The solution is horribly but it runs, so what: * We exchange the exclusive bindings of the two channels, the * corresponding variables in the interface-structs. */ if (ch == 0) { sidx = isdn_dc2minor(di, 1); #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: ch is 0\n"); #endif if (USG_NONE(dev->usage[sidx])) { /* Second Channel is free, now see if it is bound * exclusive too. */ if (dev->usage[sidx] & ISDN_USAGE_EXCLUSIVE) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: 2nd channel is down and bound\n"); #endif /* Yes, swap bindings only, if the original * binding is bound to channel 1 of this driver */ if ((lp->pre_device == di) && (lp->pre_channel == 1)) { isdn_net_swapbind(di); swapped = 1; } else { /* ... else iterate next device */ p = (isdn_net_dev *) p->next; continue; } } else { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: 2nd channel is down and unbound\n"); #endif /* No, swap always and swap excl-usage also */ isdn_net_swap_usage(idx, sidx); isdn_net_swapbind(di); swapped = 2; } /* Now check for exclusive binding again */ #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: final check\n"); #endif if ((dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) && ((lp->pre_channel != ch) || (lp->pre_device != di))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: final check failed\n"); #endif p = (isdn_net_dev *) p->next; continue; } } } else { /* We are already on the second channel, so nothing to do */ #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: already on 2nd channel\n"); #endif } } } #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match2\n"); #endif n = lp->phone[0]; if (lp->flags & ISDN_NET_SECURE) { while (n) { if (!isdn_msncmp(nr, n->num)) break; n = (isdn_net_phone *) n->next; } } if (n || (!(lp->flags & ISDN_NET_SECURE))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match3\n"); #endif /* matching interface found */ /* * Is the state STOPPED? * If so, no dialin is allowed, * so reject actively. * */ if (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF) { printk(KERN_INFO "incoming call, interface %s `stopped' -> rejected\n", p->dev->name); return 3; } /* * Is the interface up? * If not, reject the call actively. */ if (!isdn_net_device_started(p)) { printk(KERN_INFO "%s: incoming call, interface down -> rejected\n", p->dev->name); return 3; } /* Interface is up, now see if it's a slave. If so, see if * it's master and parent slave is online. If not, reject the call. */ if (lp->master) { isdn_net_local *mlp = (isdn_net_local *) lp->master->priv; printk(KERN_DEBUG "ICALLslv: %s\n", p->dev->name); printk(KERN_DEBUG "master=%s\n", lp->master->name); if (mlp->flags & ISDN_NET_CONNECTED) { printk(KERN_DEBUG "master online\n"); /* Master is online, find parent-slave (master if first slave) */ while (mlp->slave) { if ((isdn_net_local *) mlp->slave->priv == lp) break; mlp = (isdn_net_local *) mlp->slave->priv; } } else printk(KERN_DEBUG "master offline\n"); /* Found parent, if it's offline iterate next device */ printk(KERN_DEBUG "mlpf: %d\n", mlp->flags & ISDN_NET_CONNECTED); if (!(mlp->flags & ISDN_NET_CONNECTED)) { p = (isdn_net_dev *) p->next; continue; } } if (lp->flags & ISDN_NET_CALLBACK) { int chi; /* * Is the state MANUAL? * If so, no callback can be made, * so reject actively. * */ if (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF) { printk(KERN_INFO "incoming call for callback, interface %s `off' -> rejected\n", p->dev->name); return 3; } printk(KERN_DEBUG "%s: call from %s -> %s, start callback\n", p->dev->name, nr, eaz); if (lp->phone[1]) { /* Grab a free ISDN-Channel */ spin_lock_irqsave(&dev->lock, flags); if ((chi = isdn_get_free_channel( ISDN_USAGE_NET, lp->l2_proto, lp->l3_proto, lp->pre_device, lp->pre_channel, lp->msn) ) < 0) { printk(KERN_WARNING "isdn_net_find_icall: No channel for %s\n", p->dev->name); spin_unlock_irqrestore(&dev->lock, flags); return 0; } /* Setup dialstate. */ lp->dtimer = 0; lp->dialstate = 11; /* Connect interface with channel */ isdn_net_bind_channel(lp, chi); #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) if (isdn_ppp_bind(lp) < 0) { spin_unlock_irqrestore(&dev->lock, flags); isdn_net_unbind_channel(lp); return 0; } #endif spin_unlock_irqrestore(&dev->lock, flags); /* Initiate dialing by returning 2 or 4 */ return (lp->flags & ISDN_NET_CBHUP) ? 2 : 4; } else printk(KERN_WARNING "isdn_net: %s: No phone number\n", p->dev->name); return 0; } else { printk(KERN_DEBUG "%s: call from %s -> %s accepted\n", p->dev->name, nr, eaz); /* if this interface is dialing, it does it probably on a different device, so free this device */ if ((lp->dialstate == 4) || (lp->dialstate == 12)) { #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) isdn_ppp_free(lp); #endif isdn_net_lp_disconnected(lp); isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET); } spin_lock_irqsave(&dev->lock, flags); dev->usage[idx] &= ISDN_USAGE_EXCLUSIVE; dev->usage[idx] |= ISDN_USAGE_NET; strcpy(dev->num[idx], nr); isdn_info_update(); dev->st_netdev[idx] = lp->netdev; lp->isdn_device = di; lp->isdn_channel = ch; lp->ppp_slot = -1; lp->flags |= ISDN_NET_CONNECTED; lp->dialstate = 7; lp->dtimer = 0; lp->outgoing = 0; lp->huptimer = 0; lp->hupflags |= ISDN_WAITCHARGE; lp->hupflags &= ~ISDN_HAVECHARGE; #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) { if (isdn_ppp_bind(lp) < 0) { isdn_net_unbind_channel(lp); spin_unlock_irqrestore(&dev->lock, flags); return 0; } } #endif spin_unlock_irqrestore(&dev->lock, flags); return 1; } } } p = (isdn_net_dev *) p->next; } /* If none of configured EAZ/MSN matched and not verbose, be silent */ if (!ematch || dev->net_verbose) printk(KERN_INFO "isdn_net: call from %s -> %d %s ignored\n", nr, di, eaz); return (wret == 2)?5:0; }
1
[ "CWE-119" ]
linux-2.6
0f13864e5b24d9cbe18d125d41bfa4b726a82e40
322,344,461,636,009,930,000,000,000,000,000,000,000
326
isdn: avoid copying overly-long strings Addresses http://bugzilla.kernel.org/show_bug.cgi?id=9416 Signed-off-by: Karsten Keil <kkeil@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
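The function above copies the caller-supplied number nr into dev->num[idx] with a plain strcpy(). A minimal sketch of the bounded-copy pattern the commit message describes, assuming dev->num[idx] is a fixed-size char array as in the I4L code shown; this illustrates the pattern rather than reproducing the literal patch hunk:

	/* Bound the copy: strlcpy() never writes past the destination
	 * and always NUL-terminates, so an overly-long caller-supplied
	 * number can no longer overrun dev->num[idx]. */
	strlcpy(dev->num[idx], nr, sizeof(dev->num[idx]));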
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; int ret = 0; void *tmp; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; if (!pskb_may_pull(skb, sizeof(*esph))) { ret = -EINVAL; goto out; } if (elen <= 0) { ret = -EINVAL; goto out; } if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) { ret = -EINVAL; goto out; } ret = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags + 1); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; iv = esp_tmp_iv(aead, tmp); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. */ iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, sizeof(*esph)); ret = crypto_aead_decrypt(req); if (ret == -EINPROGRESS) goto out; ret = esp_input_done2(skb, ret); out: return ret; }
1
[ "CWE-16" ]
linux-2.6
920fc941a9617f95ccb283037fe6f8a38d95bb69
104,942,264,381,159,010,000,000,000,000,000,000,000
65
[ESP]: Ensure IV is in linear part of the skb to avoid BUG() due to OOB access ESP does not account for the IV size when calling pskb_may_pull() to ensure everything it accesses directly is within the linear part of a potential fragment. This results in a BUG() being triggered when both the IPv4 and IPv6 ESP stacks are fed with an skb where the first fragment ends between the end of the ESP header and the end of the IV. This bug was found by Dirk Nehring <dnehring@gmx.net>. Signed-off-by: Thomas Graf <tgraf@suug.ch> Signed-off-by: David S. Miller <davem@davemloft.net>
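esp6_input() above pulls only sizeof(*esph) into the linear area, yet later dereferences esph->enc_data (the IV) directly. A sketch of the corrected guard the message implies, using the function's own locals; the IPv4 esp_input() shown next takes the same shape with its err variable:

	/* Pull the ESP header *and* the IV into the linear area before
	 * either is accessed directly. */
	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
		ret = -EINVAL;
		goto out;
	}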
static int esp_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; void *tmp; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; int err = -EINVAL; if (!pskb_may_pull(skb, sizeof(*esph))) goto out; if (elen <= 0) goto out; if ((err = skb_cow_data(skb, 0, &trailer)) < 0) goto out; nfrags = err; err = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags + 1); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; iv = esp_tmp_iv(aead, tmp); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. */ iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, sizeof(*esph)); err = crypto_aead_decrypt(req); if (err == -EINPROGRESS) goto out; err = esp_input_done2(skb, err); out: return err; }
1
[ "CWE-16" ]
linux-2.6
920fc941a9617f95ccb283037fe6f8a38d95bb69
261,859,902,457,785,360,000,000,000,000,000,000,000
60
[ESP]: Ensure IV is in linear part of the skb to avoid BUG() due to OOB access ESP does not account for the IV size when calling pskb_may_pull() to ensure everything it accesses directly is within the linear part of a potential fragment. This results in a BUG() being triggered when both the IPv4 and IPv6 ESP stacks are fed with an skb where the first fragment ends between the end of the ESP header and the end of the IV. This bug was found by Dirk Nehring <dnehring@gmx.net>. Signed-off-by: Thomas Graf <tgraf@suug.ch> Signed-off-by: David S. Miller <davem@davemloft.net>
isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) { uint minor = iminor(inode); isdn_ctrl c; int drvidx; int chidx; int ret; int i; char __user *p; char *s; union iocpar { char name[10]; char bname[22]; isdn_ioctl_struct iocts; isdn_net_ioctl_phone phone; isdn_net_ioctl_cfg cfg; } iocpar; void __user *argp = (void __user *)arg; #define name iocpar.name #define bname iocpar.bname #define iocts iocpar.iocts #define phone iocpar.phone #define cfg iocpar.cfg if (minor == ISDN_MINOR_STATUS) { switch (cmd) { case IIOCGETDVR: return (TTY_DV + (NET_DV << 8) + (INF_DV << 16)); case IIOCGETCPS: if (arg) { ulong __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, p, sizeof(ulong) * ISDN_MAX_CHANNELS * 2)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { put_user(dev->ibytes[i], p++); put_user(dev->obytes[i], p++); } return 0; } else return -EINVAL; break; #ifdef CONFIG_NETDEVICES case IIOCNETGPN: /* Get peer phone number of a connected * isdn network interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; return isdn_net_getpeer(&phone, argp); } else return -EINVAL; #endif default: return -EINVAL; } } if (!dev->drivers) return -ENODEV; if (minor <= ISDN_MINOR_BMAX) { drvidx = isdn_minor2drv(minor); if (drvidx < 0) return -ENODEV; chidx = isdn_minor2chan(minor); if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) return -ENODEV; return 0; } if (minor <= ISDN_MINOR_CTRLMAX) { /* * isdn net devices manage lots of configuration variables as linked lists. * Those lists must only be manipulated from user space. Some of the ioctl's * service routines access user space and are not atomic. Therefor, ioctl's * manipulating the lists and ioctl's sleeping while accessing the lists * are serialized by means of a semaphore. 
*/ switch (cmd) { case IIOCNETDWRSET: printk(KERN_INFO "INFO: ISDN_DW_ABC_EXTENSION not enabled\n"); return(-EINVAL); case IIOCNETLCR: printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); return -ENODEV; #ifdef CONFIG_NETDEVICES case IIOCNETAIF: /* Add a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; s = name; } else { s = NULL; } ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; if ((s = isdn_net_new(s, NULL))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETASL: /* Add a slave to a network-interface */ if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; if ((s = isdn_net_newslave(bname))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETDIF: /* Delete a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_rm(name); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETSCF: /* Set configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; return isdn_net_setcfg(&cfg); } else return -EINVAL; case IIOCNETGCF: /* Get configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; if (!(ret = isdn_net_getcfg(&cfg))) { if (copy_to_user(argp, &cfg, sizeof(cfg))) return -EFAULT; } return ret; } else return -EINVAL; case IIOCNETANM: /* Add a phone-number to a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_addphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETGNM: /* Get list of phone-numbers of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_getphones(&phone, argp); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDNM: /* Delete a phone-number of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_delphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDIL: /* Force dialing of a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_dial(name); } else return -EINVAL; #ifdef CONFIG_ISDN_PPP case IIOCNETALN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_dial_slave(name); case IIOCNETDLN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_hangup_slave(name); #endif case IIOCNETHUP: /* Force hangup of a network-interface */ if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_hangup(name); break; #endif /* CONFIG_NETDEVICES */ case IIOCSETVER: dev->net_verbose = arg; printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose); return 0; case IIOCSETGST: if 
(arg) dev->global_flags |= ISDN_GLOBAL_STOPPED; else dev->global_flags &= ~ISDN_GLOBAL_STOPPED; printk(KERN_INFO "isdn: Global Mode %s\n", (dev->global_flags & ISDN_GLOBAL_STOPPED) ? "stopped" : "running"); return 0; case IIOCSETBRJ: drvidx = -1; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } } if (drvidx == -1) return -ENODEV; if (iocts.arg) dev->drv[drvidx]->flags |= DRV_FLAG_REJBUS; else dev->drv[drvidx]->flags &= ~DRV_FLAG_REJBUS; return 0; case IIOCSIGPRF: dev->profd = current; return 0; break; case IIOCGETPRF: /* Get all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_to_user(p, dev->mdm.info[i].emu.profile, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_to_user(p, dev->mdm.info[i].emu.pmsn, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; if (copy_to_user(p, dev->mdm.info[i].emu.plmsn, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; } return (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS; } else return -EINVAL; break; case IIOCSETPRF: /* Set all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_READ, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_from_user(dev->mdm.info[i].emu.profile, p, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; } return 0; } else return -EINVAL; break; case IIOCSETMAP: case IIOCGETMAP: /* Set/Get MSN->EAZ-Mapping for a driver */ if (arg) { if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (cmd == IIOCSETMAP) { int loop = 1; p = (char __user *) iocts.arg; i = 0; while (loop) { int j = 0; while (1) { if (!access_ok(VERIFY_READ, p, 1)) return -EFAULT; get_user(bname[j], p++); switch (bname[j]) { case '\0': loop = 0; /* Fall through */ case ',': bname[j] = '\0'; strcpy(dev->drv[drvidx]->msn2eaz[i], bname); j = ISDN_MSNLEN; break; default: j++; } if (j >= ISDN_MSNLEN) break; } if (++i > 9) break; } } else { p = (char __user *) iocts.arg; for (i = 0; i < 10; i++) { sprintf(bname, "%s%s", strlen(dev->drv[drvidx]->msn2eaz[i]) ? dev->drv[drvidx]->msn2eaz[i] : "_", (i < 9) ? 
"," : "\0"); if (copy_to_user(p, bname, strlen(bname) + 1)) return -EFAULT; p += strlen(bname); } } return 0; } else return -EINVAL; case IIOCDBGVAR: if (arg) { if (copy_to_user(argp, &dev, sizeof(ulong))) return -EFAULT; return 0; } else return -EINVAL; break; default: if ((cmd & IIOCDRVCTL) == IIOCDRVCTL) cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK; else return -EINVAL; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (!access_ok(VERIFY_WRITE, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; c.driver = drvidx; c.command = ISDN_CMD_IOCTL; c.arg = cmd; memcpy(c.parm.num, &iocts.arg, sizeof(ulong)); ret = isdn_command(&c); memcpy(&iocts.arg, c.parm.num, sizeof(ulong)); if (copy_to_user(argp, &iocts, sizeof(isdn_ioctl_struct))) return -EFAULT; return ret; } else return -EINVAL; } } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) return (isdn_ppp_ioctl(minor - ISDN_MINOR_PPP, file, cmd, arg)); #endif return -ENODEV; #undef name #undef bname #undef iocts #undef phone #undef cfg }
1
[ "CWE-119" ]
linux-2.6
eafe1aa37e6ec2d56f14732b5240c4dd09f0613a
328,221,509,131,099,850,000,000,000,000,000,000,000
446
I4L: fix isdn_ioctl memory overrun vulnerability Fix possible memory overrun issue in the isdn ioctl code. Found by ADLAB <adlab@venustech.com.cn> Signed-off-by: Karsten Keil <kkeil@suse.de> Cc: ADLAB <adlab@venustech.com.cn> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
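The ioctl handler above repeatedly does copy_from_user() into small fixed buffers (name[10], bname[22], iocts.drvid) and then runs strlen()/strchr()/strcpy() on them without guaranteeing NUL termination. A sketch of the hardening pattern the message points at, shown for one branch only; the actual patch touches several ioctl cases, and this is an illustration of the idea rather than the exact hunk:

	case IIOCNETAIF:	/* Add a network-interface */
		if (arg) {
			if (copy_from_user(name, argp, sizeof(name)))
				return -EFAULT;
			name[sizeof(name) - 1] = 0;	/* force NUL termination
							 * before strlen() et al. */
			s = name;
		} else
			s = NULL;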
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) { return alloc_page(gfp | __GFP_ZERO); }
1
[ "CWE-200" ]
linux-2.6
e84e2e132c9c66d8498e7710d4ea532d1feaaac5
40,898,721,436,560,150,000,000,000,000,000,000,000
4
tmpfs: restore missing clear_highpage tmpfs was misconverted to __GFP_ZERO in 2.6.11. There's an unusual case in which shmem_getpage receives the page from its caller instead of allocating. We must cover this case by clear_highpage before SetPageUptodate, as before. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp, int *type) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo; struct page *filepage = *pagep; struct page *swappage; swp_entry_t *entry; swp_entry_t swap; int error; if (idx >= SHMEM_MAX_INDEX) return -EFBIG; if (type) *type = 0; /* * Normally, filepage is NULL on entry, and either found * uptodate immediately, or allocated and zeroed, or read * in under swappage, which is then assigned to filepage. * But shmem_readpage and shmem_write_begin pass in a locked * filepage, which may be found not uptodate by other callers * too, and may need to be copied from the swappage read in. */ repeat: if (!filepage) filepage = find_lock_page(mapping, idx); if (filepage && PageUptodate(filepage)) goto done; error = 0; if (sgp == SGP_QUICK) goto failed; spin_lock(&info->lock); shmem_recalc_inode(inode); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) { spin_unlock(&info->lock); error = PTR_ERR(entry); goto failed; } swap = *entry; if (swap.val) { /* Look it up and read it in.. */ swappage = lookup_swap_cache(swap); if (!swappage) { shmem_swp_unmap(entry); /* here we actually do the io */ if (type && !(*type & VM_FAULT_MAJOR)) { __count_vm_event(PGMAJFAULT); *type |= VM_FAULT_MAJOR; } spin_unlock(&info->lock); swappage = shmem_swapin(info, swap, idx); if (!swappage) { spin_lock(&info->lock); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) error = PTR_ERR(entry); else { if (entry->val == swap.val) error = -ENOMEM; shmem_swp_unmap(entry); } spin_unlock(&info->lock); if (error) goto failed; goto repeat; } wait_on_page_locked(swappage); page_cache_release(swappage); goto repeat; } /* We have to do this with page locked to prevent races */ if (TestSetPageLocked(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_locked(swappage); page_cache_release(swappage); goto repeat; } if (PageWriteback(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_writeback(swappage); unlock_page(swappage); page_cache_release(swappage); goto repeat; } if (!PageUptodate(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); unlock_page(swappage); page_cache_release(swappage); error = -EIO; goto failed; } if (filepage) { shmem_swp_set(info, entry, 0); shmem_swp_unmap(entry); delete_from_swap_cache(swappage); spin_unlock(&info->lock); copy_highpage(filepage, swappage); unlock_page(swappage); page_cache_release(swappage); flush_dcache_page(filepage); SetPageUptodate(filepage); set_page_dirty(filepage); swap_free(swap); } else if (!(error = move_from_swap_cache( swappage, idx, mapping))) { info->flags |= SHMEM_PAGEIN; shmem_swp_set(info, entry, 0); shmem_swp_unmap(entry); spin_unlock(&info->lock); filepage = swappage; swap_free(swap); } else { shmem_swp_unmap(entry); spin_unlock(&info->lock); unlock_page(swappage); page_cache_release(swappage); if (error == -ENOMEM) { /* let kswapd refresh zone for GFP_ATOMICs */ congestion_wait(WRITE, HZ/50); } goto repeat; } } else if (sgp == SGP_READ && !filepage) { shmem_swp_unmap(entry); filepage = find_get_page(mapping, idx); if (filepage && (!PageUptodate(filepage) || TestSetPageLocked(filepage))) { spin_unlock(&info->lock); wait_on_page_locked(filepage); page_cache_release(filepage); filepage = NULL; goto repeat; } spin_unlock(&info->lock); } else { shmem_swp_unmap(entry); sbinfo = SHMEM_SB(inode->i_sb); if (sbinfo->max_blocks) { 
spin_lock(&sbinfo->stat_lock); if (sbinfo->free_blocks == 0 || shmem_acct_block(info->flags)) { spin_unlock(&sbinfo->stat_lock); spin_unlock(&info->lock); error = -ENOSPC; goto failed; } sbinfo->free_blocks--; inode->i_blocks += BLOCKS_PER_PAGE; spin_unlock(&sbinfo->stat_lock); } else if (shmem_acct_block(info->flags)) { spin_unlock(&info->lock); error = -ENOSPC; goto failed; } if (!filepage) { spin_unlock(&info->lock); filepage = shmem_alloc_page(mapping_gfp_mask(mapping), info, idx); if (!filepage) { shmem_unacct_blocks(info->flags, 1); shmem_free_blocks(inode, 1); error = -ENOMEM; goto failed; } spin_lock(&info->lock); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) error = PTR_ERR(entry); else { swap = *entry; shmem_swp_unmap(entry); } if (error || swap.val || 0 != add_to_page_cache_lru( filepage, mapping, idx, GFP_ATOMIC)) { spin_unlock(&info->lock); page_cache_release(filepage); shmem_unacct_blocks(info->flags, 1); shmem_free_blocks(inode, 1); filepage = NULL; if (error) goto failed; goto repeat; } info->flags |= SHMEM_PAGEIN; } info->alloced++; spin_unlock(&info->lock); flush_dcache_page(filepage); SetPageUptodate(filepage); } done: if (*pagep != filepage) { *pagep = filepage; if (sgp != SGP_FAULT) unlock_page(filepage); } return 0; failed: if (*pagep != filepage) { unlock_page(filepage); page_cache_release(filepage); } return error; }
1
[ "CWE-200" ]
linux-2.6
e84e2e132c9c66d8498e7710d4ea532d1feaaac5
337,224,798,820,921,130,000,000,000,000,000,000,000
221
tmpfs: restore missing clear_highpage tmpfs was misconverted to __GFP_ZERO in 2.6.11. There's an unusual case in which shmem_getpage receives the page from its caller instead of allocating. We must cover this case by clear_highpage before SetPageUptodate, as before. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
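In shmem_getpage() above, the final branch can reach SetPageUptodate() with a filepage that was handed in by the caller (shmem_readpage/shmem_write_begin) rather than allocated locally, so it cannot be assumed zero-filled. A sketch of the described fix at the point where the page is published, using the function's own locals; presumably the companion change is dropping the then-redundant __GFP_ZERO from the shmem_alloc_page() variants:

	/* The page may have come from the caller instead of
	 * shmem_alloc_page(), so zero it explicitly before it is
	 * marked uptodate and becomes visible. */
	clear_highpage(filepage);
	flush_dcache_page(filepage);
	SetPageUptodate(filepage);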
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) { struct vm_area_struct pvma; struct page *page; memset(&pvma, 0, sizeof(struct vm_area_struct)); pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); pvma.vm_pgoff = idx; pvma.vm_end = PAGE_SIZE; page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0); mpol_free(pvma.vm_policy); return page; }
1
[ "CWE-200" ]
linux-2.6
e84e2e132c9c66d8498e7710d4ea532d1feaaac5
209,078,005,530,362,280,000,000,000,000,000,000,000
14
tmpfs: restore missing clear_highpage tmpfs was misconverted to __GFP_ZERO in 2.6.11. There's an unusual case in which shmem_getpage receives the page from its caller instead of allocating. We must cover this case by clear_highpage before SetPageUptodate, as before. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
ProcShmCreatePixmap(client) register ClientPtr client; { PixmapPtr pMap; DrawablePtr pDraw; DepthPtr pDepth; register int i, rc; ShmDescPtr shmdesc; REQUEST(xShmCreatePixmapReq); unsigned int width, height, depth; unsigned long size; REQUEST_SIZE_MATCH(xShmCreatePixmapReq); client->errorValue = stuff->pid; if (!sharedPixmaps) return BadImplementation; LEGAL_NEW_RESOURCE(stuff->pid, client); rc = dixLookupDrawable(&pDraw, stuff->drawable, client, M_ANY, DixGetAttrAccess); if (rc != Success) return rc; VERIFY_SHMPTR(stuff->shmseg, stuff->offset, TRUE, shmdesc, client); width = stuff->width; height = stuff->height; depth = stuff->depth; if (!width || !height || !depth) { client->errorValue = 0; return BadValue; } if (width > 32767 || height > 32767) return BadAlloc; if (stuff->depth != 1) { pDepth = pDraw->pScreen->allowedDepths; for (i=0; i<pDraw->pScreen->numDepths; i++, pDepth++) if (pDepth->depth == stuff->depth) goto CreatePmap; client->errorValue = stuff->depth; return BadValue; } CreatePmap: size = PixmapBytePad(width, depth) * height; if (sizeof(size) == 4 && BitsPerPixel(depth) > 8) { if (size < width * height) return BadAlloc; /* thankfully, offset is unsigned */ if (stuff->offset + size < size) return BadAlloc; } VERIFY_SHMSIZE(shmdesc, stuff->offset, size, client); pMap = (*shmFuncs[pDraw->pScreen->myNum]->CreatePixmap)( pDraw->pScreen, stuff->width, stuff->height, stuff->depth, shmdesc->addr + stuff->offset); if (pMap) { rc = XaceHook(XACE_RESOURCE_ACCESS, client, stuff->pid, RT_PIXMAP, pMap, RT_NONE, NULL, DixCreateAccess); if (rc != Success) { pDraw->pScreen->DestroyPixmap(pMap); return rc; } dixSetPrivate(&pMap->devPrivates, shmPixmapPrivate, shmdesc); shmdesc->refcnt++; pMap->drawable.serialNumber = NEXT_SERIAL_NUMBER; pMap->drawable.id = stuff->pid; if (AddResource(stuff->pid, RT_PIXMAP, (pointer)pMap)) { return(client->noClientException); } pDraw->pScreen->DestroyPixmap(pMap); } return (BadAlloc); }
1
[ "CWE-189" ]
xserver
be6c17fcf9efebc0bbcc3d9a25f8c5a2450c2161
129,275,241,199,461,490,000,000,000,000,000,000,000
80
CVE-2007-6429: Always test for size+offset wrapping.
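The handler above only tests for wrapping when sizeof(size) == 4 and BitsPerPixel(depth) > 8, leaving other configurations unchecked. A sketch of the unconditional check the commit title calls for, using the handler's own locals (an illustration of the intent, not necessarily the verbatim patch):

	size = PixmapBytePad(width, depth) * height;
	/* Always check: the multiplication may wrap ... */
	if (size < width * height)
		return BadAlloc;
	/* ... and so may the unsigned offset + size sum. */
	if (stuff->offset + size < size)
		return BadAlloc;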
ProcPanoramiXShmCreatePixmap( register ClientPtr client) { ScreenPtr pScreen = NULL; PixmapPtr pMap = NULL; DrawablePtr pDraw; DepthPtr pDepth; int i, j, result, rc; ShmDescPtr shmdesc; REQUEST(xShmCreatePixmapReq); unsigned int width, height, depth; unsigned long size; PanoramiXRes *newPix; REQUEST_SIZE_MATCH(xShmCreatePixmapReq); client->errorValue = stuff->pid; if (!sharedPixmaps) return BadImplementation; LEGAL_NEW_RESOURCE(stuff->pid, client); rc = dixLookupDrawable(&pDraw, stuff->drawable, client, M_ANY, DixUnknownAccess); if (rc != Success) return rc; VERIFY_SHMPTR(stuff->shmseg, stuff->offset, TRUE, shmdesc, client); width = stuff->width; height = stuff->height; depth = stuff->depth; if (!width || !height || !depth) { client->errorValue = 0; return BadValue; } if (width > 32767 || height > 32767) return BadAlloc; if (stuff->depth != 1) { pDepth = pDraw->pScreen->allowedDepths; for (i=0; i<pDraw->pScreen->numDepths; i++, pDepth++) if (pDepth->depth == stuff->depth) goto CreatePmap; client->errorValue = stuff->depth; return BadValue; } CreatePmap: size = PixmapBytePad(width, depth) * height; if (sizeof(size) == 4 && BitsPerPixel(depth) > 8) { if (size < width * height) return BadAlloc; /* thankfully, offset is unsigned */ if (stuff->offset + size < size) return BadAlloc; } VERIFY_SHMSIZE(shmdesc, stuff->offset, size, client); if(!(newPix = (PanoramiXRes *) xalloc(sizeof(PanoramiXRes)))) return BadAlloc; newPix->type = XRT_PIXMAP; newPix->u.pix.shared = TRUE; newPix->info[0].id = stuff->pid; for(j = 1; j < PanoramiXNumScreens; j++) newPix->info[j].id = FakeClientID(client->index); result = (client->noClientException); FOR_NSCREENS(j) { pScreen = screenInfo.screens[j]; pMap = (*shmFuncs[j]->CreatePixmap)(pScreen, stuff->width, stuff->height, stuff->depth, shmdesc->addr + stuff->offset); if (pMap) { dixSetPrivate(&pMap->devPrivates, shmPixmapPrivate, shmdesc); shmdesc->refcnt++; pMap->drawable.serialNumber = NEXT_SERIAL_NUMBER; pMap->drawable.id = newPix->info[j].id; if (!AddResource(newPix->info[j].id, RT_PIXMAP, (pointer)pMap)) { (*pScreen->DestroyPixmap)(pMap); result = BadAlloc; break; } } else { result = BadAlloc; break; } } if(result == BadAlloc) { while(j--) { (*pScreen->DestroyPixmap)(pMap); FreeResource(newPix->info[j].id, RT_NONE); } xfree(newPix); } else AddResource(stuff->pid, XRT_PIXMAP, newPix); return result; }
1
[ "CWE-189" ]
xserver
be6c17fcf9efebc0bbcc3d9a25f8c5a2450c2161
17,219,291,516,037,378,000,000,000,000,000,000,000
104
CVE-2007-6429: Always test for size+offset wrapping.
static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n) { int partial; pagefault_disable(); partial = __copy_from_user_inatomic(dst, src, n); pagefault_enable(); /* * Didn't copy everything, drop the mmap_sem and do a faulting copy */ if (unlikely(partial)) { up_read(&current->mm->mmap_sem); partial = copy_from_user(dst, src, n); down_read(&current->mm->mmap_sem); } return partial; }
1
[ "CWE-94" ]
linux-2.6
8811930dc74a503415b35c4a79d14fb0b408a361
235,093,879,813,159,370,000,000,000,000,000,000,000
19
splice: missing user pointer access verification vmsplice_to_user() must always check the user pointer and length with access_ok() before copying. Likewise, for the slow path of copy_from_user_mmap_sem() we need to check that we may read from the user region. Signed-off-by: Jens Axboe <jens.axboe@oracle.com> Cc: Wojciech Purczynski <cliph@research.coseinc.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
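copy_from_user_mmap_sem() above goes straight to __copy_from_user_inatomic(), which performs no access_ok() validation of its own. A sketch of the up-front check the message describes, assuming it sits at the top of the function before the atomic fast path:

	/* Validate the user range once, before the unchecked atomic copy. */
	if (!access_ok(VERIFY_READ, src, n))
		return -EFAULT;

	pagefault_disable();
	partial = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();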
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, unsigned long nr_segs, unsigned int flags) { struct pipe_inode_info *pipe; struct splice_desc sd; ssize_t size; int error; long ret; pipe = pipe_info(file->f_path.dentry->d_inode); if (!pipe) return -EBADF; if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); error = ret = 0; while (nr_segs) { void __user *base; size_t len; /* * Get user address base and length for this iovec. */ error = get_user(base, &iov->iov_base); if (unlikely(error)) break; error = get_user(len, &iov->iov_len); if (unlikely(error)) break; /* * Sanity check this iovec. 0 read succeeds. */ if (unlikely(!len)) break; if (unlikely(!base)) { error = -EFAULT; break; } sd.len = 0; sd.total_len = len; sd.flags = flags; sd.u.userptr = base; sd.pos = 0; size = __splice_from_pipe(pipe, &sd, pipe_to_user); if (size < 0) { if (!ret) ret = size; break; } ret += size; if (size < len) break; nr_segs--; iov++; } if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); if (!ret) ret = error; return ret; }
1
[ "CWE-94" ]
linux-2.6
8811930dc74a503415b35c4a79d14fb0b408a361
93,812,929,696,679,570,000,000,000,000,000,000,000
72
splice: missing user pointer access verification vmsplice_to_user() must always check the user pointer and length with access_ok() before copying. Likewise, for the slow path of copy_from_user_mmap_sem() we need to check that we may read from the user region. Signed-off-by: Jens Axboe <jens.axboe@oracle.com> Cc: Wojciech Purczynski <cliph@research.coseinc.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static int get_iovec_page_array(const struct iovec __user *iov, unsigned int nr_vecs, struct page **pages, struct partial_page *partial, int aligned) { int buffers = 0, error = 0; down_read(&current->mm->mmap_sem); while (nr_vecs) { unsigned long off, npages; struct iovec entry; void __user *base; size_t len; int i; error = -EFAULT; if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry))) break; base = entry.iov_base; len = entry.iov_len; /* * Sanity check this iovec. 0 read succeeds. */ error = 0; if (unlikely(!len)) break; error = -EFAULT; if (unlikely(!base)) break; /* * Get this base offset and number of pages, then map * in the user pages. */ off = (unsigned long) base & ~PAGE_MASK; /* * If asked for alignment, the offset must be zero and the * length a multiple of the PAGE_SIZE. */ error = -EINVAL; if (aligned && (off || len & ~PAGE_MASK)) break; npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; if (npages > PIPE_BUFFERS - buffers) npages = PIPE_BUFFERS - buffers; error = get_user_pages(current, current->mm, (unsigned long) base, npages, 0, 0, &pages[buffers], NULL); if (unlikely(error <= 0)) break; /* * Fill this contiguous range into the partial page map. */ for (i = 0; i < error; i++) { const int plen = min_t(size_t, len, PAGE_SIZE - off); partial[buffers].offset = off; partial[buffers].len = plen; off = 0; len -= plen; buffers++; } /* * We didn't complete this iov, stop here since it probably * means we have to move some of this into a pipe to * be able to continue. */ if (len) break; /* * Don't continue if we mapped fewer pages than we asked for, * or if we mapped the max number of pages that we have * room for. */ if (error < npages || buffers == PIPE_BUFFERS) break; nr_vecs--; iov++; } up_read(&current->mm->mmap_sem); if (buffers) return buffers; return error; }
1
[ "CWE-94" ]
linux-2.6
712a30e63c8066ed84385b12edbfb804f49cbc44
235,267,654,020,111,850,000,000,000,000,000,000,000
98
splice: fix user pointer access in get_iovec_page_array() Commit 8811930dc74a503415b35c4a79d14fb0b408a361 ("splice: missing user pointer access verification") added the proper access_ok() calls to copy_from_user_mmap_sem() which ensures we can copy the struct iovecs from userspace to the kernel. But we also must check whether we can access the actual memory region pointed to by the struct iovec to fix the access checks properly. Signed-off-by: Bastian Blank <waldi@debian.org> Acked-by: Oliver Pinter <oliver.pntr@gmail.com> Cc: Jens Axboe <jens.axboe@oracle.com> Cc: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
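get_iovec_page_array() above verifies only the struct iovec itself (via copy_from_user_mmap_sem()), not the user region the iovec points to. A sketch of the additional check the message describes, placed right after base and len are read out of the entry:

	base = entry.iov_base;
	len = entry.iov_len;

	/* Also verify the region the iovec points to, not just the
	 * iovec structure, before handing it to get_user_pages(). */
	error = -EFAULT;
	if (unlikely(!access_ok(VERIFY_READ, base, len)))
		break;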
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) { struct rlimit new_rlim, *old_rlim; unsigned long it_prof_secs; int retval; if (resource >= RLIM_NLIMITS) return -EINVAL; if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) return -EFAULT; if (new_rlim.rlim_cur > new_rlim.rlim_max) return -EINVAL; old_rlim = current->signal->rlim + resource; if ((new_rlim.rlim_max > old_rlim->rlim_max) && !capable(CAP_SYS_RESOURCE)) return -EPERM; if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN) return -EPERM; retval = security_task_setrlimit(resource, &new_rlim); if (retval) return retval; task_lock(current->group_leader); *old_rlim = new_rlim; task_unlock(current->group_leader); if (resource != RLIMIT_CPU) goto out; /* * RLIMIT_CPU handling. Note that the kernel fails to return an error * code if it rejected the user's attempt to set RLIMIT_CPU. This is a * very long-standing error, and fixing it now risks breakage of * applications, so we live with it */ if (new_rlim.rlim_cur == RLIM_INFINITY) goto out; it_prof_secs = cputime_to_secs(current->signal->it_prof_expires); if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) { unsigned long rlim_cur = new_rlim.rlim_cur; cputime_t cputime; if (rlim_cur == 0) { /* * The caller is asking for an immediate RLIMIT_CPU * expiry. But we use the zero value to mean "it was * never set". So let's cheat and make it one second * instead */ rlim_cur = 1; } cputime = secs_to_cputime(rlim_cur); read_lock(&tasklist_lock); spin_lock_irq(&current->sighand->siglock); set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); spin_unlock_irq(&current->sighand->siglock); read_unlock(&tasklist_lock); } out: return 0; }
1
[ "CWE-20" ]
linux-2.6
9926e4c74300c4b31dee007298c6475d33369df0
53,303,886,230,877,640,000,000,000,000,000,000,000
63
CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix As discovered here today, the change in Kernel 2.6.17 intended to inhibit users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by "cheating" and setting it to 1 in such a case does not make a difference, as the check is done in the wrong place (too late), and only applies to the profiling code. On all systems I checked running kernels above 2.6.17, no matter what the hard and soft CPU time limits were before, a user could escape them by issuing in the shell (sh/bash/zsh) "ulimit -t 0", and then the user's process was never killed. Attached is a trivial patch to fix that. Simply moving the check to a slightly earlier location (specifically, before the line that actually assigns the limit - *old_rlim = new_rlim) does the trick. Do note that at least the zsh (but not ash, dash, or bash) shell has the problem of "caching" the limits set by the ulimit command, so when running zsh the fix will not immediately be evident - after entering "ulimit -t 0", "ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual limit as returned by getrlimit(...) will be 1. It can be verified by opening a subshell (which will not have the values of the parent shell in cache) and checking in it, or just by running a CPU-intensive command like "echo '65536^1048576' | bc" and verifying that it dumps core after one second. Regardless of whether that is a misfeature in the shell, perhaps it would be better to return -EINVAL from setrlimit in such a case instead of cheating and setting to 1, as that does not really reflect the actual state of the process anymore. I do not, however, know what the grounds for that decision were in the original 2.6.17 change, and whether there would be any "backward" compatibility issues, so I preferred not to touch that right now. Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
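In sys_setrlimit() above, the "zero means one second" RLIMIT_CPU cheat only happens in the profiling-timer path, after *old_rlim has already been stored. A sketch of the fix the message describes: apply the substitution before the assignment, so every reader of the limit sees the adjusted value:

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/* Zero is reserved to mean "it was never set"; cheat
		 * and use one second so the limit actually bites. */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);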
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs) { void __user *restorer; struct rt_sigframe __user *frame; int err = 0; int usig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= __put_user(usig, &frame->sig); err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; /* Set up to return from userspace. */ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; err |= __put_user(restorer, &frame->pretcode); /* * This is movl $,%ax ; int $0x80 * * WE DO NOT USE IT ANY MORE! It's only left here for historical * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = (unsigned long) usig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; /* * Clear TF when entering the signal handler, but * notify any tracer that was single-stepping it. * The tracer may want to single-step inside the * handler too. */ regs->flags &= ~TF_MASK; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
1
[ "CWE-399" ]
linux-2.6
e40cd10ccff3d9fbffd57b93780bee4b7b9bff51
25,210,609,055,917,767,000,000,000,000,000,000,000
92
x86: clear DF before calling signal handler The Linux kernel currently does not clear the direction flag before calling a signal handler, whereas the x86/x86-64 ABI requires that. Linux has had this behavior/bug forever, but it becomes a real problem with gcc version 4.3, which assumes that the direction flag is correctly cleared at the entry of a function. This patch changes the setup_frame() functions to clear the direction flag before entering the signal handler. Signed-off-by: Aurelien Jarno <aurelien@aurel32.net> Signed-off-by: Ingo Molnar <mingo@elte.hu> Acked-by: H. Peter Anvin <hpa@zytor.com>
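Each setup_*frame() variant above masks only the trap flag out of regs->flags before transferring control to the handler. A sketch of the one-line change the message describes, applied in every variant (X86_EFLAGS_DF is the direction flag bit):

	/* Clear DF as well as TF: the ABI guarantees a cleared
	 * direction flag on function entry, and gcc 4.3 relies on it. */
	regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);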
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, compat_sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; struct exec_domain *ed = current_thread_info()->exec_domain; void __user *restorer; int err = 0; /* __copy_to_user optimizes that into a single 8 byte store */ static const struct { u8 movl; u32 val; u16 int80; u16 pad; u8 pad2; } __attribute__((packed)) code = { 0xb8, __NR_ia32_rt_sigreturn, 0x80cd, 0, }; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user((ed && ed->signal_invmap && sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig); err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo); err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc); err |= copy_siginfo_to_user32(&frame->info, info); if (err) goto give_sigsegv; /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; else restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); /* * Not actually used anymore, but left because some gdb * versions need it. */ err |= __copy_to_user(frame->retcode, &code, 8); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); regs->cs = __USER32_CS; regs->ss = __USER32_DS; set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
1
[ "CWE-399" ]
linux-2.6
e40cd10ccff3d9fbffd57b93780bee4b7b9bff51
336,026,717,347,518,600,000,000,000,000,000,000,000
99
x86: clear DF before calling signal handler The Linux kernel currently does not clear the direction flag before calling a signal handler, whereas the x86/x86-64 ABI requires that. Linux has had this behavior/bug forever, but it becomes a real problem with gcc version 4.3, which assumes that the direction flag is correctly cleared at the entry of a function. This patch changes the setup_frame() functions to clear the direction flag before entering the signal handler. Signed-off-by: Aurelien Jarno <aurelien@aurel32.net> Signed-off-by: Ingo Molnar <mingo@elte.hu> Acked-by: H. Peter Anvin <hpa@zytor.com>
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs) { struct rt_sigframe __user *frame; struct _fpstate __user *fp = NULL; int err = 0; struct task_struct *me = current; if (used_math()) { fp = get_stack(ka, regs, sizeof(struct _fpstate)); frame = (void __user *)round_down( (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) goto give_sigsegv; if (save_i387(fp) < 0) err |= -1; } else frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; if (ka->sa.sa_flags & SA_SIGINFO) { err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; } /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); if (sizeof(*set) == 16) { __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); } else err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. If provided, use a stub already in userspace. */ /* x86-64 should always use SA_RESTORER. */ if (ka->sa.sa_flags & SA_RESTORER) { err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); } else { /* could use a vstub here */ goto give_sigsegv; } if (err) goto give_sigsegv; #ifdef DEBUG_SIG printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax); #endif /* Set up registers for signal handler */ regs->di = sig; /* In case the signal handler was declared without prototypes */ regs->ax = 0; /* This also works for non SA_SIGINFO handlers because they expect the next argument after the signal number on the stack. */ regs->si = (unsigned long)&frame->info; regs->dx = (unsigned long)&frame->uc; regs->ip = (unsigned long) ka->sa.sa_handler; regs->sp = (unsigned long)frame; /* Set up the CS register to run signal handlers in 64-bit mode, even if the handler happens to be interrupting 32-bit code. */ regs->cs = __USER_CS; /* This, by contrast, has nothing to do with segment registers - see include/asm-x86_64/uaccess.h for details. */ set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #ifdef DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
1
[ "CWE-399" ]
linux-2.6
e40cd10ccff3d9fbffd57b93780bee4b7b9bff51
323,536,889,097,069,170,000,000,000,000,000,000,000
97
x86: clear DF before calling signal handler The Linux kernel currently does not clear the direction flag before calling a signal handler, whereas the x86/x86-64 ABI requires that. Linux has had this behavior/bug forever, but it becomes a real problem with gcc version 4.3, which assumes that the direction flag is correctly cleared at the entry of a function. This patch changes the setup_frame() functions to clear the direction flag before entering the signal handler. Signed-off-by: Aurelien Jarno <aurelien@aurel32.net> Signed-off-by: Ingo Molnar <mingo@elte.hu> Acked-by: H. Peter Anvin <hpa@zytor.com>
static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) { void __user *restorer; struct sigframe __user *frame; int err = 0; int usig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err = __put_user(usig, &frame->sig); if (err) goto give_sigsegv; err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); if (err) goto give_sigsegv; if (_NSIG_WORDS > 1) { err = __copy_to_user(&frame->extramask, &set->sig[1], sizeof(frame->extramask)); if (err) goto give_sigsegv; } if (current->binfmt->hasvdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; /* Set up to return from userspace. */ err |= __put_user(restorer, &frame->pretcode); /* * This is popl %eax ; movl $,%eax ; int $0x80 * * WE DO NOT USE IT ANY MORE! It's only left here for historical * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = (unsigned long) sig; regs->dx = (unsigned long) 0; regs->cx = (unsigned long) 0; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; /* * Clear TF when entering the signal handler, but * notify any tracer that was single-stepping it. * The tracer may want to single-step inside the * handler too. */ regs->flags &= ~TF_MASK; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
1
[ "CWE-399" ]
linux-2.6
e40cd10ccff3d9fbffd57b93780bee4b7b9bff51
240,195,482,991,768,350,000,000,000,000,000,000,000
91
x86: clear DF before calling signal handler The Linux kernel currently does not clear the direction flag before calling a signal handler, whereas the x86/x86-64 ABI requires that. Linux has had this behavior/bug forever, but it becomes a real problem with gcc version 4.3, which assumes that the direction flag is correctly cleared at the entry of a function. This patch changes the setup_frame() functions to clear the direction flag before entering the signal handler. Signed-off-by: Aurelien Jarno <aurelien@aurel32.net> Signed-off-by: Ingo Molnar <mingo@elte.hu> Acked-by: H. Peter Anvin <hpa@zytor.com>
int ia32_setup_frame(int sig, struct k_sigaction *ka, compat_sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; void __user *restorer; int err = 0; /* copy_to_user optimizes that into a single 8 byte store */ static const struct { u16 poplmovl; u32 val; u16 int80; u16 pad; } __attribute__((packed)) code = { 0xb858, /* popl %eax ; movl $...,%eax */ __NR_ia32_sigreturn, 0x80cd, /* int $0x80 */ 0, }; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user(sig, &frame->sig); if (err) goto give_sigsegv; err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); if (err) goto give_sigsegv; if (_COMPAT_NSIG_WORDS > 1) { err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); if (err) goto give_sigsegv; } if (ka->sa.sa_flags & SA_RESTORER) { restorer = ka->sa.sa_restorer; } else { /* Return stub is in 32bit vsyscall page */ if (current->binfmt->hasvdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; } err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); /* * These are actually not used anymore, but left because some * gdb versions depend on them as a marker. */ err |= __copy_to_user(frame->retcode, &code, 8); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = 0; regs->cx = 0; asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); regs->cs = __USER32_CS; regs->ss = __USER32_DS; set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
1
[ "CWE-399" ]
linux-2.6
e40cd10ccff3d9fbffd57b93780bee4b7b9bff51
242,893,451,020,799,340,000,000,000,000,000,000,000
92
x86: clear DF before calling signal handler The Linux kernel currently does not clear the direction flag before calling a signal handler, whereas the x86/x86-64 ABI requires that. Linux has had this behavior/bug forever, but it becomes a real problem with gcc version 4.3, which assumes that the direction flag is correctly cleared at the entry of a function. This patch changes the setup_frame() functions to clear the direction flag before entering the signal handler. Signed-off-by: Aurelien Jarno <aurelien@aurel32.net> Signed-off-by: Ingo Molnar <mingo@elte.hu> Acked-by: H. Peter Anvin <hpa@zytor.com>
static unsigned long __peek_user(struct task_struct *child, addr_t addr) { struct user *dummy = NULL; addr_t offset, tmp; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); if (addr == (addr_t) &dummy->regs.psw.mask) /* Remove per bit from user psw. */ tmp &= ~PSW_MASK_PER; } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb reading * from acrs[15]. Result is a 64 bit value. Read the * 32 bit acrs[15] value and shift it by 32. Sick... */ if (addr == (addr_t) &dummy->regs.acrs[15]) tmp = ((unsigned long) child->thread.acrs[15]) << 32; else #endif tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ tmp = (addr_t) task_pt_regs(child)->orig_gpr2; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.fp_regs; tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); if (addr == (addr_t) &dummy->regs.fp_regs.fpc) tmp &= (unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32); } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy->regs.per_info; tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); } else tmp = 0; return tmp; }
1
[ "CWE-399" ]
linux-2.6
3d6e48f43340343d97839eadb1ab7b6a3ea98797
228,545,314,486,915,480,000,000,000,000,000,000,000
59
[S390] CVE-2008-1514: prevent ptrace padding area read/write in 31-bit mode When running a 31-bit ptrace, on either an s390 or s390x kernel, reads and writes into a padding area in struct user_regs_struct32 will result in a kernel panic. This is also known as CVE-2008-1514. Test case available here: http://sources.redhat.com/cgi-bin/cvsweb.cgi/~checkout~/tests/ptrace-tests/tests/user-area-padding.c?cvsroot=systemtap Steps to reproduce: 1) wget the above 2) gcc -o user-area-padding-31bit user-area-padding.c -Wall -ggdb2 -D_GNU_SOURCE -m31 3) ./user-area-padding-31bit <panic> Test status ----------- Without the patch, both s390 and s390x kernels panic. With the patch, the test case, as well as the gdb testsuite, passes without incident, padding area reads returning zero, writes ignored. NB: the original version returned -EINVAL on write attempts, which broke the gdb test and made the test case slightly unhappy; Jan Kratochvil suggested the change to return 0 on write attempts. Signed-off-by: Jarod Wilson <jarod@redhat.com> Tested-by: Jan Kratochvil <jan.kratochvil@redhat.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
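The peek/poke helpers above translate any in-range addr into an offset into the thread structure, including offsets that fall in struct padding, which is what panics. A sketch of the behaviour the message describes (padding reads return zero, padding writes are ignored); the helper name addr_is_compat_padding() is hypothetical, introduced only for illustration of where the checks would sit:

	/* Hypothetical helper: true when addr falls inside a padding
	 * hole of the 31-bit user area rather than a real register. */

	/* In the peek path: */
	if (addr_is_compat_padding(addr))
		tmp = 0;		/* reads of padding yield zero */

	/* In the poke path: */
	if (addr_is_compat_padding(addr))
		return 0;		/* writes to padding are ignored */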
static int __poke_user_compat(struct task_struct *child, addr_t addr, addr_t data) { struct user32 *dummy32 = NULL; per_struct32 *dummy_per32 = NULL; __u32 tmp = (__u32) data; addr_t offset; if (addr < (addr_t) &dummy32->regs.acrs) { /* * psw, gprs, acrs and orig_gpr2 are stored on the stack */ if (addr == (addr_t) &dummy32->regs.psw.mask) { /* Build a 64 bit psw mask from 31 bit mask. */ if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) /* Invalid psw mask. */ return -EINVAL; task_pt_regs(child)->psw.mask = PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Build a 64 bit psw address from 31 bit address. */ task_pt_regs(child)->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; } else { /* gpr 0-15 */ *(__u32*)((addr_t) &task_pt_regs(child)->psw + addr*2 + 4) = tmp; } } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.acrs; *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack */ *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && (tmp & ~FPC_VALID_MASK) != 0) /* Invalid floating point control. */ return -EINVAL; offset = addr - (addr_t) &dummy32->regs.fp_regs; *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* * per_info is found in the thread structure. */ offset = addr - (addr_t) &dummy32->regs.per_info; /* * This is magic. See per_struct and per_struct32. * By incident the offsets in per_struct are exactly * twice the offsets in per_struct32 for all fields. * The 8 byte fields need special handling though, * because the second half (bytes 4-7) is needed and * not the first half. */ if ((offset >= (addr_t) &dummy_per32->control_regs && offset < (addr_t) (&dummy_per32->control_regs + 1)) || (offset >= (addr_t) &dummy_per32->starting_addr && offset <= (addr_t) &dummy_per32->ending_addr) || offset == (addr_t) &dummy_per32->lowcore.words.address) offset = offset*2 + 4; else offset = offset*2; *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp; } FixPerRegisters(child); return 0; }
1
[ "CWE-399" ]
linux-2.6
3d6e48f43340343d97839eadb1ab7b6a3ea98797
336,645,180,341,076,800,000,000,000,000,000,000,000
80
[S390] CVE-2008-1514: prevent ptrace padding area read/write in 31-bit mode When running a 31-bit ptrace, on either an s390 or s390x kernel, reads and writes into a padding area in struct user_regs_struct32 will result in a kernel panic. This is also known as CVE-2008-1514. Test case available here: http://sources.redhat.com/cgi-bin/cvsweb.cgi/~checkout~/tests/ptrace-tests/tests/user-area-padding.c?cvsroot=systemtap Steps to reproduce: 1) wget the above 2) gcc -o user-area-padding-31bit user-area-padding.c -Wall -ggdb2 -D_GNU_SOURCE -m31 3) ./user-area-padding-31bit <panic> Test status ----------- Without the patch, both s390 and s390x kernels panic. With the patch, the test case, as well as the gdb testsuite, passes without incident, padding area reads returning zero, writes ignored. NB: the original version returned -EINVAL on write attempts, which broke the gdb test and made the test case slightly unhappy; Jan Kratochvil suggested the change to return 0 on write attempts. Signed-off-by: Jarod Wilson <jarod@redhat.com> Tested-by: Jan Kratochvil <jan.kratochvil@redhat.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) { struct user *dummy = NULL; addr_t offset; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ if (addr == (addr_t) &dummy->regs.psw.mask && #ifdef CONFIG_COMPAT data != PSW_MASK_MERGE(psw_user32_bits, data) && #endif data != PSW_MASK_MERGE(psw_user_bits, data)) /* Invalid psw mask. */ return -EINVAL; #ifndef CONFIG_64BIT if (addr == (addr_t) &dummy->regs.psw.addr) /* I'd like to reject addresses without the high order bit but older gdb's rely on it */ data |= PSW_ADDR_AMODE; #endif *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb writing * to acrs[15] with a 64 bit value. Ignore the lower * half of the value and write the upper 32 bit to * acrs[15]. Sick... */ if (addr == (addr_t) &dummy->regs.acrs[15]) child->thread.acrs[15] = (unsigned int) (data >> 32); else #endif *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ task_pt_regs(child)->orig_gpr2 = data; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy->regs.fp_regs.fpc && (data & ~((unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32))) != 0) return -EINVAL; offset = addr - (addr_t) &dummy->regs.fp_regs; *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy->regs.per_info; *(addr_t *)((addr_t) &child->thread.per_info + offset) = data; } FixPerRegisters(child); return 0; }
1
[ "CWE-399" ]
linux-2.6
3d6e48f43340343d97839eadb1ab7b6a3ea98797
315,587,780,023,225,040,000,000,000,000,000,000,000
71
[S390] CVE-2008-1514: prevent ptrace padding area read/write in 31-bit mode When running a 31-bit ptrace, on either an s390 or s390x kernel, reads and writes into a padding area in struct user_regs_struct32 will result in a kernel panic. This is also known as CVE-2008-1514. Test case available here: http://sources.redhat.com/cgi-bin/cvsweb.cgi/~checkout~/tests/ptrace-tests/tests/user-area-padding.c?cvsroot=systemtap Steps to reproduce: 1) wget the above 2) gcc -o user-area-padding-31bit user-area-padding.c -Wall -ggdb2 -D_GNU_SOURCE -m31 3) ./user-area-padding-31bit <panic> Test status ----------- Without patch, both s390 and s390x kernels panic. With patch, the test case, as well as the gdb testsuite, pass without incident, padding area reads returning zero, writes ignored. Nb: original version returned -EINVAL on write attempts, which broke the gdb test and made the test case slightly unhappy, Jan Kratochvil suggested the change to return 0 on write attempts. Signed-off-by: Jarod Wilson <jarod@redhat.com> Tested-by: Jan Kratochvil <jan.kratochvil@redhat.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
static u32 __peek_user_compat(struct task_struct *child, addr_t addr) { struct user32 *dummy32 = NULL; per_struct32 *dummy_per32 = NULL; addr_t offset; __u32 tmp; if (addr < (addr_t) &dummy32->regs.acrs) { /* * psw and gprs are stored on the stack */ if (addr == (addr_t) &dummy32->regs.psw.mask) { /* Fake a 31 bit psw mask. */ tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp); } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Fake a 31 bit psw address. */ tmp = (__u32) task_pt_regs(child)->psw.addr | PSW32_ADDR_AMODE31; } else { /* gpr 0-15 */ tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw + addr*2 + 4); } } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.acrs; tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack */ tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.fp_regs; tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset); } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy32->regs.per_info; /* This is magic. See per_struct and per_struct32. */ if ((offset >= (addr_t) &dummy_per32->control_regs && offset < (addr_t) (&dummy_per32->control_regs + 1)) || (offset >= (addr_t) &dummy_per32->starting_addr && offset <= (addr_t) &dummy_per32->ending_addr) || offset == (addr_t) &dummy_per32->lowcore.words.address) offset = offset*2 + 4; else offset = offset*2; tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset); } else tmp = 0; return tmp; }
1
[ "CWE-399" ]
linux-2.6
3d6e48f43340343d97839eadb1ab7b6a3ea98797
35,699,932,838,905,237,000,000,000,000,000,000,000
65
[S390] CVE-2008-1514: prevent ptrace padding area read/write in 31-bit mode When running a 31-bit ptrace, on either an s390 or s390x kernel, reads and writes into a padding area in struct user_regs_struct32 will result in a kernel panic. This is also known as CVE-2008-1514. Test case available here: http://sources.redhat.com/cgi-bin/cvsweb.cgi/~checkout~/tests/ptrace-tests/tests/user-area-padding.c?cvsroot=systemtap Steps to reproduce: 1) wget the above 2) gcc -o user-area-padding-31bit user-area-padding.c -Wall -ggdb2 -D_GNU_SOURCE -m31 3) ./user-area-padding-31bit <panic> Test status ----------- Without patch, both s390 and s390x kernels panic. With patch, the test case, as well as the gdb testsuite, pass without incident, padding area reads returning zero, writes ignored. Nb: original version returned -EINVAL on write attempts, which broke the gdb test and made the test case slightly unhappy, Jan Kratochvil suggested the change to return 0 on write attempts. Signed-off-by: Jarod Wilson <jarod@redhat.com> Tested-by: Jan Kratochvil <jan.kratochvil@redhat.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) { struct dnotify_struct *dn; struct dnotify_struct *odn; struct dnotify_struct **prev; struct inode *inode; fl_owner_t id = current->files; int error = 0; if ((arg & ~DN_MULTISHOT) == 0) { dnotify_flush(filp, id); return 0; } if (!dir_notify_enable) return -EINVAL; inode = filp->f_path.dentry->d_inode; if (!S_ISDIR(inode->i_mode)) return -ENOTDIR; dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); if (dn == NULL) return -ENOMEM; spin_lock(&inode->i_lock); prev = &inode->i_dnotify; while ((odn = *prev) != NULL) { if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { odn->dn_fd = fd; odn->dn_mask |= arg; inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; goto out_free; } prev = &odn->dn_next; } error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); if (error) goto out_free; dn->dn_mask = arg; dn->dn_fd = fd; dn->dn_filp = filp; dn->dn_owner = id; inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; dn->dn_next = inode->i_dnotify; inode->i_dnotify = dn; spin_unlock(&inode->i_lock); if (filp->f_op && filp->f_op->dir_notify) return filp->f_op->dir_notify(filp, arg); return 0; out_free: spin_unlock(&inode->i_lock); kmem_cache_free(dn_cache, dn); return error; }
1
[ "CWE-362" ]
linux-2.6
214b7049a7929f03bbd2786aaef04b8b79db34e2
125,123,024,928,884,710,000,000,000,000,000,000,000
55
Fix dnotify/close race We have a race between fcntl() and close() that can lead to dnotify_struct inserted into inode's list *after* the last descriptor had been gone from current->files. Since that's the only point where dnotify_struct gets evicted, we are screwed - it will stick around indefinitely. Even after struct file in question is gone and freed. Worse, we can trigger send_sigio() on it at any later point, which allows to send an arbitrary signal to arbitrary process if we manage to apply enough memory pressure to get the page that used to host that struct file and fill it with the right pattern... Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
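The race described here is a publish-then-stale-record pattern: the watch is linked into the inode list after close() may already have removed the descriptor. A hedged sketch of the usual repair, assuming the fix re-validates the fd-to-file mapping under the same lock; lookup_fd, fd_table, and the registry below are illustrative stand-ins for fcheck(), the task's fd table, and the inode's dnotify list.

#include <pthread.h>
#include <stddef.h>

struct file;                            /* opaque, as in the kernel */

struct record {
    struct record *next;
    struct file   *filp;
    int            fd;
};

#define MAX_FD 16
static struct file *fd_table[MAX_FD];   /* stand-in for current->files */
static struct record *registry;         /* stand-in for inode->i_dnotify */
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static struct file *lookup_fd(int fd)   /* stand-in for fcheck() */
{
    return (fd >= 0 && fd < MAX_FD) ? fd_table[fd] : NULL;
}

int register_watch(int fd, struct file *filp, struct record *rec)
{
    rec->fd = fd;
    rec->filp = filp;

    pthread_mutex_lock(&reg_lock);
    rec->next = registry;
    registry = rec;                     /* publish the watch */
    if (lookup_fd(fd) != filp) {        /* re-check: did close() race us? */
        registry = rec->next;           /* yes: unpublish before returning */
        pthread_mutex_unlock(&reg_lock);
        return -1;
    }
    pthread_mutex_unlock(&reg_lock);
    return 0;
}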
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) { unsigned long orun = 1; ktime_t delta; delta = ktime_sub(now, timer->expires); if (delta.tv64 < 0) return 0; if (interval.tv64 < timer->base->resolution.tv64) interval.tv64 = timer->base->resolution.tv64; if (unlikely(delta.tv64 >= interval.tv64)) { s64 incr = ktime_to_ns(interval); orun = ktime_divns(delta, incr); timer->expires = ktime_add_ns(timer->expires, incr * orun); if (timer->expires.tv64 > now.tv64) return orun; /* * This (and the ktime_add() below) is the * correction for exact: */ orun++; } timer->expires = ktime_add(timer->expires, interval); return orun; }
1
[ "CWE-189" ]
linux-2.6
13788ccc41ceea5893f9c747c59bc0b28f2416c2
179,936,784,627,311,600,000,000,000,000,000,000,000
30
[PATCH] hrtimer: prevent overrun DoS in hrtimer_forward() hrtimer_forward() does not check for the possible overflow of timer->expires. This can happen on 64 bit machines with large interval values and results currently in an endless loop in the softirq because the expiry value becomes negative and therefor the timer is expired all the time. Check for this condition and set the expiry value to the max. expiry time in the future. The fix should be applied to stable kernel series as well. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Ingo Molnar <mingo@elte.hu> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
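The fix the message describes is a saturation check: before the repeated interval additions can push timer->expires past the representable range and wrap negative, clamp to the maximum expiry. A minimal sketch of that guard in plain C, with illustrative names (ns_saturating_add, NS_MAX) and assuming a non-negative interval:

#include <stdint.h>
#include <stdio.h>

#define NS_MAX INT64_MAX                /* stand-in for the max expiry */

/* Assumes interval >= 0; returns NS_MAX instead of wrapping. */
static int64_t ns_saturating_add(int64_t expires, int64_t interval)
{
    if (expires > NS_MAX - interval)
        return NS_MAX;                  /* clamp: timer fires "never" */
    return expires + interval;
}

int main(void)
{
    int64_t e = NS_MAX - 5;
    /* Without the clamp this would wrap negative, look expired on
     * every softirq pass, and livelock exactly as described above. */
    printf("%lld\n", (long long)ns_saturating_add(e, 10));
    return 0;
}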
CairoFont *CairoFont::create(GfxFont *gfxFont, XRef *xref, FT_Library lib, GBool useCIDs) { Ref embRef; Object refObj, strObj; GooString *tmpFileName, *fileName,*tmpFileName2; DisplayFontParam *dfp; FILE *tmpFile; int c, i, n; GfxFontType fontType; char **enc; char *name; FoFiTrueType *ff; FoFiType1C *ff1c; Ref ref; static cairo_user_data_key_t cairo_font_face_key; cairo_font_face_t *cairo_font_face; FT_Face face; Gushort *codeToGID; int codeToGIDLen; dfp = NULL; codeToGID = NULL; codeToGIDLen = 0; cairo_font_face = NULL; ref = *gfxFont->getID(); fontType = gfxFont->getType(); tmpFileName = NULL; if (gfxFont->getEmbeddedFontID(&embRef)) { if (!openTempFile(&tmpFileName, &tmpFile, "wb", NULL)) { error(-1, "Couldn't create temporary font file"); goto err2; } refObj.initRef(embRef.num, embRef.gen); refObj.fetch(xref, &strObj); refObj.free(); strObj.streamReset(); while ((c = strObj.streamGetChar()) != EOF) { fputc(c, tmpFile); } strObj.streamClose(); strObj.free(); fclose(tmpFile); fileName = tmpFileName; } else if (!(fileName = gfxFont->getExtFontFile())) { // look for a display font mapping or a substitute font dfp = NULL; if (gfxFont->getName()) { dfp = globalParams->getDisplayFont(gfxFont); } if (!dfp) { error(-1, "Couldn't find a font for '%s'", gfxFont->getName() ? gfxFont->getName()->getCString() : "(unnamed)"); goto err2; } switch (dfp->kind) { case displayFontT1: fileName = dfp->t1.fileName; fontType = gfxFont->isCIDFont() ? fontCIDType0 : fontType1; break; case displayFontTT: fileName = dfp->tt.fileName; fontType = gfxFont->isCIDFont() ? fontCIDType2 : fontTrueType; break; } } switch (fontType) { case fontType1: case fontType1C: if (FT_New_Face(lib, fileName->getCString(), 0, &face)) { error(-1, "could not create type1 face"); goto err2; } enc = ((Gfx8BitFont *)gfxFont)->getEncoding(); codeToGID = (Gushort *)gmallocn(256, sizeof(int)); codeToGIDLen = 256; for (i = 0; i < 256; ++i) { codeToGID[i] = 0; if ((name = enc[i])) { codeToGID[i] = (Gushort)FT_Get_Name_Index(face, name); } } break; case fontCIDType2: codeToGID = NULL; n = 0; if (((GfxCIDFont *)gfxFont)->getCIDToGID()) { n = ((GfxCIDFont *)gfxFont)->getCIDToGIDLen(); if (n) { codeToGID = (Gushort *)gmallocn(n, sizeof(Gushort)); memcpy(codeToGID, ((GfxCIDFont *)gfxFont)->getCIDToGID(), n * sizeof(Gushort)); } } else { ff = FoFiTrueType::load(fileName->getCString()); if (! ff) goto err2; codeToGID = ((GfxCIDFont *)gfxFont)->getCodeToGIDMap(ff, &n); delete ff; } codeToGIDLen = n; /* Fall through */ case fontTrueType: if (!(ff = FoFiTrueType::load(fileName->getCString()))) { error(-1, "failed to load truetype font\n"); goto err2; } /* This might be set already for the CIDType2 case */ if (fontType == fontTrueType) { codeToGID = ((Gfx8BitFont *)gfxFont)->getCodeToGIDMap(ff); codeToGIDLen = 256; } if (!openTempFile(&tmpFileName2, &tmpFile, "wb", NULL)) { delete ff; error(-1, "failed to open truetype tempfile\n"); goto err2; } ff->writeTTF(&fileWrite, tmpFile); fclose(tmpFile); delete ff; if (FT_New_Face(lib, tmpFileName2->getCString(), 0, &face)) { error(-1, "could not create truetype face\n"); goto err2; } unlink (tmpFileName2->getCString()); delete tmpFileName2; break; case fontCIDType0: case fontCIDType0C: codeToGID = NULL; codeToGIDLen = 0; if (!useCIDs) { if ((ff1c = FoFiType1C::load(fileName->getCString()))) { codeToGID = ff1c->getCIDToGIDMap(&codeToGIDLen); delete ff1c; } } if (FT_New_Face(lib, fileName->getCString(), 0, &face)) { gfree(codeToGID); codeToGID = NULL; error(-1, "could not create cid face\n"); goto err2; } break; default: printf ("font type not handled\n"); goto err2; break; } // delete the (temporary) font file -- with Unix hard link // semantics, this will remove the last link; otherwise it will // return an error, leaving the file to be deleted later if (fileName == tmpFileName) { unlink (fileName->getCString()); delete tmpFileName; } cairo_font_face = cairo_ft_font_face_create_for_ft_face (face, FT_LOAD_NO_HINTING | FT_LOAD_NO_BITMAP); if (cairo_font_face == NULL) { error(-1, "could not create cairo font\n"); goto err2; /* this doesn't do anything, but it looks like we're * handling the error */ } { CairoFont *ret = new CairoFont(ref, cairo_font_face, face, codeToGID, codeToGIDLen); cairo_font_face_set_user_data (cairo_font_face, &cairo_font_face_key, ret, cairo_font_face_destroy); return ret; } err2: /* hmm? */ printf ("some font thing failed\n"); return NULL; }
1
[ "CWE-20" ]
poppler
1a531dcfee1c6fc79a414c38cbe7327fbf9a59d8
294,666,036,737,394,350,000,000,000,000,000,000,000
195
Fix a crash with invalid embedded fonts
m4_mkstemp (struct obstack *obs, int argc, token_data **argv) { if (bad_argc (argv[0], argc, 2, 2)) return; mkstemp_helper (obs, ARG (1)); }
1
[]
m4
5345bb49077bfda9fabd048e563f9e7077fe335d
318,397,935,641,703,000,000,000,000,000,000,000,000
6
Minor security fix: Quote output of mkstemp. * src/builtin.c (mkstemp_helper): Produce quoted output. * doc/m4.texinfo (Mkstemp): Update the documentation and tests. * NEWS: Document this change. Signed-off-by: Eric Blake <ebb9@byu.net> (cherry picked from commit bd9900d65eb9cd5add0f107e94b513fa267495ba)
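The quoting fix means the expansion of mkstemp(template) is wrapped in the active quote delimiters before it is rescanned, so the generated name is inert macro input. A sketch under POSIX mkstemp(3); quoted_mkstemp is a hypothetical helper, not m4's real obstack-based mkstemp_helper.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static char *quoted_mkstemp(const char *template_name,
                            const char *lq, const char *rq)
{
    char *name = strdup(template_name); /* must end in XXXXXX */
    char *out = NULL;
    size_t n;
    int fd;

    if (name == NULL)
        return NULL;
    fd = mkstemp(name);                 /* fills in the X's in place */
    if (fd >= 0) {
        close(fd);
        n = strlen(lq) + strlen(name) + strlen(rq) + 1;
        out = malloc(n);
        if (out != NULL)
            snprintf(out, n, "%s%s%s", lq, name, rq);
    }
    free(name);
    return out;
}

int main(void)
{
    char *s = quoted_mkstemp("fileXXXXXX", "`", "'");
    if (s) {
        puts(s);                        /* e.g. `fileAB12CD' */
        free(s);
    }
    return 0;
}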
mkstemp_helper (struct obstack *obs, const char *name) { int fd; int len; int i; /* Guarantee that there are six trailing 'X' characters, even if the user forgot to supply them. */ len = strlen (name); obstack_grow (obs, name, len); for (i = 0; len > 0 && i < 6; i++) if (name[--len] != 'X') break; for (; i < 6; i++) obstack_1grow (obs, 'X'); obstack_1grow (obs, '\0'); errno = 0; fd = mkstemp ((char *) obstack_base (obs)); if (fd < 0) { M4ERROR ((0, errno, "cannot create tempfile `%s'", name)); obstack_free (obs, obstack_finish (obs)); } else close (fd); }
1
[]
m4
5345bb49077bfda9fabd048e563f9e7077fe335d
323,725,070,097,395,600,000,000,000,000,000,000,000
27
Minor security fix: Quote output of mkstemp. * src/builtin.c (mkstemp_helper): Produce quoted output. * doc/m4.texinfo (Mkstemp): Update the documentation and tests. * NEWS: Document this change. Signed-off-by: Eric Blake <ebb9@byu.net> (cherry picked from commit bd9900d65eb9cd5add0f107e94b513fa267495ba)
m4_maketemp (struct obstack *obs, int argc, token_data **argv) { if (bad_argc (argv[0], argc, 2, 2)) return; if (no_gnu_extensions) { /* POSIX states "any trailing 'X' characters [are] replaced with the current process ID as a string", without referencing the file system. Horribly insecure, but we have to do it when we are in traditional mode. For reference, Solaris m4 does: maketemp() -> `' maketemp(X) -> `X' maketemp(XX) -> `Xn', where n is last digit of pid maketemp(XXXXXXXX) -> `X00nnnnn', where nnnnn is 16-bit pid */ const char *str = ARG (1); int len = strlen (str); int i; int len2; M4ERROR ((warning_status, 0, "recommend using mkstemp instead")); for (i = len; i > 1; i--) if (str[i - 1] != 'X') break; obstack_grow (obs, str, i); str = ntoa ((int32_t) getpid (), 10); len2 = strlen (str); if (len2 > len - i) obstack_grow0 (obs, str + len2 - (len - i), len - i); else { while (i++ < len - len2) obstack_1grow (obs, '0'); obstack_grow0 (obs, str, len2); } } else mkstemp_helper (obs, ARG (1)); }
1
[]
m4
5345bb49077bfda9fabd048e563f9e7077fe335d
14,712,089,961,255,950,000,000,000,000,000,000,000
41
Minor security fix: Quote output of mkstemp. * src/builtin.c (mkstemp_helper): Produce quoted output. * doc/m4.texinfo (Mkstemp): Update the documentation and tests. * NEWS: Document this change. Signed-off-by: Eric Blake <ebb9@byu.net> (cherry picked from commit bd9900d65eb9cd5add0f107e94b513fa267495ba)
produce_frozen_state (const char *name) { FILE *file; int h; symbol *sym; const builtin *bp; if (file = fopen (name, O_BINARY ? "wb" : "w"), !file) { M4ERROR ((warning_status, errno, name)); return; } /* Write a recognizable header. */ xfprintf (file, "# This is a frozen state file generated by %s\n", PACKAGE_STRING); xfprintf (file, "V1\n"); /* Dump quote delimiters. */ if (strcmp (lquote.string, DEF_LQUOTE) || strcmp (rquote.string, DEF_RQUOTE)) { xfprintf (file, "Q%d,%d\n", (int) lquote.length, (int) rquote.length); fputs (lquote.string, file); fputs (rquote.string, file); fputc ('\n', file); } /* Dump comment delimiters. */ if (strcmp (bcomm.string, DEF_BCOMM) || strcmp (ecomm.string, DEF_ECOMM)) { xfprintf (file, "C%d,%d\n", (int) bcomm.length, (int) ecomm.length); fputs (bcomm.string, file); fputs (ecomm.string, file); fputc ('\n', file); } /* Dump all symbols. */ for (h = 0; h < hash_table_size; h++) { /* Process all entries in one bucket, from the last to the first. This order ensures that, at reload time, pushdef's will be executed with the oldest definitions first. */ symtab[h] = reverse_symbol_list (symtab[h]); for (sym = symtab[h]; sym; sym = SYMBOL_NEXT (sym)) { switch (SYMBOL_TYPE (sym)) { case TOKEN_TEXT: xfprintf (file, "T%d,%d\n", (int) strlen (SYMBOL_NAME (sym)), (int) strlen (SYMBOL_TEXT (sym))); fputs (SYMBOL_NAME (sym), file); fputs (SYMBOL_TEXT (sym), file); fputc ('\n', file); break; case TOKEN_FUNC: bp = find_builtin_by_addr (SYMBOL_FUNC (sym)); if (bp == NULL) { M4ERROR ((warning_status, 0, "\ INTERNAL ERROR: builtin not found in builtin table!")); abort (); } xfprintf (file, "F%d,%d\n", (int) strlen (SYMBOL_NAME (sym)), (int) strlen (bp->name)); fputs (SYMBOL_NAME (sym), file); fputs (bp->name, file); fputc ('\n', file); break; case TOKEN_VOID: /* Ignore placeholder tokens that exist due to traceon. */ break; default: M4ERROR ((warning_status, 0, "\ INTERNAL ERROR: bad token data type in freeze_one_symbol ()")); abort (); break; } } /* Reverse the bucket once more, putting it back as it was. */ symtab[h] = reverse_symbol_list (symtab[h]); } /* Let diversions be issued from output.c module, its cleaner to have this piece of code there. */ freeze_diversions (file); /* All done. */ fputs ("# End of frozen state file\n", file); if (close_stream (file) != 0) M4ERROR ((EXIT_FAILURE, errno, "unable to create frozen state")); }
1
[]
m4
035998112737e52cb229e342913ef404e5a51040
263,127,698,236,721,600,000,000,000,000,000,000,000
106
Security fix: avoid arbitrary code execution with 'm4 -F'. * src/freeze.c (produce_frozen_state): Never pass raw file name as printf format. * NEWS: Document this fix. Signed-off-by: Eric Blake <ebb9@byu.net> (cherry picked from commit 031a71a80442ed2ad3c2ee14d5811c786a12c51b)
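The vulnerability class here is a classic format-string bug: the error macro received the frozen-state file name as its format argument, so a name containing conversion directives could read or write memory. The shape of the fix, sketched with a stand-in error sink:

#include <stdio.h>

/* Stand-in for M4ERROR's printf-style sink. */
static void report_error(const char *fmt, const char *arg)
{
    fprintf(stderr, fmt, arg);
    fputc('\n', stderr);
}

void open_failed(const char *name)
{
    /* BAD:  report_error(name, NULL);
     * "name" is user-controlled; %n/%s directives inside it would be
     * interpreted, which is the arbitrary-code-execution vector. */
    report_error("%s", name);           /* GOOD: name is data, not format */
}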
static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, unsigned char *eoc, unsigned long **oid, unsigned int *len) { unsigned long subid; unsigned int size; unsigned long *optr; size = eoc - ctx->pointer + 1; *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); if (*oid == NULL) { if (net_ratelimit()) printk("OOM in bsalg (%d)\n", __LINE__); return 0; } optr = *oid; if (!asn1_subid_decode(ctx, &subid)) { kfree(*oid); *oid = NULL; return 0; } if (subid < 40) { optr [0] = 0; optr [1] = subid; } else if (subid < 80) { optr [0] = 1; optr [1] = subid - 40; } else { optr [0] = 2; optr [1] = subid - 80; } *len = 2; optr += 2; while (ctx->pointer < eoc) { if (++(*len) > size) { ctx->error = ASN1_ERR_DEC_BADVALUE; kfree(*oid); *oid = NULL; return 0; } if (!asn1_subid_decode(ctx, optr++)) { kfree(*oid); *oid = NULL; return 0; } } return 1; }
1
[ "CWE-119" ]
linux-2.6
ddb2c43594f22843e9f3153da151deaba1a834c5
258,736,852,088,290,000,000,000,000,000,000,000,000
55
asn1: additional sanity checking during BER decoding - Don't trust a length which is greater than the working buffer. An invalid length could cause overflow when calculating buffer size for decoding oid. - An oid length of zero is invalid and allows for an off-by-one error when decoding oid because the first subid actually encodes first 2 subids. - A primitive encoding may not have an indefinite length. Thanks to Wei Wang from McAfee for report. Cc: Steven French <sfrench@us.ibm.com> Cc: stable@kernel.org Acked-by: Patrick McHardy <kaber@trash.net> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
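The message lists three independent BER sanity checks. A condensed sketch of all three over a flat buffer, with illustrative names; the kernel versions operate on struct asn1_ctx and are spread across the decode helpers shown in the following records.

#include <stddef.h>

struct ber_ctx {
    const unsigned char *pointer;       /* current read position */
    const unsigned char *end;           /* one past the last valid byte */
};

/* 1. A decoded length must fit in what remains of the working buffer,
 *    or later size arithmetic (e.g. the kmalloc sizing in the oid
 *    decoder) can overflow. */
static int ber_len_ok(const struct ber_ctx *ctx, unsigned int len)
{
    return len <= (size_t)(ctx->end - ctx->pointer);
}

/* 2. An OID length of zero is invalid: the first octet already encodes
 *    the first two subids, so an empty body is an off-by-one trap. */
static int ber_oid_len_ok(unsigned int len)
{
    return len > 0;
}

/* 3. Only constructed encodings may use the indefinite length form. */
static int ber_form_ok(int constructed, int definite)
{
    return definite || constructed;
}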
asn1_oid_decode(struct asn1_ctx *ctx, unsigned char *eoc, unsigned long **oid, unsigned int *len) { unsigned long subid; unsigned int size; unsigned long *optr; size = eoc - ctx->pointer + 1; *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); if (*oid == NULL) return 0; optr = *oid; if (!asn1_subid_decode(ctx, &subid)) { kfree(*oid); *oid = NULL; return 0; } if (subid < 40) { optr[0] = 0; optr[1] = subid; } else if (subid < 80) { optr[0] = 1; optr[1] = subid - 40; } else { optr[0] = 2; optr[1] = subid - 80; } *len = 2; optr += 2; while (ctx->pointer < eoc) { if (++(*len) > size) { ctx->error = ASN1_ERR_DEC_BADVALUE; kfree(*oid); *oid = NULL; return 0; } if (!asn1_subid_decode(ctx, optr++)) { kfree(*oid); *oid = NULL; return 0; } } return 1; }
1
[ "CWE-119" ]
linux-2.6
ddb2c43594f22843e9f3153da151deaba1a834c5
24,934,009,460,877,333,000,000,000,000,000,000,000
50
asn1: additional sanity checking during BER decoding - Don't trust a length which is greater than the working buffer. An invalid length could cause overflow when calculating buffer size for decoding oid. - An oid length of zero is invalid and allows for an off-by-one error when decoding oid because the first subid actually encodes first 2 subids. - A primitive encoding may not have an indefinite length. Thanks to Wei Wang from McAfee for report. Cc: Steven French <sfrench@us.ibm.com> Cc: stable@kernel.org Acked-by: Patrick McHardy <kaber@trash.net> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
asn1_header_decode(struct asn1_ctx *ctx, unsigned char **eoc, unsigned int *cls, unsigned int *con, unsigned int *tag) { unsigned int def = 0; unsigned int len = 0; if (!asn1_id_decode(ctx, cls, con, tag)) return 0; if (!asn1_length_decode(ctx, &def, &len)) return 0; if (def) *eoc = ctx->pointer + len; else *eoc = NULL; return 1; }
1
[ "CWE-119" ]
linux-2.6
ddb2c43594f22843e9f3153da151deaba1a834c5
100,827,339,625,052,960,000,000,000,000,000,000,000
19
asn1: additional sanity checking during BER decoding - Don't trust a length which is greater than the working buffer. An invalid length could cause overflow when calculating buffer size for decoding oid. - An oid length of zero is invalid and allows for an off-by-one error when decoding oid because the first subid actually encodes first 2 subids. - A primitive encoding may not have an indefinite length. Thanks to Wei Wang from McAfee for report. Cc: Steven French <sfrench@us.ibm.com> Cc: stable@kernel.org Acked-by: Patrick McHardy <kaber@trash.net> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static unsigned char asn1_header_decode(struct asn1_ctx *ctx, unsigned char **eoc, unsigned int *cls, unsigned int *con, unsigned int *tag) { unsigned int def, len; if (!asn1_id_decode(ctx, cls, con, tag)) return 0; def = len = 0; if (!asn1_length_decode(ctx, &def, &len)) return 0; if (def) *eoc = ctx->pointer + len; else *eoc = NULL; return 1; }
1
[ "CWE-119" ]
linux-2.6
ddb2c43594f22843e9f3153da151deaba1a834c5
228,756,596,288,618,030,000,000,000,000,000,000,000
21
asn1: additional sanity checking during BER decoding - Don't trust a length which is greater than the working buffer. An invalid length could cause overflow when calculating buffer size for decoding oid. - An oid length of zero is invalid and allows for an off-by-one error when decoding oid because the first subid actually encodes first 2 subids. - A primitive encoding may not have an indefinite length. Thanks to Wei Wang from McAfee for report. Cc: Steven French <sfrench@us.ibm.com> Cc: stable@kernel.org Acked-by: Patrick McHardy <kaber@trash.net> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static unsigned char asn1_length_decode(struct asn1_ctx *ctx, unsigned int *def, unsigned int *len) { unsigned char ch, cnt; if (!asn1_octet_decode(ctx, &ch)) return 0; if (ch == 0x80) *def = 0; else { *def = 1; if (ch < 0x80) *len = ch; else { cnt = ch & 0x7F; *len = 0; while (cnt > 0) { if (!asn1_octet_decode(ctx, &ch)) return 0; *len <<= 8; *len |= ch; cnt--; } } } return 1; }
1
[ "CWE-119" ]
linux-2.6
ddb2c43594f22843e9f3153da151deaba1a834c5
214,953,782,388,236,550,000,000,000,000,000,000,000
31
asn1: additional sanity checking during BER decoding - Don't trust a length which is greater than the working buffer. An invalid length could cause overflow when calculating buffer size for decoding oid. - An oid length of zero is invalid and allows for an off-by-one error when decoding oid because the first subid actually encodes first 2 subids. - A primitive encoding may not have an indefinite length. Thanks to Wei Wang from McAfee for report. Cc: Steven French <sfrench@us.ibm.com> Cc: stable@kernel.org Acked-by: Patrick McHardy <kaber@trash.net> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
asn1_length_decode(struct asn1_ctx *ctx, unsigned int *def, unsigned int *len) { unsigned char ch, cnt; if (!asn1_octet_decode(ctx, &ch)) return 0; if (ch == 0x80) *def = 0; else { *def = 1; if (ch < 0x80) *len = ch; else { cnt = (unsigned char) (ch & 0x7F); *len = 0; while (cnt > 0) { if (!asn1_octet_decode(ctx, &ch)) return 0; *len <<= 8; *len |= ch; cnt--; } } } return 1; }
1
[ "CWE-119" ]
linux-2.6
ddb2c43594f22843e9f3153da151deaba1a834c5
188,088,692,906,320,600,000,000,000,000,000,000,000
29
asn1: additional sanity checking during BER decoding - Don't trust a length which is greater than the working buffer. An invalid length could cause overflow when calculating buffer size for decoding oid. - An oid length of zero is invalid and allows for an off-by-one error when decoding oid because the first subid actually encodes first 2 subids. - A primitive encoding may not have an indefinite length. Thanks to Wei Wang from McAfee for report. Cc: Steven French <sfrench@us.ibm.com> Cc: stable@kernel.org Acked-by: Patrick McHardy <kaber@trash.net> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static int ipip6_rcv(struct sk_buff *skb) { struct iphdr *iph; struct ip_tunnel *tunnel; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto out; iph = ip_hdr(skb); read_lock(&ipip6_lock); if ((tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr)) != NULL) { secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); skb->pkt_type = PACKET_HOST; if ((tunnel->dev->priv_flags & IFF_ISATAP) && !isatap_chksrc(skb, iph, tunnel)) { tunnel->stat.rx_errors++; read_unlock(&ipip6_lock); kfree_skb(skb); return 0; } tunnel->stat.rx_packets++; tunnel->stat.rx_bytes += skb->len; skb->dev = tunnel->dev; dst_release(skb->dst); skb->dst = NULL; nf_reset(skb); ipip6_ecn_decapsulate(iph, skb); netif_rx(skb); read_unlock(&ipip6_lock); return 0; } icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); kfree_skb(skb); read_unlock(&ipip6_lock); out: return 0; }
1
[ "CWE-399" ]
linux-2.6
36ca34cc3b8335eb1fe8bd9a1d0a2592980c3f02
33,362,704,464,284,364,000,000,000,000,000,000,000
45
sit: Add missing kfree_skb() on pskb_may_pull() failure. Noticed by Paul Marks <paul@pmarks.net>. Signed-off-by: David S. Miller <davem@davemloft.net>
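The bug is a buffer leak on one early-return path: pskb_may_pull() fails, the handler jumps to out, and the skb is never freed. A sketch of the general rule with malloc'd stand-ins for the skb; struct pkt and header_ok are illustrative, and in the kernel patch the release call is kfree_skb() on that failure path.

#include <stdlib.h>

struct pkt {
    unsigned char *data;
    size_t len;
};

static int header_ok(const struct pkt *p)
{
    return p->len >= 40;                /* e.g. sizeof(struct ipv6hdr) */
}

static int handle_pkt(struct pkt *p)
{
    if (!header_ok(p))
        goto drop;                      /* this path leaked in the bug */
    /* ... decapsulate and deliver p, then release it ... */
    free(p->data);
    free(p);
    return 0;
drop:
    free(p->data);                      /* release on the failure path too */
    free(p);
    return 0;
}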
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { u64 slice = __sched_period(cfs_rq->nr_running); slice *= se->load.weight; do_div(slice, cfs_rq->load.weight); return slice; }
1
[]
linux-2.6
6a6029b8cefe0ca7e82f27f3904dbedba3de4e06
58,865,871,396,891,120,000,000,000,000,000,000,000
9
sched: simplify sched_slice() Use the existing calc_delta_mine() calculation for sched_slice(). This saves a divide and simplifies the code because we share it with the other /cfs_rq->load users. It also improves code size: text data bss dec hex filename 42659 2740 144 45543 b1e7 sched.o.before 42093 2740 144 44977 afb1 sched.o.after Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
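The formula being shared is slice = period * se_weight / rq_weight. A plain 64-bit sketch of the refactor's shape; calc_delta below stands in for calc_delta_mine(), which in the kernel also caches a precomputed inverse weight to avoid the divide entirely.

#include <stdint.h>
#include <stdio.h>

/* delta * weight / total; assumes the product fits in 64 bits. */
static uint64_t calc_delta(uint64_t delta, uint64_t weight, uint64_t total)
{
    return delta * weight / total;
}

static uint64_t sched_slice(uint64_t period, uint64_t se_weight,
                            uint64_t rq_weight)
{
    return calc_delta(period, se_weight, rq_weight);
}

int main(void)
{
    /* A 20ms period split between weights 2048 and 1024: the heavier
     * task receives two thirds of the period. */
    printf("%llu\n", (unsigned long long)
           sched_slice(20000000ULL, 2048, 3072));
    return 0;
}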
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) { return __sched_vslice(cfs_rq->load.weight + se->load.weight, cfs_rq->nr_running + 1); }
1
[]
linux-2.6
ac884dec6d4a7df252150af875cffddf8f1d9c15
22,178,641,074,713,846,000,000,000,000,000,000,000
5
sched: fair-group scheduling vs latency Currently FAIR_GROUP sched grows the scheduler latency outside of sysctl_sched_latency, invert this so it stays within. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running) { u64 vslice = __sched_period(nr_running); vslice *= NICE_0_LOAD; do_div(vslice, rq_weight); return vslice; }
1
[]
linux-2.6
ac884dec6d4a7df252150af875cffddf8f1d9c15
26,078,873,616,473,650,000,000,000,000,000,000,000
9
sched: fair-group scheduling vs latency Currently FAIR_GROUP sched grows the scheduler latency outside of sysctl_sched_latency, invert this so it stays within. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { return calc_delta_mine(__sched_period(cfs_rq->nr_running), se->load.weight, &cfs_rq->load); }
1
[]
linux-2.6
ac884dec6d4a7df252150af875cffddf8f1d9c15
242,984,434,542,507,600,000,000,000,000,000,000,000
5
sched: fair-group scheduling vs latency Currently FAIR_GROUP sched grows the scheduler latency outside of sysctl_sched_latency, invert this so it stays within. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) { unsigned long nr_running = cfs_rq->nr_running; unsigned long weight; u64 vslice; if (!se->on_rq) nr_running++; vslice = __sched_period(nr_running); for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); weight = cfs_rq->load.weight; if (!se->on_rq) weight += se->load.weight; vslice *= NICE_0_LOAD; do_div(vslice, weight); } return vslice; }
1
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
199,943,656,571,357,600,000,000,000,000,000,000,000
24
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
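The per-level fractions in the message multiply down the hierarchy, so a task's root-view load is the product of weight(i)/rq_weight(i) along its parent chain. A small sketch with a parent pointer standing in for for_each_sched_entity(); the example reproduces the A/B tree from the message.

#include <stdio.h>

struct entity {
    double weight;                      /* this entity's weight */
    double rq_weight;                   /* total weight of its queue */
    struct entity *parent;
};

static double load_fraction(const struct entity *se)
{
    double frac = 1.0;
    for (; se != NULL; se = se->parent)
        frac *= se->weight / se->rq_weight;   /* weight(i)/rq_weight(i) */
    return frac;
}

int main(void)
{
    /* A (rq = 3072) holds group B (weight 2048) and task 1 (1024);
     * B (rq = 2048) holds tasks 2 and 3 (1024 each). */
    struct entity A_B = { 2048, 3072, NULL };
    struct entity t2  = { 1024, 2048, &A_B };
    printf("task 2 fraction: %f\n", load_fraction(&t2)); /* = 1/3 */
    return 0;
}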
static unsigned long wakeup_gran(struct sched_entity *se) { unsigned long gran = sysctl_sched_wakeup_granularity; /* * More easily preempt - nice tasks, while not making * it harder for + nice tasks. */ if (unlikely(se->load.weight > NICE_0_LOAD)) gran = calc_delta_fair(gran, &se->load); return gran; }
1
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
314,868,220,618,263,730,000,000,000,000,000,000,000
13
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { u64 slice = __sched_period(cfs_rq->nr_running); for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); slice *= se->load.weight; do_div(slice, cfs_rq->load.weight); } return slice; }
1
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
20,493,139,195,104,024,000,000,000,000,000,000,000
14
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
calc_delta_fair(unsigned long delta_exec, struct load_weight *lw) { return calc_delta_mine(delta_exec, NICE_0_LOAD, lw); }
1
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
275,354,062,946,871,070,000,000,000,000,000,000,000
4
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) { u64 vruntime; if (first_fair(cfs_rq)) { vruntime = min_vruntime(cfs_rq->min_vruntime, __pick_next_entity(cfs_rq)->vruntime); } else vruntime = cfs_rq->min_vruntime; /* * The 'current' period is already promised to the current tasks, * however the extra weight of the new task will slow them down a * little, place the new task so that it fits in the slot that * stays open at the end. */ if (initial && sched_feat(START_DEBIT)) vruntime += sched_vslice_add(cfs_rq, se); if (!initial) { /* sleeps upto a single latency don't count. */ if (sched_feat(NEW_FAIR_SLEEPERS)) { if (sched_feat(NORMALIZED_SLEEPER)) vruntime -= calc_delta_fair(sysctl_sched_latency, &cfs_rq->load); else vruntime -= sysctl_sched_latency; } /* ensure we never gain time by being placed backwards. */ vruntime = max_vruntime(se->vruntime, vruntime); } se->vruntime = vruntime; }
1
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
308,682,937,641,422,840,000,000,000,000,000,000,000
35
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, unsigned long delta_exec) { unsigned long delta_exec_weighted; schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max)); curr->sum_exec_runtime += delta_exec; schedstat_add(cfs_rq, exec_clock, delta_exec); delta_exec_weighted = delta_exec; if (unlikely(curr->load.weight != NICE_0_LOAD)) { delta_exec_weighted = calc_delta_fair(delta_exec_weighted, &curr->load); } curr->vruntime += delta_exec_weighted; }
1
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
65,520,173,795,750,995,000,000,000,000,000,000,000
16
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
_gnutls_server_name_recv_params (gnutls_session_t session, const opaque * data, size_t _data_size) { int i; const unsigned char *p; uint16_t len, type; ssize_t data_size = _data_size; int server_names = 0; if (session->security_parameters.entity == GNUTLS_SERVER) { DECR_LENGTH_RET (data_size, 2, 0); len = _gnutls_read_uint16 (data); if (len != data_size) { /* This is unexpected packet length, but * just ignore it, for now. */ gnutls_assert (); return 0; } p = data + 2; /* Count all server_names in the packet. */ while (data_size > 0) { DECR_LENGTH_RET (data_size, 1, 0); p++; DECR_LEN (data_size, 2); len = _gnutls_read_uint16 (p); p += 2; DECR_LENGTH_RET (data_size, len, 0); server_names++; p += len; } session->security_parameters.extensions.server_names_size = server_names; if (server_names == 0) return 0; /* no names found */ /* we cannot accept more server names. */ if (server_names > MAX_SERVER_NAME_EXTENSIONS) server_names = MAX_SERVER_NAME_EXTENSIONS; p = data + 2; for (i = 0; i < server_names; i++) { type = *p; p++; len = _gnutls_read_uint16 (p); p += 2; switch (type) { case 0: /* NAME_DNS */ if (len <= MAX_SERVER_NAME_SIZE) { memcpy (session->security_parameters.extensions. server_names[i].name, p, len); session->security_parameters.extensions. server_names[i].name_length = len; session->security_parameters.extensions. server_names[i].type = GNUTLS_NAME_DNS; break; } } /* move to next record */ p += len; } } return 0; }
1
[ "CWE-189" ]
gnutls
bc8102405fda11ea00ca3b42acc4f4bce9d6e97b
168,266,384,151,590,400,000,000,000,000,000,000,000
81
Fix GNUTLS-SA-2008-1 security vulnerabilities. See http://www.gnu.org/software/gnutls/security.html for updates.
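The server_name parser walks attacker-supplied length-prefixed records, so every declared length must be checked against the bytes actually remaining before it is consumed, and every copy bounded by its destination. A hedged sketch of that defensive TLV walk over a flat buffer; the constants and names are illustrative, not GnuTLS's.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define MAX_NAME 64

static int parse_server_names(const uint8_t *p, size_t remaining,
                              char out[][MAX_NAME + 1], size_t max_names)
{
    size_t count = 0;

    while (remaining > 0 && count < max_names) {
        uint16_t len;
        if (remaining < 3)
            return -1;                  /* need type byte + 2-byte length */
        len = (uint16_t)(p[1] << 8 | p[2]);
        p += 3;
        remaining -= 3;
        if (len > remaining)
            return -1;                  /* declared length overruns buffer */
        if (p[-3] == 0 && len <= MAX_NAME) {  /* type 0: host_name */
            memcpy(out[count], p, len);
            out[count][len] = '\0';
            count++;
        }
        p += len;
        remaining -= len;
    }
    return (int)count;
}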
_gnutls_ciphertext2compressed (gnutls_session_t session, opaque * compress_data, int compress_size, gnutls_datum_t ciphertext, uint8_t type) { uint8_t MAC[MAX_HASH_SIZE]; uint16_t c_length; uint8_t pad; int length; digest_hd_st td; uint16_t blocksize; int ret, i, pad_failed = 0; uint8_t major, minor; gnutls_protocol_t ver; int hash_size = _gnutls_hash_get_algo_len (session->security_parameters. read_mac_algorithm); ver = gnutls_protocol_get_version (session); minor = _gnutls_version_get_minor (ver); major = _gnutls_version_get_major (ver); blocksize = _gnutls_cipher_get_block_size (session->security_parameters. read_bulk_cipher_algorithm); /* initialize MAC */ ret = mac_init (&td, session->security_parameters.read_mac_algorithm, session->connection_state.read_mac_secret.data, session->connection_state.read_mac_secret.size, ver); if (ret < 0 && session->security_parameters.read_mac_algorithm != GNUTLS_MAC_NULL) { gnutls_assert (); return GNUTLS_E_INTERNAL_ERROR; } /* actual decryption (inplace) */ switch (_gnutls_cipher_is_block (session->security_parameters.read_bulk_cipher_algorithm)) { case CIPHER_STREAM: if ((ret = _gnutls_cipher_decrypt (&session->connection_state. read_cipher_state, ciphertext.data, ciphertext.size)) < 0) { gnutls_assert (); return ret; } length = ciphertext.size - hash_size; break; case CIPHER_BLOCK: if ((ciphertext.size < blocksize) || (ciphertext.size % blocksize != 0)) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } if ((ret = _gnutls_cipher_decrypt (&session->connection_state. read_cipher_state, ciphertext.data, ciphertext.size)) < 0) { gnutls_assert (); return ret; } /* ignore the IV in TLS 1.1. */ if (session->security_parameters.version >= GNUTLS_TLS1_1) { ciphertext.size -= blocksize; ciphertext.data += blocksize; if (ciphertext.size == 0) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } } pad = ciphertext.data[ciphertext.size - 1] + 1; /* pad */ length = ciphertext.size - hash_size - pad; if (pad > ciphertext.size - hash_size) { gnutls_assert (); /* We do not fail here. We check below for the * the pad_failed. If zero means success. */ pad_failed = GNUTLS_E_DECRYPTION_FAILED; } /* Check the pading bytes (TLS 1.x) */ if (ver >= GNUTLS_TLS1 && pad_failed == 0) for (i = 2; i < pad; i++) { if (ciphertext.data[ciphertext.size - i] != ciphertext.data[ciphertext.size - 1]) pad_failed = GNUTLS_E_DECRYPTION_FAILED; } break; default: gnutls_assert (); return GNUTLS_E_INTERNAL_ERROR; } if (length < 0) length = 0; c_length = _gnutls_conv_uint16 ((uint16_t) length); /* Pass the type, version, length and compressed through * MAC. */ if (session->security_parameters.read_mac_algorithm != GNUTLS_MAC_NULL) { _gnutls_hmac (&td, UINT64DATA (session->connection_state. read_sequence_number), 8); _gnutls_hmac (&td, &type, 1); if (ver >= GNUTLS_TLS1) { /* TLS 1.x */ _gnutls_hmac (&td, &major, 1); _gnutls_hmac (&td, &minor, 1); } _gnutls_hmac (&td, &c_length, 2); if (length > 0) _gnutls_hmac (&td, ciphertext.data, length); mac_deinit (&td, MAC, ver); } /* This one was introduced to avoid a timing attack against the TLS * 1.0 protocol. */ if (pad_failed != 0) return pad_failed; /* HMAC was not the same. */ if (memcmp (MAC, &ciphertext.data[length], hash_size) != 0) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } /* copy the decrypted stuff to compress_data. */ if (compress_size < length) { gnutls_assert (); return GNUTLS_E_DECOMPRESSION_FAILED; } memcpy (compress_data, ciphertext.data, length); return length; }
1
[ "CWE-189" ]
gnutls
bc8102405fda11ea00ca3b42acc4f4bce9d6e97b
137,431,019,550,729,330,000,000,000,000,000,000,000
167
Fix GNUTLS-SA-2008-1 security vulnerabilities. See http://www.gnu.org/software/gnutls/security.html for updates.
_gnutls_recv_handshake_header (gnutls_session_t session, gnutls_handshake_description_t type, gnutls_handshake_description_t * recv_type) { int ret; uint32_t length32 = 0; uint8_t *dataptr = NULL; /* for realloc */ size_t handshake_header_size = HANDSHAKE_HEADER_SIZE; /* if we have data into the buffer then return them, do not read the next packet. * In order to return we need a full TLS handshake header, or in case of a version 2 * packet, then we return the first byte. */ if (session->internals.handshake_header_buffer.header_size == handshake_header_size || (session->internals.v2_hello != 0 && type == GNUTLS_HANDSHAKE_CLIENT_HELLO && session->internals. handshake_header_buffer.packet_length > 0)) { *recv_type = session->internals.handshake_header_buffer.recv_type; return session->internals.handshake_header_buffer.packet_length; } /* Note: SSL2_HEADERS == 1 */ dataptr = session->internals.handshake_header_buffer.header; /* If we haven't already read the handshake headers. */ if (session->internals.handshake_header_buffer.header_size < SSL2_HEADERS) { ret = _gnutls_handshake_io_recv_int (session, GNUTLS_HANDSHAKE, type, dataptr, SSL2_HEADERS); if (ret < 0) { gnutls_assert (); return ret; } /* The case ret==0 is caught here. */ if (ret != SSL2_HEADERS) { gnutls_assert (); return GNUTLS_E_UNEXPECTED_PACKET_LENGTH; } session->internals.handshake_header_buffer.header_size = SSL2_HEADERS; } if (session->internals.v2_hello == 0 || type != GNUTLS_HANDSHAKE_CLIENT_HELLO) { ret = _gnutls_handshake_io_recv_int (session, GNUTLS_HANDSHAKE, type, &dataptr[session-> internals. handshake_header_buffer. header_size], HANDSHAKE_HEADER_SIZE - session->internals. handshake_header_buffer.header_size); if (ret <= 0) { gnutls_assert (); return (ret < 0) ? ret : GNUTLS_E_UNEXPECTED_PACKET_LENGTH; } if ((size_t) ret != HANDSHAKE_HEADER_SIZE - session->internals.handshake_header_buffer.header_size) { gnutls_assert (); return GNUTLS_E_UNEXPECTED_PACKET_LENGTH; } *recv_type = dataptr[0]; /* we do not use DECR_LEN because we know * that the packet has enough data. */ length32 = _gnutls_read_uint24 (&dataptr[1]); handshake_header_size = HANDSHAKE_HEADER_SIZE; _gnutls_handshake_log ("HSK[%x]: %s was received [%ld bytes]\n", session, _gnutls_handshake2str (dataptr[0]), length32 + HANDSHAKE_HEADER_SIZE); } else { /* v2 hello */ length32 = session->internals.v2_hello - SSL2_HEADERS; /* we've read the first byte */ handshake_header_size = SSL2_HEADERS; /* we've already read one byte */ *recv_type = dataptr[0]; _gnutls_handshake_log ("HSK[%x]: %s(v2) was received [%ld bytes]\n", session, _gnutls_handshake2str (*recv_type), length32 + handshake_header_size); if (*recv_type != GNUTLS_HANDSHAKE_CLIENT_HELLO) { /* it should be one or nothing */ gnutls_assert (); return GNUTLS_E_UNEXPECTED_HANDSHAKE_PACKET; } } /* put the packet into the buffer */ session->internals.handshake_header_buffer.header_size = handshake_header_size; session->internals.handshake_header_buffer.packet_length = length32; session->internals.handshake_header_buffer.recv_type = *recv_type; if (*recv_type != type) { gnutls_assert (); return GNUTLS_E_UNEXPECTED_HANDSHAKE_PACKET; } return length32; }
1
[ "CWE-189" ]
gnutls
bc8102405fda11ea00ca3b42acc4f4bce9d6e97b
205,220,500,280,194,900,000,000,000,000,000,000,000
124
Fix GNUTLS-SA-2008-1 security vulnerabilities. See http://www.gnu.org/software/gnutls/security.html for updates.
_gnutls_ciphertext2compressed (gnutls_session_t session, opaque * compress_data, int compress_size, gnutls_datum_t ciphertext, uint8_t type) { uint8_t MAC[MAX_HASH_SIZE]; uint16_t c_length; uint8_t pad; int length; digest_hd_st td; uint16_t blocksize; int ret, i, pad_failed = 0; uint8_t major, minor; gnutls_protocol_t ver; int hash_size = _gnutls_hash_get_algo_len (session->security_parameters. read_mac_algorithm); ver = gnutls_protocol_get_version (session); minor = _gnutls_version_get_minor (ver); major = _gnutls_version_get_major (ver); blocksize = _gnutls_cipher_get_block_size (session->security_parameters. read_bulk_cipher_algorithm); /* initialize MAC */ ret = mac_init (&td, session->security_parameters.read_mac_algorithm, session->connection_state.read_mac_secret.data, session->connection_state.read_mac_secret.size, ver); if (ret < 0 && session->security_parameters.read_mac_algorithm != GNUTLS_MAC_NULL) { gnutls_assert (); return GNUTLS_E_INTERNAL_ERROR; } if (ciphertext.size < (unsigned) blocksize + hash_size) { _gnutls_record_log ("REC[%x]: Short record length %d < %d + %d (under attack?)\n", session, ciphertext.size, blocksize, hash_size); gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } /* actual decryption (inplace) */ switch (_gnutls_cipher_is_block (session->security_parameters.read_bulk_cipher_algorithm)) { case CIPHER_STREAM: if ((ret = _gnutls_cipher_decrypt (&session->connection_state. read_cipher_state, ciphertext.data, ciphertext.size)) < 0) { gnutls_assert (); return ret; } length = ciphertext.size - hash_size; break; case CIPHER_BLOCK: if ((ciphertext.size < blocksize) || (ciphertext.size % blocksize != 0)) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } if ((ret = _gnutls_cipher_decrypt (&session->connection_state. read_cipher_state, ciphertext.data, ciphertext.size)) < 0) { gnutls_assert (); return ret; } /* ignore the IV in TLS 1.1. */ if (session->security_parameters.version >= GNUTLS_TLS1_1) { ciphertext.size -= blocksize; ciphertext.data += blocksize; if (ciphertext.size == 0) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } } pad = ciphertext.data[ciphertext.size - 1] + 1; /* pad */ if ((int)pad > (int)ciphertext.size - hash_size) { gnutls_assert (); /* We do not fail here. We check below for the * the pad_failed. If zero means success. */ pad_failed = GNUTLS_E_DECRYPTION_FAILED; } length = ciphertext.size - hash_size - pad; /* Check the pading bytes (TLS 1.x) */ if (ver >= GNUTLS_TLS1 && pad_failed == 0) for (i = 2; i < pad; i++) { if (ciphertext.data[ciphertext.size - i] != ciphertext.data[ciphertext.size - 1]) pad_failed = GNUTLS_E_DECRYPTION_FAILED; } break; default: gnutls_assert (); return GNUTLS_E_INTERNAL_ERROR; } if (length < 0) length = 0; c_length = _gnutls_conv_uint16 ((uint16_t) length); /* Pass the type, version, length and compressed through * MAC. */ if (session->security_parameters.read_mac_algorithm != GNUTLS_MAC_NULL) { _gnutls_hmac (&td, UINT64DATA (session->connection_state. read_sequence_number), 8); _gnutls_hmac (&td, &type, 1); if (ver >= GNUTLS_TLS1) { /* TLS 1.x */ _gnutls_hmac (&td, &major, 1); _gnutls_hmac (&td, &minor, 1); } _gnutls_hmac (&td, &c_length, 2); if (length > 0) _gnutls_hmac (&td, ciphertext.data, length); mac_deinit (&td, MAC, ver); } /* This one was introduced to avoid a timing attack against the TLS * 1.0 protocol. */ if (pad_failed != 0) return pad_failed; /* HMAC was not the same. */ if (memcmp (MAC, &ciphertext.data[length], hash_size) != 0) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } /* copy the decrypted stuff to compress_data. */ if (compress_size < length) { gnutls_assert (); return GNUTLS_E_DECOMPRESSION_FAILED; } memcpy (compress_data, ciphertext.data, length); return length; }
1
[ "CWE-189" ]
gnutls
d223040e498bd50a4b9e0aa493e78587ae1ed653
133,466,176,970,244,050,000,000,000,000,000,000,000
175
Fix broken debug check for GNUTLS-SA-2008-1.
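The patched function above adds two guards worth isolating: a record shorter than blocksize + hash_size is rejected outright, and the pad comparison is forced into signed arithmetic so a 255-valued pad byte cannot wrap the unsigned subtraction. A condensed sketch with simplified types; sizes are assumed to fit in int, as in the original.

#include <stddef.h>

static int record_sane(size_t ciphertext_size, size_t blocksize,
                       size_t hash_size, unsigned char last_byte)
{
    int pad = (int)last_byte + 1;

    if (ciphertext_size < blocksize + hash_size)
        return 0;                       /* short record: likely an attack */
    if (pad > (int)ciphertext_size - (int)hash_size)
        return 0;                       /* pad claims bytes that don't exist */
    return 1;
}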
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int write, int force, struct page **pages, struct vm_area_struct **vmas) { int i; unsigned int vm_flags; if (len <= 0) return 0; /* * Require read or write permissions. * If 'force' is set, we only require the "MAY" flags. */ vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); i = 0; do { struct vm_area_struct *vma; unsigned int foll_flags; vma = find_extend_vma(mm, start); if (!vma && in_gate_area(tsk, start)) { unsigned long pg = start & PAGE_MASK; struct vm_area_struct *gate_vma = get_gate_vma(tsk); pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; if (write) /* user gate pages are read-only */ return i ? : -EFAULT; if (pg > TASK_SIZE) pgd = pgd_offset_k(pg); else pgd = pgd_offset_gate(mm, pg); BUG_ON(pgd_none(*pgd)); pud = pud_offset(pgd, pg); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, pg); if (pmd_none(*pmd)) return i ? : -EFAULT; pte = pte_offset_map(pmd, pg); if (pte_none(*pte)) { pte_unmap(pte); return i ? : -EFAULT; } if (pages) { struct page *page = vm_normal_page(gate_vma, start, *pte); pages[i] = page; if (page) get_page(page); } pte_unmap(pte); if (vmas) vmas[i] = gate_vma; i++; start += PAGE_SIZE; len--; continue; } if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP)) || !(vm_flags & vma->vm_flags)) return i ? : -EFAULT; if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &len, i, write); continue; } foll_flags = FOLL_TOUCH; if (pages) foll_flags |= FOLL_GET; if (!write && !(vma->vm_flags & VM_LOCKED) && (!vma->vm_ops || !vma->vm_ops->fault)) foll_flags |= FOLL_ANON; do { struct page *page; /* * If tsk is ooming, cut off its access to large memory * allocations. It has a pending SIGKILL, but it can't * be processed until returning to user space. */ if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE))) return -ENOMEM; if (write) foll_flags |= FOLL_WRITE; cond_resched(); while (!(page = follow_page(vma, start, foll_flags))) { int ret; ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE); if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) return i ? i : -ENOMEM; else if (ret & VM_FAULT_SIGBUS) return i ? i : -EFAULT; BUG(); } if (ret & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; /* * The VM_FAULT_WRITE bit tells us that * do_wp_page has broken COW when necessary, * even if maybe_mkwrite decided not to set * pte_write. We can thus safely do subsequent * page lookups as if they were reads. */ if (ret & VM_FAULT_WRITE) foll_flags &= ~FOLL_WRITE; cond_resched(); } if (pages) { pages[i] = page; flush_anon_page(vma, page, start); flush_dcache_page(page); } if (vmas) vmas[i] = vma; i++; start += PAGE_SIZE; len--; } while (len && start < vma->vm_end); } while (len); return i; }
1
[ "CWE-20" ]
linux-2.6
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
192,373,302,205,049,660,000,000,000,000,000,000,000
136
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit 557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed the ZERO_PAGE from the VM mappings, any users of get_user_pages() will generally now populate the VM with real empty pages needlessly. We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but since fault handling no longer uses ZERO_PAGE for new anonymous pages, we now need to handle that special case in follow_page() instead. In particular, the removal of ZERO_PAGE effectively removed the core file writing optimization where we would skip writing pages that had not been populated at all, and increased memory pressure a lot by allocating all those useless newly zeroed pages. This reinstates the optimization by making the unmapped PTE case the same as for a non-existent page table, which already did this correctly. While at it, this also fixes the XIP case for follow_page(), where the caller could not differentiate between the case of a page that simply could not be used (because it had no "struct page" associated with it) and a page that just wasn't mapped. We do that by simply returning an error pointer for pages that could not be turned into a "struct page *". The error is arbitrarily picked to be EFAULT, since that was what get_user_pages() already used for the equivalent IO-mapped page case. [ Also removed an impossible test for pte_offset_map_lock() failing: that's not how that function works ] Acked-by: Oleg Nesterov <oleg@tv-sign.ru> Acked-by: Nick Piggin <npiggin@suse.de> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Hugh Dickins <hugh@veritas.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Roland McGrath <roland@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
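The follow_page() change relies on the kernel's encoded-errno pointer idiom: a lookup can now return a valid page, NULL (not mapped, fault it in), or an error pointer (mapped but unusable, e.g. the XIP no-struct-page case, reported as -EFAULT). A plain-C sketch of that idiom, mirroring ERR_PTR/IS_ERR.

#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err)   /* encode -errno as a pointer */
{
    return (void *)(intptr_t)err;
}

static inline long ptr_err(const void *p)
{
    return (long)(intptr_t)p;
}

static inline int is_err(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Caller pattern after the change:
 *   page = follow(addr);
 *   if (is_err(page))   return ptr_err(page);  // unusable, e.g. -EFAULT
 *   if (page == NULL)   fault_in(addr);        // not mapped: populate
 */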
static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm) { down_read(&mm->mmap_sem); for ( ; pm->node != MAX_NUMNODES; pm++) { struct vm_area_struct *vma; struct page *page; int err; err = -EFAULT; vma = find_vma(mm, pm->addr); if (!vma) goto set_status; page = follow_page(vma, pm->addr, 0); err = -ENOENT; /* Use PageReserved to check for zero page */ if (!page || PageReserved(page)) goto set_status; err = page_to_nid(page); set_status: pm->status = err; } up_read(&mm->mmap_sem); return 0; }
1
[ "CWE-20" ]
linux-2.6
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
190,691,605,824,195,420,000,000,000,000,000,000,000
28
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit 557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed the ZERO_PAGE from the VM mappings, any users of get_user_pages() will generally now populate the VM with real empty pages needlessly. We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but since fault handling no longer uses ZERO_PAGE for new anonymous pages, we now need to handle that special case in follow_page() instead. In particular, the removal of ZERO_PAGE effectively removed the core file writing optimization where we would skip writing pages that had not been populated at all, and increased memory pressure a lot by allocating all those useless newly zeroed pages. This reinstates the optimization by making the unmapped PTE case the same as for a non-existent page table, which already did this correctly. While at it, this also fixes the XIP case for follow_page(), where the caller could not differentiate between the case of a page that simply could not be used (because it had no "struct page" associated with it) and a page that just wasn't mapped. We do that by simply returning an error pointer for pages that could not be turned into a "struct page *". The error is arbitrarily picked to be EFAULT, since that was what get_user_pages() already used for the equivalent IO-mapped page case. [ Also removed an impossible test for pte_offset_map_lock() failing: that's not how that function works ] Acked-by: Oleg Nesterov <oleg@tv-sign.ru> Acked-by: Nick Piggin <npiggin@suse.de> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Hugh Dickins <hugh@veritas.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Roland McGrath <roland@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
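Both do_pages_stat() above and do_move_pages() below recognize the zero page through PageReserved(), i.e. by testing a bit in page->flags. The following is a small standalone sketch of that page-flag bit test; the PG_* constants and the pared-down struct page are hypothetical, declared only for this example.

/* Userspace sketch of a PG_*-style flag test on a pared-down struct page. */
#include <stdio.h>

/* Hypothetical flag bit positions, mirroring the kernel's PG_* naming. */
enum { PG_locked, PG_reserved, PG_dirty };

struct page {
    unsigned long flags;
};

static int PageReserved(const struct page *page)
{
    return (page->flags >> PG_reserved) & 1;
}

int main(void)
{
    struct page zero_page = { .flags = 1UL << PG_reserved };
    struct page user_page = { .flags = 1UL << PG_dirty };

    printf("zero page reserved? %d\n", PageReserved(&zero_page)); /* 1 */
    printf("user page reserved? %d\n", PageReserved(&user_page)); /* 0 */
    return 0;
}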
static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
                int migrate_all)
{
    int err;
    struct page_to_node *pp;
    LIST_HEAD(pagelist);

    down_read(&mm->mmap_sem);

    /*
     * Build a list of pages to migrate
     */
    migrate_prep();
    for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
        struct vm_area_struct *vma;
        struct page *page;

        /*
         * A valid page pointer that will not match any of the
         * pages that will be moved.
         */
        pp->page = ZERO_PAGE(0);

        err = -EFAULT;
        vma = find_vma(mm, pp->addr);
        if (!vma || !vma_migratable(vma))
            goto set_status;

        page = follow_page(vma, pp->addr, FOLL_GET);
        err = -ENOENT;
        if (!page)
            goto set_status;

        if (PageReserved(page))     /* Check for zero page */
            goto put_and_set;

        pp->page = page;
        err = page_to_nid(page);

        if (err == pp->node)
            /*
             * Node already in the right place
             */
            goto put_and_set;

        err = -EACCES;
        if (page_mapcount(page) > 1 &&
                !migrate_all)
            goto put_and_set;

        err = isolate_lru_page(page, &pagelist);
put_and_set:
        /*
         * Either remove the duplicate refcount from
         * isolate_lru_page() or drop the page ref if it was
         * not isolated.
         */
        put_page(page);
set_status:
        pp->status = err;
    }

    if (!list_empty(&pagelist))
        err = migrate_pages(&pagelist, new_page_node,
                (unsigned long)pm);
    else
        err = -ENOENT;

    up_read(&mm->mmap_sem);
    return err;
}
1
[ "CWE-20" ]
linux-2.6
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
18,220,699,088,629,602,000,000,000,000,000,000,000
71
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit 557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed the ZERO_PAGE from the VM mappings, any users of get_user_pages() will generally now populate the VM with real empty pages needlessly. We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but since fault handling no longer uses ZERO_PAGE for new anonymous pages, we now need to handle that special case in follow_page() instead. In particular, the removal of ZERO_PAGE effectively removed the core file writing optimization where we would skip writing pages that had not been populated at all, and increased memory pressure a lot by allocating all those useless newly zeroed pages. This reinstates the optimization by making the unmapped PTE case the same as for a non-existent page table, which already did this correctly. While at it, this also fixes the XIP case for follow_page(), where the caller could not differentiate between the case of a page that simply could not be used (because it had no "struct page" associated with it) and a page that just wasn't mapped. We do that by simply returning an error pointer for pages that could not be turned into a "struct page *". The error is arbitrarily picked to be EFAULT, since that was what get_user_pages() already used for the equivalent IO-mapped page case. [ Also removed an impossible test for pte_offset_map_lock() failing: that's not how that function works ] Acked-by: Oleg Nesterov <oleg@tv-sign.ru> Acked-by: Nick Piggin <npiggin@suse.de> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Hugh Dickins <hugh@veritas.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Roland McGrath <roland@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static void dump_one_vdso_page(struct page *pg, struct page *upg)
{
    printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
           page_count(pg),
           pg->flags);
    if (upg/* && pg != upg*/) {
        printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg)
                               << PAGE_SHIFT),
               page_count(upg),
               upg->flags);
    }
    printk("\n");
}
1
[ "CWE-20" ]
linux-2.6
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
198,053,340,576,165,900,000,000,000,000,000,000,000
13
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit 557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed the ZERO_PAGE from the VM mappings, any users of get_user_pages() will generally now populate the VM with real empty pages needlessly. We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but since fault handling no longer uses ZERO_PAGE for new anonymous pages, we now need to handle that special case in follow_page() instead. In particular, the removal of ZERO_PAGE effectively removed the core file writing optimization where we would skip writing pages that had not been populated at all, and increased memory pressure a lot by allocating all those useless newly zeroed pages. This reinstates the optimization by making the unmapped PTE case the same as for a non-existent page table, which already did this correctly. While at it, this also fixes the XIP case for follow_page(), where the caller could not differentiate between the case of a page that simply could not be used (because it had no "struct page" associated with it) and a page that just wasn't mapped. We do that by simply returning an error pointer for pages that could not be turned into a "struct page *". The error is arbitrarily picked to be EFAULT, since that was what get_user_pages() already used for the equivalent IO-mapped page case. [ Also removed an impossible test for pte_offset_map_lock() failing: that's not how that function works ] Acked-by: Oleg Nesterov <oleg@tv-sign.ru> Acked-by: Nick Piggin <npiggin@suse.de> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Hugh Dickins <hugh@veritas.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Roland McGrath <roland@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>