Columns:
	id                   stringlengths  25–30
	content              stringlengths  14–942k
	max_stars_repo_path  stringlengths  49–55
crossvul-cpp_data_bad_5861_20
/* Glue code for SHA512 hashing optimized for sparc64 crypto opcodes.
 *
 * This is based largely upon crypto/sha512_generic.c
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>

#include <asm/pstate.h>
#include <asm/elf.h>

#include "opcodes.h"

asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data,
					 unsigned int rounds);

static int sha512_sparc64_init(struct shash_desc *desc)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	sctx->state[0] = SHA512_H0;
	sctx->state[1] = SHA512_H1;
	sctx->state[2] = SHA512_H2;
	sctx->state[3] = SHA512_H3;
	sctx->state[4] = SHA512_H4;
	sctx->state[5] = SHA512_H5;
	sctx->state[6] = SHA512_H6;
	sctx->state[7] = SHA512_H7;
	sctx->count[0] = sctx->count[1] = 0;

	return 0;
}

static int sha384_sparc64_init(struct shash_desc *desc)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	sctx->state[0] = SHA384_H0;
	sctx->state[1] = SHA384_H1;
	sctx->state[2] = SHA384_H2;
	sctx->state[3] = SHA384_H3;
	sctx->state[4] = SHA384_H4;
	sctx->state[5] = SHA384_H5;
	sctx->state[6] = SHA384_H6;
	sctx->state[7] = SHA384_H7;
	sctx->count[0] = sctx->count[1] = 0;

	return 0;
}

static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data,
				    unsigned int len, unsigned int partial)
{
	unsigned int done = 0;

	if ((sctx->count[0] += len) < len)
		sctx->count[1]++;
	if (partial) {
		done = SHA512_BLOCK_SIZE - partial;
		memcpy(sctx->buf + partial, data, done);
		sha512_sparc64_transform(sctx->state, sctx->buf, 1);
	}
	if (len - done >= SHA512_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;

		sha512_sparc64_transform(sctx->state, data + done, rounds);
		done += rounds * SHA512_BLOCK_SIZE;
	}

	memcpy(sctx->buf, data + done, len - done);
}

static int sha512_sparc64_update(struct shash_desc *desc, const u8 *data,
				 unsigned int len)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;

	/* Handle the fast case right here */
	if (partial + len < SHA512_BLOCK_SIZE) {
		if ((sctx->count[0] += len) < len)
			sctx->count[1]++;
		memcpy(sctx->buf + partial, data, len);
	} else
		__sha512_sparc64_update(sctx, data, len, partial);

	return 0;
}

static int sha512_sparc64_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be64 *dst = (__be64 *)out;
	__be64 bits[2];
	static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };

	/* Save number of bits */
	bits[1] = cpu_to_be64(sctx->count[0] << 3);
	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);

	/* Pad out to 112 mod 128 and append length */
	index = sctx->count[0] % SHA512_BLOCK_SIZE;
	padlen = (index < 112) ?
		(112 - index) : ((SHA512_BLOCK_SIZE+112) - index);

	/* We need to fill a whole block for __sha512_sparc64_update() */
	if (padlen <= 112) {
		if ((sctx->count[0] += padlen) < padlen)
			sctx->count[1]++;
		memcpy(sctx->buf + index, padding, padlen);
	} else {
		__sha512_sparc64_update(sctx, padding, padlen, index);
	}

	__sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112);

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be64(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash)
{
	u8 D[64];

	sha512_sparc64_final(desc, D);

	memcpy(hash, D, 48);
	memset(D, 0, 64);

	return 0;
}

static struct shash_alg sha512 = {
	.digestsize	=	SHA512_DIGEST_SIZE,
	.init		=	sha512_sparc64_init,
	.update		=	sha512_sparc64_update,
	.final		=	sha512_sparc64_final,
	.descsize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	=	"sha512",
		.cra_driver_name=	"sha512-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA512_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static struct shash_alg sha384 = {
	.digestsize	=	SHA384_DIGEST_SIZE,
	.init		=	sha384_sparc64_init,
	.update		=	sha512_sparc64_update,
	.final		=	sha384_sparc64_final,
	.descsize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	=	"sha384",
		.cra_driver_name=	"sha384-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA384_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static bool __init sparc64_has_sha512_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
	if (!(cfr & CFR_SHA512))
		return false;

	return true;
}

static int __init sha512_sparc64_mod_init(void)
{
	if (sparc64_has_sha512_opcode()) {
		int ret = crypto_register_shash(&sha384);
		if (ret < 0)
			return ret;

		ret = crypto_register_shash(&sha512);
		if (ret < 0) {
			crypto_unregister_shash(&sha384);
			return ret;
		}

		pr_info("Using sparc64 sha512 opcode optimized SHA-512/SHA-384 implementation\n");
		return 0;
	}
	pr_info("sparc64 sha512 opcode not available.\n");
	return -ENODEV;
}

static void __exit sha512_sparc64_mod_fini(void)
{
	crypto_unregister_shash(&sha384);
	crypto_unregister_shash(&sha512);
}

module_init(sha512_sparc64_mod_init);
module_exit(sha512_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");

MODULE_ALIAS("sha384");
MODULE_ALIAS("sha512");

#include "crop_devid.c"
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_20
crossvul-cpp_data_bad_2399_15
/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch]
 */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

struct priv {
	struct crypto_cipher *child;
	struct lrw_table_ctx table;
};

static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
	be128 tmp = { 0 };
	int i;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

void lrw_free_table(struct lrw_table_ctx *ctx)
{
	if (ctx->table)
		gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

static int setkey(struct crypto_tfm *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen - bsize);
	if (err)
		return err;
	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	return lrw_init_table(&ctx->table, tweak);
}

struct sinfo {
	be128 t;
	struct crypto_tfm *tfm;
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
};

static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}

static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
{
	be128_xor(dst, &s->t, src);		/* PP <- T xor P */
	s->fn(s->tfm, dst, dst);		/* CC <- E(Key2,PP) */
	be128_xor(dst, dst, &s->t);		/* C <- T xor CC */
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *) block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)
			continue;

		return x + ffz(val);
	}

	return x;
}

static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = LRW_BLOCK_SIZE;
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	be128 *iv;
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!(avail = w->nbytes))
		return err;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)w->iv;
	s.t = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&s.t, ctx->table.table);

	goto first;

	for (;;) {
		do {
			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&s.t, &s.t,
				  &ctx->table.mulinc[get_index128(iv)]);
			inc(iv);

first:
			lrw_round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!(avail = w->nbytes))
			break;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_decrypt);
}

int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct lrw_crypt_req *req)
{
	const unsigned int bsize = LRW_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct lrw_table_ctx *ctx = req->table_ctx;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *iv, *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(walk.nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)walk.iv;
	t_buf[0] = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&t_buf[0], ctx->table);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				/* T <- I*Key2, using the optimization
				 * discussed in the specification */
				be128_xor(&t_buf[i], t,
					  &ctx->mulinc[get_index128(iv)]);
				inc(iv);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key2,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

static int init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct priv *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		crypto_free_cipher(cipher);
		return -EINVAL;
	}

	ctx->child = cipher;
	return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct priv *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->table);
	crypto_free_cipher(ctx->child);
}

static struct crypto_instance *alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("lrw", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;

	if (alg->cra_alignmask < 7)
		inst->alg.cra_alignmask = 7;
	else
		inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	if (!(alg->cra_blocksize % 4))
		inst->alg.cra_alignmask |= 3;
	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize =
		alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
	inst->alg.cra_blkcipher.max_keysize =
		alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;

	inst->alg.cra_ctxsize = sizeof(struct priv);

	inst->alg.cra_init = init_tfm;
	inst->alg.cra_exit = exit_tfm;

	inst->alg.cra_blkcipher.setkey = setkey;
	inst->alg.cra_blkcipher.encrypt = encrypt;
	inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.alloc = alloc,
	.free = free,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2399_15
crossvul-cpp_data_good_5861_6
/*
 * Cryptographic API.
 *
 * powerpc implementation of the SHA1 Secure Hash Algorithm.
 *
 * Derived from cryptoapi implementation, adapted for in-place
 * scatterlist interface.
 *
 * Derived from "crypto/sha1.c"
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>

extern void powerpc_sha_transform(u32 *state, const u8 *src, u32 *temp);

static int sha1_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

static int sha1_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;

	if ((partial + len) > 63) {
		u32 temp[SHA_WORKSPACE_WORDS];

		if (partial) {
			done = -partial;
			memcpy(sctx->buffer + partial, data, done + 64);
			src = sctx->buffer;
		}

		do {
			powerpc_sha_transform(sctx->state, src, temp);
			done += 64;
			src = data + done;
		} while (done + 63 < len);

		memset(temp, 0, sizeof(temp));
		partial = 0;
	}
	memcpy(sctx->buffer + partial, src, len - done);

	return 0;
}

/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	__be32 *dst = (__be32 *)out;
	u32 i, index, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 */
	index = sctx->count & 0x3f;
	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
	sha1_update(desc, padding, padlen);

	/* Append length */
	sha1_update(desc, (const u8 *)&bits, sizeof(bits));

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof *sctx);

	return 0;
}

static int sha1_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));
	return 0;
}

static int sha1_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));
	return 0;
}

static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_init,
	.update		=	sha1_update,
	.final		=	sha1_final,
	.export		=	sha1_export,
	.import		=	sha1_import,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-powerpc",
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static int __init sha1_powerpc_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit sha1_powerpc_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_powerpc_mod_init);
module_exit(sha1_powerpc_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sha1-powerpc");
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_6
crossvul-cpp_data_bad_1513_2
/*
 * PCI Backend - Handles the virtual fields in the configuration space headers.
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"

struct pci_bar_info {
	u32 val;
	u32 len_val;
	int which;
};

#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)

static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
{
	int i;
	int ret;

	ret = xen_pcibk_read_config_word(dev, offset, value, data);
	if (!pci_is_enabled(dev))
		return ret;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		if (dev->resource[i].flags & IORESOURCE_IO)
			*value |= PCI_COMMAND_IO;
		if (dev->resource[i].flags & IORESOURCE_MEM)
			*value |= PCI_COMMAND_MEMORY;
	}

	return ret;
}

static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
	struct xen_pcibk_dev_data *dev_data;
	int err;

	dev_data = pci_get_drvdata(dev);
	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
			       pci_name(dev));
		err = pci_enable_device(dev);
		if (err)
			return err;
		if (dev_data)
			dev_data->enable_intx = 1;
	} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
			       pci_name(dev));
		pci_disable_device(dev);
		if (dev_data)
			dev_data->enable_intx = 0;
	}

	if (!dev->is_busmaster && is_master_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
			       pci_name(dev));
		pci_set_master(dev);
	}

	if (value & PCI_COMMAND_INVALIDATE) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG
			       DRV_NAME ": %s: enable memory-write-invalidate\n",
			       pci_name(dev));
		err = pci_set_mwi(dev);
		if (err) {
			pr_warn("%s: cannot enable memory-write-invalidate (%d)\n",
				pci_name(dev), err);
			value &= ~PCI_COMMAND_INVALIDATE;
		}
	}

	return pci_write_config_word(dev, offset, value);
}

static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
{
	struct pci_bar_info *bar = data;

	if (unlikely(!bar)) {
		pr_warn(DRV_NAME ": driver data not found for %s\n",
			pci_name(dev));
		return XEN_PCI_ERR_op_failed;
	}

	/* A write to obtain the length must happen as a 32-bit write.
	 * This does not (yet) support writing individual bytes
	 */
	if (value == ~PCI_ROM_ADDRESS_ENABLE)
		bar->which = 1;
	else {
		u32 tmpval;
		pci_read_config_dword(dev, offset, &tmpval);
		if (tmpval != bar->val && value == bar->val) {
			/* Allow restoration of bar value. */
			pci_write_config_dword(dev, offset, bar->val);
		}
		bar->which = 0;
	}

	/* Do we need to support enabling/disabling the rom address here? */

	return 0;
}

/* For the BARs, only allow writes which write ~0 or
 * the correct resource information
 * (Needed for when the driver probes the resource usage)
 */
static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
{
	struct pci_bar_info *bar = data;

	if (unlikely(!bar)) {
		pr_warn(DRV_NAME ": driver data not found for %s\n",
			pci_name(dev));
		return XEN_PCI_ERR_op_failed;
	}

	/* A write to obtain the length must happen as a 32-bit write.
	 * This does not (yet) support writing individual bytes
	 */
	if (value == ~0)
		bar->which = 1;
	else {
		u32 tmpval;
		pci_read_config_dword(dev, offset, &tmpval);
		if (tmpval != bar->val && value == bar->val) {
			/* Allow restoration of bar value. */
			pci_write_config_dword(dev, offset, bar->val);
		}
		bar->which = 0;
	}

	return 0;
}

static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
{
	struct pci_bar_info *bar = data;

	if (unlikely(!bar)) {
		pr_warn(DRV_NAME ": driver data not found for %s\n",
			pci_name(dev));
		return XEN_PCI_ERR_op_failed;
	}

	*value = bar->which ? bar->len_val : bar->val;

	return 0;
}

static inline void read_dev_bar(struct pci_dev *dev,
				struct pci_bar_info *bar_info, int offset,
				u32 len_mask)
{
	int pos;
	struct resource *res = dev->resource;

	if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1)
		pos = PCI_ROM_RESOURCE;
	else {
		pos = (offset - PCI_BASE_ADDRESS_0) / 4;
		if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE |
				PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
			   (PCI_BASE_ADDRESS_SPACE_MEMORY |
				PCI_BASE_ADDRESS_MEM_TYPE_64))) {
			bar_info->val = res[pos - 1].start >> 32;
			bar_info->len_val = res[pos - 1].end >> 32;
			return;
		}
	}

	bar_info->val = res[pos].start |
			(res[pos].flags & PCI_REGION_FLAG_MASK);
	bar_info->len_val = resource_size(&res[pos]);
}

static void *bar_init(struct pci_dev *dev, int offset)
{
	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);

	if (!bar)
		return ERR_PTR(-ENOMEM);

	read_dev_bar(dev, bar, offset, ~0);
	bar->which = 0;

	return bar;
}

static void *rom_init(struct pci_dev *dev, int offset)
{
	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);

	if (!bar)
		return ERR_PTR(-ENOMEM);

	read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
	bar->which = 0;

	return bar;
}

static void bar_reset(struct pci_dev *dev, int offset, void *data)
{
	struct pci_bar_info *bar = data;

	bar->which = 0;
}

static void bar_release(struct pci_dev *dev, int offset, void *data)
{
	kfree(data);
}

static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset,
				 u16 *value, void *data)
{
	*value = dev->vendor;

	return 0;
}

static int xen_pcibk_read_device(struct pci_dev *dev, int offset,
				 u16 *value, void *data)
{
	*value = dev->device;

	return 0;
}

static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
			  void *data)
{
	*value = (u8) dev->irq;

	return 0;
}

static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
{
	u8 cur_value;
	int err;

	err = pci_read_config_byte(dev, offset, &cur_value);
	if (err)
		goto out;

	if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
	    || value == PCI_BIST_START)
		err = pci_write_config_byte(dev, offset, value);

out:
	return err;
}

static const struct config_field header_common[] = {
	{
	 .offset    = PCI_VENDOR_ID,
	 .size      = 2,
	 .u.w.read  = xen_pcibk_read_vendor,
	},
	{
	 .offset    = PCI_DEVICE_ID,
	 .size      = 2,
	 .u.w.read  = xen_pcibk_read_device,
	},
	{
	 .offset    = PCI_COMMAND,
	 .size      = 2,
	 .u.w.read  = command_read,
	 .u.w.write = command_write,
	},
	{
	 .offset    = PCI_INTERRUPT_LINE,
	 .size      = 1,
	 .u.b.read  = interrupt_read,
	},
	{
	 .offset    = PCI_INTERRUPT_PIN,
	 .size      = 1,
	 .u.b.read  = xen_pcibk_read_config_byte,
	},
	{
	 /* Any side effects of letting driver domain control cache line? */
	 .offset    = PCI_CACHE_LINE_SIZE,
	 .size      = 1,
	 .u.b.read  = xen_pcibk_read_config_byte,
	 .u.b.write = xen_pcibk_write_config_byte,
	},
	{
	 .offset    = PCI_LATENCY_TIMER,
	 .size      = 1,
	 .u.b.read  = xen_pcibk_read_config_byte,
	},
	{
	 .offset    = PCI_BIST,
	 .size      = 1,
	 .u.b.read  = xen_pcibk_read_config_byte,
	 .u.b.write = bist_write,
	},
	{}
};

#define CFG_FIELD_BAR(reg_offset)			\
	{						\
	.offset     = reg_offset,			\
	.size       = 4,				\
	.init       = bar_init,				\
	.reset      = bar_reset,			\
	.release    = bar_release,			\
	.u.dw.read  = bar_read,				\
	.u.dw.write = bar_write,			\
	}

#define CFG_FIELD_ROM(reg_offset)			\
	{						\
	.offset     = reg_offset,			\
	.size       = 4,				\
	.init       = rom_init,				\
	.reset      = bar_reset,			\
	.release    = bar_release,			\
	.u.dw.read  = bar_read,				\
	.u.dw.write = rom_write,			\
	}

static const struct config_field header_0[] = {
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
	CFG_FIELD_ROM(PCI_ROM_ADDRESS),
	{}
};

static const struct config_field header_1[] = {
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
	CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
	{}
};

int xen_pcibk_config_header_add_fields(struct pci_dev *dev)
{
	int err;

	err = xen_pcibk_config_add_fields(dev, header_common);
	if (err)
		goto out;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		err = xen_pcibk_config_add_fields(dev, header_0);
		break;

	case PCI_HEADER_TYPE_BRIDGE:
		err = xen_pcibk_config_add_fields(dev, header_1);
		break;

	default:
		err = -EINVAL;
		pr_err("%s: Unsupported header type %d!\n",
		       pci_name(dev), dev->hdr_type);
		break;
	}

out:
	return err;
}
./CrossVul/dataset_final_sorted/CWE-264/c/bad_1513_2
crossvul-cpp_data_bad_2399_16
/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		if (list_empty(&flist->list)) {
			mutex_unlock(&flist->lock);
			return;
		}
		cstate = list_entry(flist->list.next,
				    struct mcryptd_alg_cstate, flush_list);
		if (!cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context, do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to
 * do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_update(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_final(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_finup(req, &rctx->desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* check this again */

	err = shash_ahash_mcryptd_digest(req, desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init   = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final  = mcryptd_hash_final_enqueue;
	inst->alg.finup  = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	break;
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

int shash_ahash_mcryptd_digest(struct ahash_request *req,
			       struct shash_desc *desc)
{
	int err;

	err = crypto_shash_init(desc) ?:
	      shash_ahash_mcryptd_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);

int shash_ahash_mcryptd_update(struct ahash_request *req,
			       struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->update(desc, NULL, 0);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);

int shash_ahash_mcryptd_finup(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->finup(desc, NULL, 0, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);

int shash_ahash_mcryptd_final(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->final(desc, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);

struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(mcryptd_shash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2399_16
crossvul-cpp_data_bad_5861_46
/*
 * PRNG: Pseudo Random Number Generator
 *       Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
 *       AES 128 cipher
 *
 *  (C) Neil Horman <nhorman@tuxdriver.com>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *
 */

#include <crypto/internal/rng.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

#include "internal.h"

#define DEFAULT_PRNG_KEY "0123456789abcdef"
#define DEFAULT_PRNG_KSZ 16
#define DEFAULT_BLK_SZ 16
#define DEFAULT_V_SEED "zaybxcwdveuftgsh"

/*
 * Flags for the prng_context flags field
 */

#define PRNG_FIXED_SIZE 0x1
#define PRNG_NEED_RESET 0x2

/*
 * Note: DT is our counter value
 *	 I is our intermediate value
 *	 V is our seed vector
 * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
 * for implementation details
 */

struct prng_context {
	spinlock_t prng_lock;
	unsigned char rand_data[DEFAULT_BLK_SZ];
	unsigned char last_rand_data[DEFAULT_BLK_SZ];
	unsigned char DT[DEFAULT_BLK_SZ];
	unsigned char I[DEFAULT_BLK_SZ];
	unsigned char V[DEFAULT_BLK_SZ];
	u32 rand_data_valid;
	struct crypto_cipher *tfm;
	u32 flags;
};

static int dbg;

static void hexdump(char *note, unsigned char *buf, unsigned int len)
{
	if (dbg) {
		printk(KERN_CRIT "%s", note);
		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
				16, 1,
				buf, len, false);
	}
}

#define dbgprint(format, args...) do {\
if (dbg)\
	printk(format, ##args);\
} while (0)

static void xor_vectors(unsigned char *in1, unsigned char *in2,
			unsigned char *out, unsigned int size)
{
	int i;

	for (i = 0; i < size; i++)
		out[i] = in1[i] ^ in2[i];
}

/*
 * Returns DEFAULT_BLK_SZ bytes of random data per call
 * returns 0 if generation succeeded, <0 if something went wrong
 */
static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
	int i;
	unsigned char tmp[DEFAULT_BLK_SZ];
	unsigned char *output = NULL;

	dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
		ctx);

	hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
	hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
	hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);

	/*
	 * This algorithm is a 3 stage state machine
	 */
	for (i = 0; i < 3; i++) {

		switch (i) {
		case 0:
			/*
			 * Start by encrypting the counter value
			 * This gives us an intermediate value I
			 */
			memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
			output = ctx->I;
			hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
			break;
		case 1:

			/*
			 * Next xor I with our secret vector V
			 * encrypt that result to obtain our
			 * pseudo random data which we output
			 */
			xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
			hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
			output = ctx->rand_data;
			break;
		case 2:
			/*
			 * First check that we didn't produce the same
			 * random data that we did last time around through this
			 */
			if (!memcmp(ctx->rand_data, ctx->last_rand_data,
					DEFAULT_BLK_SZ)) {
				if (cont_test) {
					panic("cprng %p Failed repetition check!\n",
						ctx);
				}

				printk(KERN_ERR
					"ctx %p Failed repetition check!\n",
					ctx);

				ctx->flags |= PRNG_NEED_RESET;
				return -EINVAL;
			}
			memcpy(ctx->last_rand_data, ctx->rand_data,
				DEFAULT_BLK_SZ);

			/*
			 * Lastly xor the random data with I
			 * and encrypt that to obtain a new secret vector V
			 */
			xor_vectors(ctx->rand_data, ctx->I, tmp,
				DEFAULT_BLK_SZ);
			output = ctx->V;
			hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
			break;
		}

		/* do the encryption */
		crypto_cipher_encrypt_one(ctx->tfm, output, tmp);

	}

	/*
	 * Now update our DT value
	 */
	for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
		ctx->DT[i] += 1;
		if (ctx->DT[i] != 0)
			break;
	}

	dbgprint("Returning new block for context %p\n", ctx);
	ctx->rand_data_valid = 0;

	hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
	hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
	hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
	hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);

	return 0;
}

/* Our exported functions */
static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
			int do_cont_test)
{
	unsigned char *ptr = buf;
	unsigned int byte_count = (unsigned int)nbytes;
	int err;

	spin_lock_bh(&ctx->prng_lock);

	err = -EINVAL;
	if (ctx->flags & PRNG_NEED_RESET)
		goto done;

	/*
	 * If the FIXED_SIZE flag is on, only return whole blocks of
	 * pseudo random data
	 */
	err = -EINVAL;
	if (ctx->flags & PRNG_FIXED_SIZE) {
		if (nbytes < DEFAULT_BLK_SZ)
			goto done;
		byte_count = DEFAULT_BLK_SZ;
	}

	err = byte_count;

	dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
		byte_count, ctx);

remainder:
	if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
		if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
			memset(buf, 0, nbytes);
			err = -EINVAL;
			goto done;
		}
	}

	/*
	 * Copy any data less than an entire block
	 */
	if (byte_count < DEFAULT_BLK_SZ) {
empty_rbuf:
		while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
			*ptr = ctx->rand_data[ctx->rand_data_valid];
			ptr++;
			byte_count--;
			ctx->rand_data_valid++;
			if (byte_count == 0)
				goto done;
		}
	}

	/*
	 * Now copy whole blocks
	 */
	for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
		if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
			if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
				memset(buf, 0, nbytes);
				err = -EINVAL;
				goto done;
			}
		}
		if (ctx->rand_data_valid > 0)
			goto empty_rbuf;
		memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
		ctx->rand_data_valid += DEFAULT_BLK_SZ;
		ptr += DEFAULT_BLK_SZ;
	}

	/*
	 * Now go back and get any remaining partial block
	 */
	if (byte_count)
		goto remainder;

done:
	spin_unlock_bh(&ctx->prng_lock);
	dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
		err, ctx);
	return err;
}

static void free_prng_context(struct prng_context *ctx)
{
	crypto_free_cipher(ctx->tfm);
}

static int reset_prng_context(struct prng_context *ctx,
			      unsigned char *key, size_t klen,
			      unsigned char *V, unsigned char *DT)
{
	int ret;
	unsigned char *prng_key;

	spin_lock_bh(&ctx->prng_lock);
	ctx->flags |= PRNG_NEED_RESET;

	prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;

	if (!key)
		klen = DEFAULT_PRNG_KSZ;

	if (V)
		memcpy(ctx->V, V, DEFAULT_BLK_SZ);
	else
		memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);

	if (DT)
		memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
	else
		memset(ctx->DT, 0, DEFAULT_BLK_SZ);

	memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
	memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);

	ctx->rand_data_valid = DEFAULT_BLK_SZ;

	ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
	if (ret) {
		dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
			crypto_cipher_get_flags(ctx->tfm));
		goto out;
	}

	ret = 0;
	ctx->flags &= ~PRNG_NEED_RESET;
out:
	spin_unlock_bh(&ctx->prng_lock);
	return ret;
}

static int cprng_init(struct crypto_tfm *tfm)
{
	struct prng_context *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->prng_lock);
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
				ctx);
		return PTR_ERR(ctx->tfm);
	}

	if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
		return -EINVAL;

	/*
	 * after allocation, we should always force the user to reset
	 * so they don't inadvertently use the insecure default values
	 * without specifying them intentionally
	 */
	ctx->flags |= PRNG_NEED_RESET;
	return 0;
}

static void cprng_exit(struct crypto_tfm *tfm)
{
	free_prng_context(crypto_tfm_ctx(tfm));
}

static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
			    unsigned int dlen)
{
	struct prng_context *prng = crypto_rng_ctx(tfm);

	return get_prng_bytes(rdata, dlen, prng, 0);
}

/*
 * This is the cprng_registered reset method; the seed value is
 * interpreted as the tuple { V KEY DT }.
 * V and KEY are required during reset, and DT is optional, detected
 * as being present by testing the length of the seed
 */
static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
{
	struct prng_context *prng = crypto_rng_ctx(tfm);
	u8 *key = seed + DEFAULT_BLK_SZ;
	u8 *dt = NULL;

	if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
		return -EINVAL;

	if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ))
		dt = key + DEFAULT_PRNG_KSZ;

	reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt);

	if (prng->flags & PRNG_NEED_RESET)
		return -EINVAL;
	return 0;
}

#ifdef CONFIG_CRYPTO_FIPS
static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
			    unsigned int dlen)
{
	struct prng_context *prng = crypto_rng_ctx(tfm);

	return get_prng_bytes(rdata, dlen, prng, 1);
}

static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
{
	u8 rdata[DEFAULT_BLK_SZ];
	u8 *key = seed + DEFAULT_BLK_SZ;
	int rc;

	struct prng_context *prng = crypto_rng_ctx(tfm);

	if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
		return -EINVAL;

	/* fips strictly requires seed != key */
	if (!memcmp(seed, key, DEFAULT_PRNG_KSZ))
		return -EINVAL;

	rc = cprng_reset(tfm, seed, slen);

	if (!rc)
		goto out;

	/* this primes our continuity test */
	rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
	prng->rand_data_valid = DEFAULT_BLK_SZ;

out:
	return rc;
}
#endif

static struct crypto_alg rng_algs[] = { {
	.cra_name		= "stdrng",
	.cra_driver_name	= "ansi_cprng",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_RNG,
	.cra_ctxsize		= sizeof(struct prng_context),
	.cra_type		= &crypto_rng_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= cprng_init,
	.cra_exit		= cprng_exit,
	.cra_u			= {
		.rng = {
			.rng_make_random	= cprng_get_random,
			.rng_reset		= cprng_reset,
			.seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
		}
	}
#ifdef CONFIG_CRYPTO_FIPS
}, {
	.cra_name		= "fips(ansi_cprng)",
	.cra_driver_name	= "fips_ansi_cprng",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_RNG,
	.cra_ctxsize		= sizeof(struct prng_context),
	.cra_type		= &crypto_rng_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= cprng_init,
	.cra_exit		= cprng_exit,
	.cra_u			= {
		.rng = {
			.rng_make_random	= fips_cprng_get_random,
			.rng_reset		= fips_cprng_reset,
			.seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
		}
	}
#endif
} };

/* Module initialization */
static int __init prng_mod_init(void)
{
	return crypto_register_algs(rng_algs, ARRAY_SIZE(rng_algs));
}

static void __exit prng_mod_fini(void)
{
	crypto_unregister_algs(rng_algs, ARRAY_SIZE(rng_algs));
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
module_init(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS("stdrng");
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_46
crossvul-cpp_data_good_5615_0
/* * linux/fs/namespace.c * * (C) Copyright Al Viro 2000, 2001 * Released under GPL v2. * * Based on code from fs/super.c, copyright Linus Torvalds and others. * Heavily rewritten. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/capability.h> #include <linux/mnt_namespace.h> #include <linux/user_namespace.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/idr.h> #include <linux/acct.h> /* acct_auto_close_mnt */ #include <linux/ramfs.h> /* init_rootfs */ #include <linux/fs_struct.h> /* get_fs_root et.al. */ #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ #include <linux/uaccess.h> #include <linux/proc_fs.h> #include "pnode.h" #include "internal.h" #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) #define HASH_SIZE (1UL << HASH_SHIFT) static int event; static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_group_ida); static DEFINE_SPINLOCK(mnt_id_lock); static int mnt_id_start = 0; static int mnt_group_start = 1; static struct list_head *mount_hashtable __read_mostly; static struct kmem_cache *mnt_cache __read_mostly; static struct rw_semaphore namespace_sem; /* /sys/fs */ struct kobject *fs_kobj; EXPORT_SYMBOL_GPL(fs_kobj); /* * vfsmount lock may be taken for read to prevent changes to the * vfsmount hash, ie. during mountpoint lookups or walking back * up the tree. * * It should be taken for write in all cases where the vfsmount * tree or hash is modified or when a vfsmount structure is modified. */ DEFINE_BRLOCK(vfsmount_lock); static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) { unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); tmp += ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> HASH_SHIFT); return tmp & (HASH_SIZE - 1); } #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) /* * allocation is serialized by namespace_sem, but we need the spinlock to * serialize with freeing. 
*/ static int mnt_alloc_id(struct mount *mnt) { int res; retry: ida_pre_get(&mnt_id_ida, GFP_KERNEL); spin_lock(&mnt_id_lock); res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); if (!res) mnt_id_start = mnt->mnt_id + 1; spin_unlock(&mnt_id_lock); if (res == -EAGAIN) goto retry; return res; } static void mnt_free_id(struct mount *mnt) { int id = mnt->mnt_id; spin_lock(&mnt_id_lock); ida_remove(&mnt_id_ida, id); if (mnt_id_start > id) mnt_id_start = id; spin_unlock(&mnt_id_lock); } /* * Allocate a new peer group ID * * mnt_group_ida is protected by namespace_sem */ static int mnt_alloc_group_id(struct mount *mnt) { int res; if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) return -ENOMEM; res = ida_get_new_above(&mnt_group_ida, mnt_group_start, &mnt->mnt_group_id); if (!res) mnt_group_start = mnt->mnt_group_id + 1; return res; } /* * Release a peer group ID */ void mnt_release_group_id(struct mount *mnt) { int id = mnt->mnt_group_id; ida_remove(&mnt_group_ida, id); if (mnt_group_start > id) mnt_group_start = id; mnt->mnt_group_id = 0; } /* * vfsmount lock must be held for read */ static inline void mnt_add_count(struct mount *mnt, int n) { #ifdef CONFIG_SMP this_cpu_add(mnt->mnt_pcp->mnt_count, n); #else preempt_disable(); mnt->mnt_count += n; preempt_enable(); #endif } /* * vfsmount lock must be held for write */ unsigned int mnt_get_count(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; } return count; #else return mnt->mnt_count; #endif } static struct mount *alloc_vfsmnt(const char *name) { struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); if (mnt) { int err; err = mnt_alloc_id(mnt); if (err) goto out_free_cache; if (name) { mnt->mnt_devname = kstrdup(name, GFP_KERNEL); if (!mnt->mnt_devname) goto out_free_id; } #ifdef CONFIG_SMP mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); if (!mnt->mnt_pcp) goto out_free_devname; this_cpu_add(mnt->mnt_pcp->mnt_count, 1); #else mnt->mnt_count = 1; mnt->mnt_writers = 0; #endif INIT_LIST_HEAD(&mnt->mnt_hash); INIT_LIST_HEAD(&mnt->mnt_child); INIT_LIST_HEAD(&mnt->mnt_mounts); INIT_LIST_HEAD(&mnt->mnt_list); INIT_LIST_HEAD(&mnt->mnt_expire); INIT_LIST_HEAD(&mnt->mnt_share); INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif } return mnt; #ifdef CONFIG_SMP out_free_devname: kfree(mnt->mnt_devname); #endif out_free_id: mnt_free_id(mnt); out_free_cache: kmem_cache_free(mnt_cache, mnt); return NULL; } /* * Most r/o checks on a fs are for operations that take * discrete amounts of time, like a write() or unlink(). * We must keep track of when those operations start * (for permission checks) and when they end, so that * we can determine when writes are able to occur to * a filesystem. */ /* * __mnt_is_readonly: check whether a mount is read-only * @mnt: the mount to check for its write status * * This shouldn't be used directly ouside of the VFS. * It does not guarantee that the filesystem will stay * r/w, just that it is right *now*. This can not and * should not be used in place of IS_RDONLY(inode). * mnt_want/drop_write() will _keep_ the filesystem * r/w. 
*/ int __mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_flags & MNT_READONLY) return 1; if (mnt->mnt_sb->s_flags & MS_RDONLY) return 1; return 0; } EXPORT_SYMBOL_GPL(__mnt_is_readonly); static inline void mnt_inc_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_inc(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers++; #endif } static inline void mnt_dec_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_dec(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers--; #endif } static unsigned int mnt_get_writers(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; } return count; #else return mnt->mnt_writers; #endif } static int mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_sb->s_readonly_remount) return 1; /* Order wrt setting s_flags/s_readonly_remount in do_remount() */ smp_rmb(); return __mnt_is_readonly(mnt); } /* * Most r/o & frozen checks on a fs are for operations that take discrete * amounts of time, like a write() or unlink(). We must keep track of when * those operations start (for permission checks) and when they end, so that we * can determine when writes are able to occur to a filesystem. */ /** * __mnt_want_write - get write access to a mount without freeze protection * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (the mount is read-write) before * returning success. This operation does not protect against the filesystem * being frozen. When the write operation is finished, __mnt_drop_write() must be * called. This is effectively a refcount. */ int __mnt_want_write(struct vfsmount *m) { struct mount *mnt = real_mount(m); int ret = 0; preempt_disable(); mnt_inc_writers(mnt); /* * The store to mnt_inc_writers must be visible before we pass * the MNT_WRITE_HOLD loop below, so that the slowpath can see our * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) cpu_relax(); /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until * MNT_WRITE_HOLD is cleared. */ smp_rmb(); if (mnt_is_readonly(m)) { mnt_dec_writers(mnt); ret = -EROFS; } preempt_enable(); return ret; } /** * mnt_want_write - get write access to a mount * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mount is read-write, filesystem * is not frozen) before returning success. When the write operation is * finished, mnt_drop_write() must be called. This is effectively a refcount. */ int mnt_want_write(struct vfsmount *m) { int ret; sb_start_write(m->mnt_sb); ret = __mnt_want_write(m); if (ret) sb_end_write(m->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write); /** * mnt_clone_write - get write access to a mount * @mnt: the mount on which to take a write * * This is effectively like mnt_want_write, except * it must only be used to take an extra write reference * on a mountpoint that we already know has a write reference * on it. This allows some optimisation. * * After finished, mnt_drop_write must be called as usual to * drop the reference.
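 *
 * The in-file consumer is __mnt_want_write_file() below; schematically
 * (illustrative only, special_file() check omitted):
 *
 *	if (file->f_mode & FMODE_WRITE)	// the open already holds a write ref
 *		err = mnt_clone_write(file->f_path.mnt);
 *	else
 *		err = __mnt_want_write(file->f_path.mnt);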
*/ int mnt_clone_write(struct vfsmount *mnt) { /* superblock may be r/o */ if (__mnt_is_readonly(mnt)) return -EROFS; preempt_disable(); mnt_inc_writers(real_mount(mnt)); preempt_enable(); return 0; } EXPORT_SYMBOL_GPL(mnt_clone_write); /** * __mnt_want_write_file - get write access to a file's mount * @file: the file whose mount to take a write on * * This is like __mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ int __mnt_want_write_file(struct file *file) { struct inode *inode = file_inode(file); if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode)) return __mnt_want_write(file->f_path.mnt); else return mnt_clone_write(file->f_path.mnt); } /** * mnt_want_write_file - get write access to a file's mount * @file: the file whose mount to take a write on * * This is like mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ int mnt_want_write_file(struct file *file) { int ret; sb_start_write(file->f_path.mnt->mnt_sb); ret = __mnt_want_write_file(file); if (ret) sb_end_write(file->f_path.mnt->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write_file); /** * __mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done * performing writes to it. Must be matched with * __mnt_want_write() call above. */ void __mnt_drop_write(struct vfsmount *mnt) { preempt_disable(); mnt_dec_writers(real_mount(mnt)); preempt_enable(); } /** * mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done performing writes to it and * also allows the filesystem to be frozen again. Must be matched with * mnt_want_write() call above. */ void mnt_drop_write(struct vfsmount *mnt) { __mnt_drop_write(mnt); sb_end_write(mnt->mnt_sb); } EXPORT_SYMBOL_GPL(mnt_drop_write); void __mnt_drop_write_file(struct file *file) { __mnt_drop_write(file->f_path.mnt); } void mnt_drop_write_file(struct file *file) { mnt_drop_write(file->f_path.mnt); } EXPORT_SYMBOL(mnt_drop_write_file); static int mnt_make_readonly(struct mount *mnt) { int ret = 0; br_write_lock(&vfsmount_lock); mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; /* * After storing MNT_WRITE_HOLD, we'll read the counters. This store * should be visible before we do. */ smp_mb(); /* * With writers on hold, if this value is zero, then there are * definitely no active writers (although held writers may subsequently * increment the count, they'll have to wait, and decrement it after * seeing MNT_READONLY). * * It is OK to have counter incremented on one CPU and decremented on * another: the sum will add up correctly. The danger would be when we * sum up each counter, if we read a counter before it is incremented, * but then read another CPU's count which it has been subsequently * decremented from -- we would see more decrements than we should. * MNT_WRITE_HOLD protects against this scenario, because * mnt_want_write first increments count, then smp_mb, then spins on * MNT_WRITE_HOLD, so it can't be decremented by another CPU while * we're counting up here. */ if (mnt_get_writers(mnt) > 0) ret = -EBUSY; else mnt->mnt.mnt_flags |= MNT_READONLY; /* * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers * that become unheld will see MNT_READONLY.
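 *
 * Schematically, the two sides of the protocol pair up like this
 * (illustrative summary of the code above, not extra logic):
 *
 *	__mnt_want_write()		mnt_make_readonly()
 *	------------------		-------------------
 *	mnt_inc_writers()		set MNT_WRITE_HOLD
 *	smp_mb()			smp_mb()
 *	spin while MNT_WRITE_HOLD	sum the writer counters
 *	smp_rmb()			set MNT_READONLY if quiesced
 *	test readonly state		smp_wmb(); clear MNT_WRITE_HOLD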
*/ smp_wmb(); mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; br_write_unlock(&vfsmount_lock); return ret; } static void __mnt_unmake_readonly(struct mount *mnt) { br_write_lock(&vfsmount_lock); mnt->mnt.mnt_flags &= ~MNT_READONLY; br_write_unlock(&vfsmount_lock); } int sb_prepare_remount_readonly(struct super_block *sb) { struct mount *mnt; int err = 0; /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */ if (atomic_long_read(&sb->s_remove_count)) return -EBUSY; br_write_lock(&vfsmount_lock); list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; smp_mb(); if (mnt_get_writers(mnt) > 0) { err = -EBUSY; break; } } } if (!err && atomic_long_read(&sb->s_remove_count)) err = -EBUSY; if (!err) { sb->s_readonly_remount = 1; smp_wmb(); } list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; } br_write_unlock(&vfsmount_lock); return err; } static void free_vfsmnt(struct mount *mnt) { kfree(mnt->mnt_devname); mnt_free_id(mnt); #ifdef CONFIG_SMP free_percpu(mnt->mnt_pcp); #endif kmem_cache_free(mnt_cache, mnt); } /* * find the first or last mount at @dentry on vfsmount @mnt depending on * @dir. If @dir is set return the first mount else return the last mount. * vfsmount_lock must be held for read or write. */ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, int dir) { struct list_head *head = mount_hashtable + hash(mnt, dentry); struct list_head *tmp = head; struct mount *p, *found = NULL; for (;;) { tmp = dir ? tmp->next : tmp->prev; p = NULL; if (tmp == head) break; p = list_entry(tmp, struct mount, mnt_hash); if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) { found = p; break; } } return found; } /* * lookup_mnt - Return the first child mount mounted at path * * "First" means first mounted chronologically. If you create the * following mounts: * * mount /dev/sda1 /mnt * mount /dev/sda2 /mnt * mount /dev/sda3 /mnt * * Then lookup_mnt() on the base /mnt dentry in the root mount will * return successively the root dentry and vfsmount of /dev/sda1, then * /dev/sda2, then /dev/sda3, then NULL. * * lookup_mnt takes a reference to the found vfsmount. */ struct vfsmount *lookup_mnt(struct path *path) { struct mount *child_mnt; br_read_lock(&vfsmount_lock); child_mnt = __lookup_mnt(path->mnt, path->dentry, 1); if (child_mnt) { mnt_add_count(child_mnt, 1); br_read_unlock(&vfsmount_lock); return &child_mnt->mnt; } else { br_read_unlock(&vfsmount_lock); return NULL; } } static inline int check_mnt(struct mount *mnt) { return mnt->mnt_ns == current->nsproxy->mnt_ns; } /* * vfsmount lock must be held for write */ static void touch_mnt_namespace(struct mnt_namespace *ns) { if (ns) { ns->event = ++event; wake_up_interruptible(&ns->poll); } } /* * vfsmount lock must be held for write */ static void __touch_mnt_namespace(struct mnt_namespace *ns) { if (ns && ns->event != event) { ns->event = event; wake_up_interruptible(&ns->poll); } } /* * Clear dentry's mounted state if it has no remaining mounts. * vfsmount_lock must be held for write. 
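 *
 * DCACHE_MOUNTED is only a hint consumed by d_mountpoint(); a sketch of
 * the fast path it enables (illustrative, not from this file):
 *
 *	if (d_mountpoint(dentry))		// cheap flag test
 *		mounted = lookup_mnt(path);	// authoritative hash lookup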
*/ static void dentry_reset_mounted(struct dentry *dentry) { unsigned u; for (u = 0; u < HASH_SIZE; u++) { struct mount *p; list_for_each_entry(p, &mount_hashtable[u], mnt_hash) { if (p->mnt_mountpoint == dentry) return; } } spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_MOUNTED; spin_unlock(&dentry->d_lock); } /* * vfsmount lock must be held for write */ static void detach_mnt(struct mount *mnt, struct path *old_path) { old_path->dentry = mnt->mnt_mountpoint; old_path->mnt = &mnt->mnt_parent->mnt; mnt->mnt_parent = mnt; mnt->mnt_mountpoint = mnt->mnt.mnt_root; list_del_init(&mnt->mnt_child); list_del_init(&mnt->mnt_hash); dentry_reset_mounted(old_path->dentry); } /* * vfsmount lock must be held for write */ void mnt_set_mountpoint(struct mount *mnt, struct dentry *dentry, struct mount *child_mnt) { mnt_add_count(mnt, 1); /* essentially, that's mntget */ child_mnt->mnt_mountpoint = dget(dentry); child_mnt->mnt_parent = mnt; spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_MOUNTED; spin_unlock(&dentry->d_lock); } /* * vfsmount lock must be held for write */ static void attach_mnt(struct mount *mnt, struct path *path) { mnt_set_mountpoint(real_mount(path->mnt), path->dentry, mnt); list_add_tail(&mnt->mnt_hash, mount_hashtable + hash(path->mnt, path->dentry)); list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts); } /* * vfsmount lock must be held for write */ static void commit_tree(struct mount *mnt) { struct mount *parent = mnt->mnt_parent; struct mount *m; LIST_HEAD(head); struct mnt_namespace *n = parent->mnt_ns; BUG_ON(parent == mnt); list_add_tail(&head, &mnt->mnt_list); list_for_each_entry(m, &head, mnt_list) m->mnt_ns = n; list_splice(&head, n->list.prev); list_add_tail(&mnt->mnt_hash, mount_hashtable + hash(&parent->mnt, mnt->mnt_mountpoint)); list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); touch_mnt_namespace(n); } static struct mount *next_mnt(struct mount *p, struct mount *root) { struct list_head *next = p->mnt_mounts.next; if (next == &p->mnt_mounts) { while (1) { if (p == root) return NULL; next = p->mnt_child.next; if (next != &p->mnt_parent->mnt_mounts) break; p = p->mnt_parent; } } return list_entry(next, struct mount, mnt_child); } static struct mount *skip_mnt_tree(struct mount *p) { struct list_head *prev = p->mnt_mounts.prev; while (prev != &p->mnt_mounts) { p = list_entry(prev, struct mount, mnt_child); prev = p->mnt_mounts.prev; } return p; } struct vfsmount * vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data) { struct mount *mnt; struct dentry *root; if (!type) return ERR_PTR(-ENODEV); mnt = alloc_vfsmnt(name); if (!mnt) return ERR_PTR(-ENOMEM); if (flags & MS_KERNMOUNT) mnt->mnt.mnt_flags = MNT_INTERNAL; root = mount_fs(type, flags, name, data); if (IS_ERR(root)) { free_vfsmnt(mnt); return ERR_CAST(root); } mnt->mnt.mnt_root = root; mnt->mnt.mnt_sb = root->d_sb; mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; br_write_lock(&vfsmount_lock); list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts); br_write_unlock(&vfsmount_lock); return &mnt->mnt; } EXPORT_SYMBOL_GPL(vfs_kern_mount); static struct mount *clone_mnt(struct mount *old, struct dentry *root, int flag) { struct super_block *sb = old->mnt.mnt_sb; struct mount *mnt; int err; mnt = alloc_vfsmnt(old->mnt_devname); if (!mnt) return ERR_PTR(-ENOMEM); if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE)) mnt->mnt_group_id = 0; /* not a peer of original */ else mnt->mnt_group_id = old->mnt_group_id; if ((flag & CL_MAKE_SHARED) && 
!mnt->mnt_group_id) { err = mnt_alloc_group_id(mnt); if (err) goto out_free; } mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; atomic_inc(&sb->s_active); mnt->mnt.mnt_sb = sb; mnt->mnt.mnt_root = dget(root); mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; br_write_lock(&vfsmount_lock); list_add_tail(&mnt->mnt_instance, &sb->s_mounts); br_write_unlock(&vfsmount_lock); if ((flag & CL_SLAVE) || ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) { list_add(&mnt->mnt_slave, &old->mnt_slave_list); mnt->mnt_master = old; CLEAR_MNT_SHARED(mnt); } else if (!(flag & CL_PRIVATE)) { if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) list_add(&mnt->mnt_share, &old->mnt_share); if (IS_MNT_SLAVE(old)) list_add(&mnt->mnt_slave, &old->mnt_slave); mnt->mnt_master = old->mnt_master; } if (flag & CL_MAKE_SHARED) set_mnt_shared(mnt); /* stick the duplicate mount on the same expiry list * as the original if that was on one */ if (flag & CL_EXPIRE) { if (!list_empty(&old->mnt_expire)) list_add(&mnt->mnt_expire, &old->mnt_expire); } return mnt; out_free: free_vfsmnt(mnt); return ERR_PTR(err); } static inline void mntfree(struct mount *mnt) { struct vfsmount *m = &mnt->mnt; struct super_block *sb = m->mnt_sb; /* * This probably indicates that somebody messed * up a mnt_want/drop_write() pair. If this * happens, the filesystem was probably unable * to make r/w->r/o transitions. */ /* * The locking used to deal with mnt_count decrement provides barriers, * so mnt_get_writers() below is safe. */ WARN_ON(mnt_get_writers(mnt)); fsnotify_vfsmount_delete(m); dput(m->mnt_root); free_vfsmnt(mnt); deactivate_super(sb); } static void mntput_no_expire(struct mount *mnt) { put_again: #ifdef CONFIG_SMP br_read_lock(&vfsmount_lock); if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ mnt_add_count(mnt, -1); br_read_unlock(&vfsmount_lock); return; } br_read_unlock(&vfsmount_lock); br_write_lock(&vfsmount_lock); mnt_add_count(mnt, -1); if (mnt_get_count(mnt)) { br_write_unlock(&vfsmount_lock); return; } #else mnt_add_count(mnt, -1); if (likely(mnt_get_count(mnt))) return; br_write_lock(&vfsmount_lock); #endif if (unlikely(mnt->mnt_pinned)) { mnt_add_count(mnt, mnt->mnt_pinned + 1); mnt->mnt_pinned = 0; br_write_unlock(&vfsmount_lock); acct_auto_close_mnt(&mnt->mnt); goto put_again; } list_del(&mnt->mnt_instance); br_write_unlock(&vfsmount_lock); mntfree(mnt); } void mntput(struct vfsmount *mnt) { if (mnt) { struct mount *m = real_mount(mnt); /* avoid cacheline pingpong, hope gcc doesn't get "smart" */ if (unlikely(m->mnt_expiry_mark)) m->mnt_expiry_mark = 0; mntput_no_expire(m); } } EXPORT_SYMBOL(mntput); struct vfsmount *mntget(struct vfsmount *mnt) { if (mnt) mnt_add_count(real_mount(mnt), 1); return mnt; } EXPORT_SYMBOL(mntget); void mnt_pin(struct vfsmount *mnt) { br_write_lock(&vfsmount_lock); real_mount(mnt)->mnt_pinned++; br_write_unlock(&vfsmount_lock); } EXPORT_SYMBOL(mnt_pin); void mnt_unpin(struct vfsmount *m) { struct mount *mnt = real_mount(m); br_write_lock(&vfsmount_lock); if (mnt->mnt_pinned) { mnt_add_count(mnt, 1); mnt->mnt_pinned--; } br_write_unlock(&vfsmount_lock); } EXPORT_SYMBOL(mnt_unpin); static inline void mangle(struct seq_file *m, const char *s) { seq_escape(m, s, " \t\n\\"); } /* * Simple .show_options callback for filesystems which don't want to * implement more complex mount option showing. * * See also save_mount_options(). 
*/ int generic_show_options(struct seq_file *m, struct dentry *root) { const char *options; rcu_read_lock(); options = rcu_dereference(root->d_sb->s_options); if (options != NULL && options[0]) { seq_putc(m, ','); mangle(m, options); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(generic_show_options); /* * If filesystem uses generic_show_options(), this function should be * called from the fill_super() callback. * * The .remount_fs callback usually needs to be handled in a special * way, to make sure, that previous options are not overwritten if the * remount fails. * * Also note, that if the filesystem's .remount_fs function doesn't * reset all options to their default value, but changes only newly * given options, then the displayed options will not reflect reality * any more. */ void save_mount_options(struct super_block *sb, char *options) { BUG_ON(sb->s_options); rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL)); } EXPORT_SYMBOL(save_mount_options); void replace_mount_options(struct super_block *sb, char *options) { char *old = sb->s_options; rcu_assign_pointer(sb->s_options, options); if (old) { synchronize_rcu(); kfree(old); } } EXPORT_SYMBOL(replace_mount_options); #ifdef CONFIG_PROC_FS /* iterator; we want it to have access to namespace_sem, thus here... */ static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); down_read(&namespace_sem); return seq_list_start(&p->ns->list, *pos); } static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); return seq_list_next(v, &p->ns->list, pos); } static void m_stop(struct seq_file *m, void *v) { up_read(&namespace_sem); } static int m_show(struct seq_file *m, void *v) { struct proc_mounts *p = proc_mounts(m); struct mount *r = list_entry(v, struct mount, mnt_list); return p->show(m, &r->mnt); } const struct seq_operations mounts_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = m_show, }; #endif /* CONFIG_PROC_FS */ /** * may_umount_tree - check if a mount tree is busy * @mnt: root of mount tree * * This is called to check if a tree of mounts has any * open files, pwds, chroots or sub mounts that are * busy. */ int may_umount_tree(struct vfsmount *m) { struct mount *mnt = real_mount(m); int actual_refs = 0; int minimum_refs = 0; struct mount *p; BUG_ON(!m); /* write lock needed for mnt_get_count */ br_write_lock(&vfsmount_lock); for (p = mnt; p; p = next_mnt(p, mnt)) { actual_refs += mnt_get_count(p); minimum_refs += 2; } br_write_unlock(&vfsmount_lock); if (actual_refs > minimum_refs) return 0; return 1; } EXPORT_SYMBOL(may_umount_tree); /** * may_umount - check if a mount point is busy * @mnt: root of mount * * This is called to check if a mount point has any * open files, pwds, chroots or sub mounts. If the * mount has sub mounts this will return busy * regardless of whether the sub mounts are busy. * * Doesn't take quota and stuff into account. IOW, in some cases it will * give false negatives. The main reason why it's here is that we need * a non-destructive way to look for easily umountable filesystems. 
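 *
 * Illustrative caller, e.g. an expiry-style check (try_to_unmount() is a
 * hypothetical helper; 1 means "no users beyond the expected refs"):
 *
 *	if (may_umount(m))
 *		try_to_unmount(m);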
*/ int may_umount(struct vfsmount *mnt) { int ret = 1; down_read(&namespace_sem); br_write_lock(&vfsmount_lock); if (propagate_mount_busy(real_mount(mnt), 2)) ret = 0; br_write_unlock(&vfsmount_lock); up_read(&namespace_sem); return ret; } EXPORT_SYMBOL(may_umount); void release_mounts(struct list_head *head) { struct mount *mnt; while (!list_empty(head)) { mnt = list_first_entry(head, struct mount, mnt_hash); list_del_init(&mnt->mnt_hash); if (mnt_has_parent(mnt)) { struct dentry *dentry; struct mount *m; br_write_lock(&vfsmount_lock); dentry = mnt->mnt_mountpoint; m = mnt->mnt_parent; mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; m->mnt_ghosts--; br_write_unlock(&vfsmount_lock); dput(dentry); mntput(&m->mnt); } mntput(&mnt->mnt); } } /* * vfsmount lock must be held for write * namespace_sem must be held for write */ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill) { LIST_HEAD(tmp_list); struct mount *p; for (p = mnt; p; p = next_mnt(p, mnt)) list_move(&p->mnt_hash, &tmp_list); if (propagate) propagate_umount(&tmp_list); list_for_each_entry(p, &tmp_list, mnt_hash) { list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; list_del_init(&p->mnt_child); if (mnt_has_parent(p)) { p->mnt_parent->mnt_ghosts++; dentry_reset_mounted(p->mnt_mountpoint); } change_mnt_propagation(p, MS_PRIVATE); } list_splice(&tmp_list, kill); } static void shrink_submounts(struct mount *mnt, struct list_head *umounts); static int do_umount(struct mount *mnt, int flags) { struct super_block *sb = mnt->mnt.mnt_sb; int retval; LIST_HEAD(umount_list); retval = security_sb_umount(&mnt->mnt, flags); if (retval) return retval; /* * Allow userspace to request a mountpoint be expired rather than * unmounting unconditionally. Unmount only happens if: * (1) the mark is already set (the mark is cleared by mntput()) * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] */ if (flags & MNT_EXPIRE) { if (&mnt->mnt == current->fs->root.mnt || flags & (MNT_FORCE | MNT_DETACH)) return -EINVAL; /* * probably don't strictly need the lock here if we examined * all race cases, but it's a slowpath. */ br_write_lock(&vfsmount_lock); if (mnt_get_count(mnt) != 2) { br_write_unlock(&vfsmount_lock); return -EBUSY; } br_write_unlock(&vfsmount_lock); if (!xchg(&mnt->mnt_expiry_mark, 1)) return -EAGAIN; } /* * If we may have to abort operations to get out of this * mount, and they will themselves hold resources we must * allow the fs to do things. In the Unix tradition of * 'Gee, that's tricky, let's do it in userspace' the umount_begin * might fail to complete on the first run through as other tasks * must return, and the like. That's for the mount program to worry * about for the moment. */ if (flags & MNT_FORCE && sb->s_op->umount_begin) { sb->s_op->umount_begin(sb); } /* * No sense to grab the lock for this test, but test itself looks * somewhat bogus. Suggestions for better replacement? * Ho-hum... In principle, we might treat that as umount + switch * to rootfs. GC would eventually take care of the old vfsmount. * Actually it makes sense, especially if rootfs would contain a * /reboot - static binary that would close all descriptors and * call reboot(2). Then init(8) could umount root and exec /reboot. */ if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { /* * Special case for "unmounting" root ... * we just try to remount it readonly.
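 *
 * In shell terms (same illustrative style as the lookup_mnt() examples
 * above), "umount /" here degrades to:
 *
 *	mount -o remount,ro /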
*/ down_write(&sb->s_umount); if (!(sb->s_flags & MS_RDONLY)) retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); up_write(&sb->s_umount); return retval; } down_write(&namespace_sem); br_write_lock(&vfsmount_lock); event++; if (!(flags & MNT_DETACH)) shrink_submounts(mnt, &umount_list); retval = -EBUSY; if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) { if (!list_empty(&mnt->mnt_list)) umount_tree(mnt, 1, &umount_list); retval = 0; } br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); return retval; } /* * Is the caller allowed to modify his namespace? */ static inline bool may_mount(void) { return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); } /* * Now umount can handle mount points as well as block devices. * This is important for filesystems which use unnamed block devices. * * We now support a flag for forced unmount like the other 'big iron' * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD */ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) { struct path path; struct mount *mnt; int retval; int lookup_flags = 0; if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) return -EINVAL; if (!may_mount()) return -EPERM; if (!(flags & UMOUNT_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; retval = user_path_at(AT_FDCWD, name, lookup_flags, &path); if (retval) goto out; mnt = real_mount(path.mnt); retval = -EINVAL; if (path.dentry != path.mnt->mnt_root) goto dput_and_out; if (!check_mnt(mnt)) goto dput_and_out; retval = do_umount(mnt, flags); dput_and_out: /* we mustn't call path_put() as that would clear mnt_expiry_mark */ dput(path.dentry); mntput_no_expire(mnt); out: return retval; } #ifdef __ARCH_WANT_SYS_OLDUMOUNT /* * The 2.0 compatible umount. No flags. */ SYSCALL_DEFINE1(oldumount, char __user *, name) { return sys_umount(name, 0); } #endif static bool mnt_ns_loop(struct path *path) { /* Could bind mounting the mount namespace inode cause a * mount namespace loop? 
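 *
 * Sequence numbers only grow, and the check below refuses to bind any
 * namespace whose seq is <= ours. Illustrative scenario:
 *
 *	first mount ns created		-> seq 2
 *	nested mount ns created later	-> seq 3
 *
 * From the seq-3 namespace, binding the seq-2 namespace's
 * /proc/PID/ns/mnt file is refused (3 >= 2), so references can only
 * point from older namespaces to newer ones and no cycle can form.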
*/ struct inode *inode = path->dentry->d_inode; struct proc_inode *ei; struct mnt_namespace *mnt_ns; if (!proc_ns_inode(inode)) return false; ei = PROC_I(inode); if (ei->ns_ops != &mntns_operations) return false; mnt_ns = ei->ns; return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; } struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, int flag) { struct mount *res, *p, *q, *r; struct path path; if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt)) return ERR_PTR(-EINVAL); res = q = clone_mnt(mnt, dentry, flag); if (IS_ERR(q)) return q; q->mnt_mountpoint = mnt->mnt_mountpoint; p = mnt; list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { struct mount *s; if (!is_subdir(r->mnt_mountpoint, dentry)) continue; for (s = r; s; s = next_mnt(s, r)) { if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) { s = skip_mnt_tree(s); continue; } while (p != s->mnt_parent) { p = p->mnt_parent; q = q->mnt_parent; } p = s; path.mnt = &q->mnt; path.dentry = p->mnt_mountpoint; q = clone_mnt(p, p->mnt.mnt_root, flag); if (IS_ERR(q)) goto out; br_write_lock(&vfsmount_lock); list_add_tail(&q->mnt_list, &res->mnt_list); attach_mnt(q, &path); br_write_unlock(&vfsmount_lock); } } return res; out: if (res) { LIST_HEAD(umount_list); br_write_lock(&vfsmount_lock); umount_tree(res, 0, &umount_list); br_write_unlock(&vfsmount_lock); release_mounts(&umount_list); } return q; } /* Caller should check returned pointer for errors */ struct vfsmount *collect_mounts(struct path *path) { struct mount *tree; down_write(&namespace_sem); tree = copy_tree(real_mount(path->mnt), path->dentry, CL_COPY_ALL | CL_PRIVATE); up_write(&namespace_sem); if (IS_ERR(tree)) return NULL; return &tree->mnt; } void drop_collected_mounts(struct vfsmount *mnt) { LIST_HEAD(umount_list); down_write(&namespace_sem); br_write_lock(&vfsmount_lock); umount_tree(real_mount(mnt), 0, &umount_list); br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); } int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, struct vfsmount *root) { struct mount *mnt; int res = f(root, arg); if (res) return res; list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { res = f(&mnt->mnt, arg); if (res) return res; } return 0; } static void cleanup_group_ids(struct mount *mnt, struct mount *end) { struct mount *p; for (p = mnt; p != end; p = next_mnt(p, mnt)) { if (p->mnt_group_id && !IS_MNT_SHARED(p)) mnt_release_group_id(p); } } static int invent_group_ids(struct mount *mnt, bool recurse) { struct mount *p; for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { int err = mnt_alloc_group_id(p); if (err) { cleanup_group_ids(mnt, p); return err; } } } return 0; } /* * @source_mnt : mount tree to be attached * @nd : the place the mount tree @source_mnt is attached to * @parent_nd : if non-null, detach the source_mnt from its parent and * store the parent mount and mountpoint dentry. * (done when source_mnt is moved) * * NOTE: the table below explains the semantics when a source mount * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |                         BIND MOUNT OPERATION                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |                         MOVE MOUNT OPERATION                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |      slave (*) | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * If the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
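 *
 * Illustrative example of the (++) bind case, in the same shell-style
 * notation as the lookup_mnt() comment earlier in this file:
 *
 *	mount --make-shared /mnt	# destination is shared
 *	mount --bind /mnt /tmp/m	# /tmp/m joins /mnt's peer group
 *	mount --bind /src /mnt/a	# the new mount also appears at /tmp/m/a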
*/ static int attach_recursive_mnt(struct mount *source_mnt, struct path *path, struct path *parent_path) { LIST_HEAD(tree_list); struct mount *dest_mnt = real_mount(path->mnt); struct dentry *dest_dentry = path->dentry; struct mount *child, *p; int err; if (IS_MNT_SHARED(dest_mnt)) { err = invent_group_ids(source_mnt, true); if (err) goto out; } err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list); if (err) goto out_cleanup_ids; br_write_lock(&vfsmount_lock); if (IS_MNT_SHARED(dest_mnt)) { for (p = source_mnt; p; p = next_mnt(p, source_mnt)) set_mnt_shared(p); } if (parent_path) { detach_mnt(source_mnt, parent_path); attach_mnt(source_mnt, path); touch_mnt_namespace(source_mnt->mnt_ns); } else { mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt); commit_tree(source_mnt); } list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { list_del_init(&child->mnt_hash); commit_tree(child); } br_write_unlock(&vfsmount_lock); return 0; out_cleanup_ids: if (IS_MNT_SHARED(dest_mnt)) cleanup_group_ids(source_mnt, NULL); out: return err; } static int lock_mount(struct path *path) { struct vfsmount *mnt; retry: mutex_lock(&path->dentry->d_inode->i_mutex); if (unlikely(cant_mount(path->dentry))) { mutex_unlock(&path->dentry->d_inode->i_mutex); return -ENOENT; } down_write(&namespace_sem); mnt = lookup_mnt(path); if (likely(!mnt)) return 0; up_write(&namespace_sem); mutex_unlock(&path->dentry->d_inode->i_mutex); path_put(path); path->mnt = mnt; path->dentry = dget(mnt->mnt_root); goto retry; } static void unlock_mount(struct path *path) { up_write(&namespace_sem); mutex_unlock(&path->dentry->d_inode->i_mutex); } static int graft_tree(struct mount *mnt, struct path *path) { if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER) return -EINVAL; if (S_ISDIR(path->dentry->d_inode->i_mode) != S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode)) return -ENOTDIR; if (d_unlinked(path->dentry)) return -ENOENT; return attach_recursive_mnt(mnt, path, NULL); } /* * Sanity check the flags to change_mnt_propagation. */ static int flags_to_propagation_type(int flags) { int type = flags & ~(MS_REC | MS_SILENT); /* Fail if any non-propagation flags are set */ if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) return 0; /* Only one propagation flag should be set */ if (!is_power_of_2(type)) return 0; return type; } /* * recursively change the type of the mountpoint. */ static int do_change_type(struct path *path, int flag) { struct mount *m; struct mount *mnt = real_mount(path->mnt); int recurse = flag & MS_REC; int type; int err = 0; if (path->dentry != path->mnt->mnt_root) return -EINVAL; type = flags_to_propagation_type(flag); if (!type) return -EINVAL; down_write(&namespace_sem); if (type == MS_SHARED) { err = invent_group_ids(mnt, recurse); if (err) goto out_unlock; } br_write_lock(&vfsmount_lock); for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) change_mnt_propagation(m, type); br_write_unlock(&vfsmount_lock); out_unlock: up_write(&namespace_sem); return err; } /* * do loopback mount. 
*/ static int do_loopback(struct path *path, const char *old_name, int recurse) { LIST_HEAD(umount_list); struct path old_path; struct mount *mnt = NULL, *old; int err; if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); if (err) return err; err = -EINVAL; if (mnt_ns_loop(&old_path)) goto out; err = lock_mount(path); if (err) goto out; old = real_mount(old_path.mnt); err = -EINVAL; if (IS_MNT_UNBINDABLE(old)) goto out2; if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old)) goto out2; if (recurse) mnt = copy_tree(old, old_path.dentry, 0); else mnt = clone_mnt(old, old_path.dentry, 0); if (IS_ERR(mnt)) { err = PTR_ERR(mnt); goto out2; } err = graft_tree(mnt, path); if (err) { br_write_lock(&vfsmount_lock); umount_tree(mnt, 0, &umount_list); br_write_unlock(&vfsmount_lock); } out2: unlock_mount(path); release_mounts(&umount_list); out: path_put(&old_path); return err; } static int change_mount_flags(struct vfsmount *mnt, int ms_flags) { int error = 0; int readonly_request = 0; if (ms_flags & MS_RDONLY) readonly_request = 1; if (readonly_request == __mnt_is_readonly(mnt)) return 0; if (readonly_request) error = mnt_make_readonly(real_mount(mnt)); else __mnt_unmake_readonly(real_mount(mnt)); return error; } /* * change filesystem flags. dir should be a physical root of filesystem. * If you've mounted a non-root directory somewhere and want to do remount * on it - tough luck. */ static int do_remount(struct path *path, int flags, int mnt_flags, void *data) { int err; struct super_block *sb = path->mnt->mnt_sb; struct mount *mnt = real_mount(path->mnt); if (!check_mnt(mnt)) return -EINVAL; if (path->dentry != path->mnt->mnt_root) return -EINVAL; err = security_sb_remount(sb, data); if (err) return err; down_write(&sb->s_umount); if (flags & MS_BIND) err = change_mount_flags(path->mnt, flags); else if (!capable(CAP_SYS_ADMIN)) err = -EPERM; else err = do_remount_sb(sb, flags, data, 0); if (!err) { br_write_lock(&vfsmount_lock); mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK; mnt->mnt.mnt_flags = mnt_flags; br_write_unlock(&vfsmount_lock); } up_write(&sb->s_umount); if (!err) { br_write_lock(&vfsmount_lock); touch_mnt_namespace(mnt->mnt_ns); br_write_unlock(&vfsmount_lock); } return err; } static inline int tree_contains_unbindable(struct mount *mnt) { struct mount *p; for (p = mnt; p; p = next_mnt(p, mnt)) { if (IS_MNT_UNBINDABLE(p)) return 1; } return 0; } static int do_move_mount(struct path *path, const char *old_name) { struct path old_path, parent_path; struct mount *p; struct mount *old; int err; if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); if (err) return err; err = lock_mount(path); if (err < 0) goto out; old = real_mount(old_path.mnt); p = real_mount(path->mnt); err = -EINVAL; if (!check_mnt(p) || !check_mnt(old)) goto out1; if (d_unlinked(path->dentry)) goto out1; err = -EINVAL; if (old_path.dentry != old_path.mnt->mnt_root) goto out1; if (!mnt_has_parent(old)) goto out1; if (S_ISDIR(path->dentry->d_inode->i_mode) != S_ISDIR(old_path.dentry->d_inode->i_mode)) goto out1; /* * Don't move a mount residing in a shared parent. */ if (IS_MNT_SHARED(old->mnt_parent)) goto out1; /* * Don't move a mount tree containing unbindable mounts to a destination * mount which is shared.
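 *
 * Illustratively: propagation to the shared destination's peers would
 * have to clone the moved tree for every peer, but an unbindable mount
 * must never be cloned, so the move is refused up front:
 *
 *	mount --make-unbindable /a/sub
 *	mount --make-shared /dest
 *	mount --move /a /dest/x		# fails with EINVAL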
*/ if (IS_MNT_SHARED(p) && tree_contains_unbindable(old)) goto out1; err = -ELOOP; for (; mnt_has_parent(p); p = p->mnt_parent) if (p == old) goto out1; err = attach_recursive_mnt(old, path, &parent_path); if (err) goto out1; /* if the mount is moved, it should no longer be expired * automatically */ list_del_init(&old->mnt_expire); out1: unlock_mount(path); out: if (!err) path_put(&parent_path); path_put(&old_path); return err; } static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype) { int err; const char *subtype = strchr(fstype, '.'); if (subtype) { subtype++; err = -EINVAL; if (!subtype[0]) goto err; } else subtype = ""; mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL); err = -ENOMEM; if (!mnt->mnt_sb->s_subtype) goto err; return mnt; err: mntput(mnt); return ERR_PTR(err); } /* * add a mount into a namespace's mount tree */ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags) { int err; mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL); err = lock_mount(path); if (err) return err; err = -EINVAL; if (unlikely(!check_mnt(real_mount(path->mnt)))) { /* that's acceptable only for automounts done in private ns */ if (!(mnt_flags & MNT_SHRINKABLE)) goto unlock; /* ... and for those we'd better have mountpoint still alive */ if (!real_mount(path->mnt)->mnt_ns) goto unlock; } /* Refuse the same filesystem on the same mount point */ err = -EBUSY; if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path->mnt->mnt_root == path->dentry) goto unlock; err = -EINVAL; if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode)) goto unlock; newmnt->mnt.mnt_flags = mnt_flags; err = graft_tree(newmnt, path); unlock: unlock_mount(path); return err; } /* * create a new mount for userspace and request it to be added into the * namespace's tree */ static int do_new_mount(struct path *path, const char *fstype, int flags, int mnt_flags, const char *name, void *data) { struct file_system_type *type; struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; struct vfsmount *mnt; int err; if (!fstype) return -EINVAL; type = get_fs_type(fstype); if (!type) return -ENODEV; if (user_ns != &init_user_ns) { if (!(type->fs_flags & FS_USERNS_MOUNT)) { put_filesystem(type); return -EPERM; } /* Only in special cases allow devices from mounts * created outside the initial user namespace.
*/ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) { flags |= MS_NODEV; mnt_flags |= MNT_NODEV; } } mnt = vfs_kern_mount(type, flags, name, data); if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && !mnt->mnt_sb->s_subtype) mnt = fs_set_subtype(mnt, fstype); put_filesystem(type); if (IS_ERR(mnt)) return PTR_ERR(mnt); err = do_add_mount(real_mount(mnt), path, mnt_flags); if (err) mntput(mnt); return err; } int finish_automount(struct vfsmount *m, struct path *path) { struct mount *mnt = real_mount(m); int err; /* The new mount record should have at least 2 refs to prevent it being * expired before we get a chance to add it */ BUG_ON(mnt_get_count(mnt) < 2); if (m->mnt_sb == path->mnt->mnt_sb && m->mnt_root == path->dentry) { err = -ELOOP; goto fail; } err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); if (!err) return 0; fail: /* remove m from any expiration list it may be on */ if (!list_empty(&mnt->mnt_expire)) { down_write(&namespace_sem); br_write_lock(&vfsmount_lock); list_del_init(&mnt->mnt_expire); br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); } mntput(m); mntput(m); return err; } /** * mnt_set_expiry - Put a mount on an expiration list * @mnt: The mount to list. * @expiry_list: The list to add the mount to. */ void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) { down_write(&namespace_sem); br_write_lock(&vfsmount_lock); list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); } EXPORT_SYMBOL(mnt_set_expiry); /* * process a list of expirable mountpoints with the intent of discarding any * mountpoints that aren't in use and haven't been touched since last we came * here */ void mark_mounts_for_expiry(struct list_head *mounts) { struct mount *mnt, *next; LIST_HEAD(graveyard); LIST_HEAD(umounts); if (list_empty(mounts)) return; down_write(&namespace_sem); br_write_lock(&vfsmount_lock); /* extract from the expiration list every vfsmount that matches the * following criteria: * - only referenced by its parent vfsmount * - still marked for expiry (marked on the last call here; marks are * cleared by mntput()) */ list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { if (!xchg(&mnt->mnt_expiry_mark, 1) || propagate_mount_busy(mnt, 1)) continue; list_move(&mnt->mnt_expire, &graveyard); } while (!list_empty(&graveyard)) { mnt = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(mnt->mnt_ns); umount_tree(mnt, 1, &umounts); } br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umounts); } EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); /* * Ripoff of 'select_parent()' * * search the list of submounts for a given mountpoint, and move any * shrinkable submounts to the 'graveyard' list. */ static int select_submounts(struct mount *parent, struct list_head *graveyard) { struct mount *this_parent = parent; struct list_head *next; int found = 0; repeat: next = this_parent->mnt_mounts.next; resume: while (next != &this_parent->mnt_mounts) { struct list_head *tmp = next; struct mount *mnt = list_entry(tmp, struct mount, mnt_child); next = tmp->next; if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) continue; /* * Descend a level if the d_mounts list is non-empty. */ if (!list_empty(&mnt->mnt_mounts)) { this_parent = mnt; goto repeat; } if (!propagate_mount_busy(mnt, 1)) { list_move_tail(&mnt->mnt_expire, graveyard); found++; } } /* * All done at this level ... 
ascend and resume the search */ if (this_parent != parent) { next = this_parent->mnt_child.next; this_parent = this_parent->mnt_parent; goto resume; } return found; } /* * process a list of expirable mountpoints with the intent of discarding any * submounts of a specific parent mountpoint * * vfsmount_lock must be held for write */ static void shrink_submounts(struct mount *mnt, struct list_head *umounts) { LIST_HEAD(graveyard); struct mount *m; /* extract submounts of 'mountpoint' from the expiration list */ while (select_submounts(mnt, &graveyard)) { while (!list_empty(&graveyard)) { m = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(m->mnt_ns); umount_tree(m, 1, umounts); } } } /* * Some copy_from_user() implementations do not return the exact number of * bytes remaining to copy on a fault. But copy_mount_options() requires that. * Note that this function differs from copy_from_user() in that it will oops * on bad values of `to', rather than returning a short copy. */ static long exact_copy_from_user(void *to, const void __user * from, unsigned long n) { char *t = to; const char __user *f = from; char c; if (!access_ok(VERIFY_READ, from, n)) return n; while (n) { if (__get_user(c, f)) { memset(t, 0, n); break; } *t++ = c; f++; n--; } return n; } int copy_mount_options(const void __user * data, unsigned long *where) { int i; unsigned long page; unsigned long size; *where = 0; if (!data) return 0; if (!(page = __get_free_page(GFP_KERNEL))) return -ENOMEM; /* We only care that *some* data at the address the user * gave us is valid. Just in case, we'll zero * the remainder of the page. */ /* copy_from_user cannot cross TASK_SIZE ! */ size = TASK_SIZE - (unsigned long)data; if (size > PAGE_SIZE) size = PAGE_SIZE; i = size - exact_copy_from_user((void *)page, data, size); if (!i) { free_page(page); return -EFAULT; } if (i != PAGE_SIZE) memset((char *)page + i, 0, PAGE_SIZE - i); *where = page; return 0; } int copy_mount_string(const void __user *data, char **where) { char *tmp; if (!data) { *where = NULL; return 0; } tmp = strndup_user(data, PAGE_SIZE); if (IS_ERR(tmp)) return PTR_ERR(tmp); *where = tmp; return 0; } /* * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to * be given to the mount() call (ie: read-only, no-dev, no-suid etc). * * data is a (void *) that can point to any structure up to * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent * information (or be NULL). * * Pre-0.97 versions of mount() didn't have a flags word. * When the flags word was introduced its top half was required * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. * Therefore, if this magic number is present, it carries no information * and must be discarded. */ long do_mount(const char *dev_name, const char *dir_name, const char *type_page, unsigned long flags, void *data_page) { struct path path; int retval = 0; int mnt_flags = 0; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) flags &= ~MS_MGC_MSK; /* Basic sanity checks */ if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE)) return -EINVAL; if (data_page) ((char *)data_page)[PAGE_SIZE - 1] = 0; /* ... 
and get the mountpoint */ retval = kern_path(dir_name, LOOKUP_FOLLOW, &path); if (retval) return retval; retval = security_sb_mount(dev_name, &path, type_page, flags, data_page); if (!retval && !may_mount()) retval = -EPERM; if (retval) goto dput_out; /* Default to relatime unless overridden */ if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) mnt_flags |= MNT_NOSUID; if (flags & MS_NODEV) mnt_flags |= MNT_NODEV; if (flags & MS_NOEXEC) mnt_flags |= MNT_NOEXEC; if (flags & MS_NOATIME) mnt_flags |= MNT_NOATIME; if (flags & MS_NODIRATIME) mnt_flags |= MNT_NODIRATIME; if (flags & MS_STRICTATIME) mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT | MS_STRICTATIME); if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); else if (flags & MS_BIND) retval = do_loopback(&path, dev_name, flags & MS_REC); else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) retval = do_change_type(&path, flags); else if (flags & MS_MOVE) retval = do_move_mount(&path, dev_name); else retval = do_new_mount(&path, type_page, flags, mnt_flags, dev_name, data_page); dput_out: path_put(&path); return retval; } static void free_mnt_ns(struct mnt_namespace *ns) { proc_free_inum(ns->proc_inum); put_user_ns(ns->user_ns); kfree(ns); } /* * Assign a sequence number so we can detect when we attempt to bind * mount a reference to an older mount namespace into the current * mount namespace, preventing reference counting loops. A 64bit * counter incrementing even at 10GHz would take more than 58 years to * wrap, and namespaces are created far more slowly than that, so * wrap-around is effectively impossible and we can ignore it. */ static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1); static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) { struct mnt_namespace *new_ns; int ret; new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL); if (!new_ns) return ERR_PTR(-ENOMEM); ret = proc_alloc_inum(&new_ns->proc_inum); if (ret) { kfree(new_ns); return ERR_PTR(ret); } new_ns->seq = atomic64_add_return(1, &mnt_ns_seq); atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); init_waitqueue_head(&new_ns->poll); new_ns->event = 0; new_ns->user_ns = get_user_ns(user_ns); return new_ns; } /* * Allocate a new namespace structure and populate it with contents * copied from the namespace of the passed in task structure. */ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, struct user_namespace *user_ns, struct fs_struct *fs) { struct mnt_namespace *new_ns; struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; struct mount *p, *q; struct mount *old = mnt_ns->root; struct mount *new; int copy_flags; new_ns = alloc_mnt_ns(user_ns); if (IS_ERR(new_ns)) return new_ns; down_write(&namespace_sem); /* First pass: copy the tree topology */ copy_flags = CL_COPY_ALL | CL_EXPIRE; if (user_ns != mnt_ns->user_ns) copy_flags |= CL_SHARED_TO_SLAVE; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { up_write(&namespace_sem); free_mnt_ns(new_ns); return ERR_CAST(new); } new_ns->root = new; br_write_lock(&vfsmount_lock); list_add_tail(&new_ns->list, &new->mnt_list); br_write_unlock(&vfsmount_lock); /* * Second pass: switch the tsk->fs->* elements and mark new vfsmounts * as belonging to new namespace. We have already acquired a private * fs_struct, so tsk->fs->lock is not needed.
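 *
 * The loop below walks the old and new trees in lockstep; schematically
 * (illustrative summary):
 *
 *	p = old root, q = new root
 *	while (p) {
 *		q->mnt_ns = new_ns;
 *		fix up fs->root / fs->pwd if they pointed into p;
 *		p = next_mnt(p, old);	q = next_mnt(q, new);
 *	}
 *
 * copy_tree() preserved the tree shape, so p and q always name
 * corresponding mounts.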
*/ p = old; q = new; while (p) { q->mnt_ns = new_ns; if (fs) { if (&p->mnt == fs->root.mnt) { fs->root.mnt = mntget(&q->mnt); rootmnt = &p->mnt; } if (&p->mnt == fs->pwd.mnt) { fs->pwd.mnt = mntget(&q->mnt); pwdmnt = &p->mnt; } } p = next_mnt(p, old); q = next_mnt(q, new); } up_write(&namespace_sem); if (rootmnt) mntput(rootmnt); if (pwdmnt) mntput(pwdmnt); return new_ns; } struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; BUG_ON(!ns); get_mnt_ns(ns); if (!(flags & CLONE_NEWNS)) return ns; new_ns = dup_mnt_ns(ns, user_ns, new_fs); put_mnt_ns(ns); return new_ns; } /** * create_mnt_ns - creates a private namespace and adds a root filesystem * @mnt: pointer to the new root filesystem mountpoint */ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m) { struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns); if (!IS_ERR(new_ns)) { struct mount *mnt = real_mount(m); mnt->mnt_ns = new_ns; new_ns->root = mnt; list_add(&new_ns->list, &mnt->mnt_list); } else { mntput(m); } return new_ns; } struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) { struct mnt_namespace *ns; struct super_block *s; struct path path; int err; ns = create_mnt_ns(mnt); if (IS_ERR(ns)) return ERR_CAST(ns); err = vfs_path_lookup(mnt->mnt_root, mnt, name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); put_mnt_ns(ns); if (err) return ERR_PTR(err); /* trade a vfsmount reference for active sb one */ s = path.mnt->mnt_sb; atomic_inc(&s->s_active); mntput(path.mnt); /* lock the sucker */ down_write(&s->s_umount); /* ... and return the root of (sub)tree on it */ return path.dentry; } EXPORT_SYMBOL(mount_subtree); SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, char __user *, type, unsigned long, flags, void __user *, data) { int ret; char *kernel_type; struct filename *kernel_dir; char *kernel_dev; unsigned long data_page; ret = copy_mount_string(type, &kernel_type); if (ret < 0) goto out_type; kernel_dir = getname(dir_name); if (IS_ERR(kernel_dir)) { ret = PTR_ERR(kernel_dir); goto out_dir; } ret = copy_mount_string(dev_name, &kernel_dev); if (ret < 0) goto out_dev; ret = copy_mount_options(data, &data_page); if (ret < 0) goto out_data; ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags, (void *) data_page); free_page(data_page); out_data: kfree(kernel_dev); out_dev: putname(kernel_dir); out_dir: kfree(kernel_type); out_type: return ret; } /* * Return true if path is reachable from root * * namespace_sem or vfsmount_lock is held */ bool is_path_reachable(struct mount *mnt, struct dentry *dentry, const struct path *root) { while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { dentry = mnt->mnt_mountpoint; mnt = mnt->mnt_parent; } return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); } int path_is_under(struct path *path1, struct path *path2) { int res; br_read_lock(&vfsmount_lock); res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); br_read_unlock(&vfsmount_lock); return res; } EXPORT_SYMBOL(path_is_under); /* * pivot_root Semantics: * Moves the root file system of the current process to the directory put_old, * makes new_root as the new root file system of the current process, and sets * root/cwd of all processes which had them on the current root to new_root. * * Restrictions: * The new_root and put_old must be directories, and must not be on the * same file system as the current process root. 
The put_old must be * underneath new_root, i.e. adding a non-zero number of /.. to the string * pointed to by put_old must yield the same directory as new_root. No other * file system may be mounted on put_old. After all, new_root is a mountpoint. * * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives * in this situation. * * Notes: * - we don't move root/cwd if they are not at the root (reason: if something * cared enough to change them, it's probably wrong to force them elsewhere) * - it's okay to pick a root that isn't the root of a file system, e.g. * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root * first. */ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, const char __user *, put_old) { struct path new, old, parent_path, root_parent, root; struct mount *new_mnt, *root_mnt; int error; if (!may_mount()) return -EPERM; error = user_path_dir(new_root, &new); if (error) goto out0; error = user_path_dir(put_old, &old); if (error) goto out1; error = security_sb_pivotroot(&old, &new); if (error) goto out2; get_fs_root(current->fs, &root); error = lock_mount(&old); if (error) goto out3; error = -EINVAL; new_mnt = real_mount(new.mnt); root_mnt = real_mount(root.mnt); if (IS_MNT_SHARED(real_mount(old.mnt)) || IS_MNT_SHARED(new_mnt->mnt_parent) || IS_MNT_SHARED(root_mnt->mnt_parent)) goto out4; if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) goto out4; error = -ENOENT; if (d_unlinked(new.dentry)) goto out4; if (d_unlinked(old.dentry)) goto out4; error = -EBUSY; if (new.mnt == root.mnt || old.mnt == root.mnt) goto out4; /* loop, on the same file system */ error = -EINVAL; if (root.mnt->mnt_root != root.dentry) goto out4; /* not a mountpoint */ if (!mnt_has_parent(root_mnt)) goto out4; /* not attached */ if (new.mnt->mnt_root != new.dentry) goto out4; /* not a mountpoint */ if (!mnt_has_parent(new_mnt)) goto out4; /* not attached */ /* make sure we can reach put_old from new_root */ if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new)) goto out4; br_write_lock(&vfsmount_lock); detach_mnt(new_mnt, &parent_path); detach_mnt(root_mnt, &root_parent); /* mount old root on put_old */ attach_mnt(root_mnt, &old); /* mount new_root on / */ attach_mnt(new_mnt, &root_parent); touch_mnt_namespace(current->nsproxy->mnt_ns); br_write_unlock(&vfsmount_lock); chroot_fs_refs(&root, &new); error = 0; out4: unlock_mount(&old); if (!error) { path_put(&root_parent); path_put(&parent_path); } out3: path_put(&root); out2: path_put(&old); out1: path_put(&new); out0: return error; } static void __init init_mount_tree(void) { struct vfsmount *mnt; struct mnt_namespace *ns; struct path root; struct file_system_type *type; type = get_fs_type("rootfs"); if (!type) panic("Can't find rootfs type"); mnt = vfs_kern_mount(type, 0, "rootfs", NULL); put_filesystem(type); if (IS_ERR(mnt)) panic("Can't create rootfs"); ns = create_mnt_ns(mnt); if (IS_ERR(ns)) panic("Can't allocate initial namespace"); init_task.nsproxy->mnt_ns = ns; get_mnt_ns(ns); root.mnt = mnt; root.dentry = mnt->mnt_root; set_fs_pwd(current->fs, &root); set_fs_root(current->fs, &root); } void __init mnt_init(void) { unsigned u; int err; init_rwsem(&namespace_sem); mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); if 
(!mount_hashtable) panic("Failed to allocate mount hash table\n"); printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE); for (u = 0; u < HASH_SIZE; u++) INIT_LIST_HEAD(&mount_hashtable[u]); br_lock_init(&vfsmount_lock); err = sysfs_init(); if (err) printk(KERN_WARNING "%s: sysfs_init error: %d\n", __func__, err); fs_kobj = kobject_create_and_add("fs", NULL); if (!fs_kobj) printk(KERN_WARNING "%s: kobj create error\n", __func__); init_rootfs(); init_mount_tree(); } void put_mnt_ns(struct mnt_namespace *ns) { LIST_HEAD(umount_list); if (!atomic_dec_and_test(&ns->count)) return; down_write(&namespace_sem); br_write_lock(&vfsmount_lock); umount_tree(ns->root, 0, &umount_list); br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); free_mnt_ns(ns); } struct vfsmount *kern_mount_data(struct file_system_type *type, void *data) { struct vfsmount *mnt; mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data); if (!IS_ERR(mnt)) { /* * it is a longterm mount, don't release mnt until * we unmount before file sys is unregistered */ real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; } return mnt; } EXPORT_SYMBOL_GPL(kern_mount_data); void kern_unmount(struct vfsmount *mnt) { /* release long term mount so mount point can be released */ if (!IS_ERR_OR_NULL(mnt)) { br_write_lock(&vfsmount_lock); real_mount(mnt)->mnt_ns = NULL; br_write_unlock(&vfsmount_lock); mntput(mnt); } } EXPORT_SYMBOL(kern_unmount); bool our_mnt(struct vfsmount *mnt) { return check_mnt(real_mount(mnt)); } bool current_chrooted(void) { /* Does the current process have a non-standard root */ struct path ns_root; struct path fs_root; bool chrooted; /* Find the namespace root */ ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt; ns_root.dentry = ns_root.mnt->mnt_root; path_get(&ns_root); while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) ; get_fs_root(current->fs, &fs_root); chrooted = !path_equal(&fs_root, &ns_root); path_put(&fs_root); path_put(&ns_root); return chrooted; } static void *mntns_get(struct task_struct *task) { struct mnt_namespace *ns = NULL; struct nsproxy *nsproxy; rcu_read_lock(); nsproxy = task_nsproxy(task); if (nsproxy) { ns = nsproxy->mnt_ns; get_mnt_ns(ns); } rcu_read_unlock(); return ns; } static void mntns_put(void *ns) { put_mnt_ns(ns); } static int mntns_install(struct nsproxy *nsproxy, void *ns) { struct fs_struct *fs = current->fs; struct mnt_namespace *mnt_ns = ns; struct path root; if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) || !nsown_capable(CAP_SYS_CHROOT) || !nsown_capable(CAP_SYS_ADMIN)) return -EPERM; if (fs->users != 1) return -EINVAL; get_mnt_ns(mnt_ns); put_mnt_ns(nsproxy->mnt_ns); nsproxy->mnt_ns = mnt_ns; /* Find the root */ root.mnt = &mnt_ns->root->mnt; root.dentry = mnt_ns->root->mnt.mnt_root; path_get(&root); while(d_mountpoint(root.dentry) && follow_down_one(&root)) ; /* Update the pwd and root */ set_fs_pwd(fs, &root); set_fs_root(fs, &root); path_put(&root); return 0; } static unsigned int mntns_inum(void *ns) { struct mnt_namespace *mnt_ns = ns; return mnt_ns->proc_inum; } const struct proc_ns_operations mntns_operations = { .name = "mnt", .type = CLONE_NEWNS, .get = mntns_get, .put = mntns_put, .install = mntns_install, .inum = mntns_inum, };
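/*
 * Illustrative userspace sketch (not part of the kernel sources): the
 * pivot_root(2) rules documented above can be exercised from a private
 * mount namespace roughly as follows. The "/newroot" path (a prepared
 * root filesystem containing an old/ directory) is an assumption
 * invented for this example; pivot_root has no glibc wrapper, so it is
 * invoked via syscall(2), and the caller needs CAP_SYS_ADMIN.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Work in a private mount namespace so the changes stay local. */
	if (unshare(CLONE_NEWNS) < 0) {
		perror("unshare");
		return 1;
	}
	/* Keep mount events from propagating back to the parent namespace. */
	if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL) < 0) {
		perror("make / private");
		return 1;
	}
	/* new_root must itself be a mount point: bind it onto itself. */
	if (mount("/newroot", "/newroot", NULL, MS_BIND | MS_REC, NULL) < 0) {
		perror("bind /newroot");
		return 1;
	}
	/* put_old ("/newroot/old" here) must be underneath new_root. */
	if (syscall(SYS_pivot_root, "/newroot", "/newroot/old") < 0) {
		perror("pivot_root");
		return 1;
	}
	if (chdir("/") < 0) {
		perror("chdir");
		return 1;
	}
	/* The old root is now visible at /old; detach it lazily. */
	if (umount2("/old", MNT_DETACH) < 0)
		perror("umount2");
	return 0;
}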
/* * debugfs.c - ACPI debugfs interface to userspace. */ #include <linux/init.h> #include <linux/debugfs.h> #include <acpi/acpi_drivers.h> #define _COMPONENT ACPI_SYSTEM_COMPONENT ACPI_MODULE_NAME("debugfs"); struct dentry *acpi_debugfs_dir; EXPORT_SYMBOL_GPL(acpi_debugfs_dir); void __init acpi_debugfs_init(void) { acpi_debugfs_dir = debugfs_create_dir("acpi", NULL); }
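/*
 * Illustrative sketch (hypothetical module, not part of this file): code
 * elsewhere can attach its own entries under the exported acpi_debugfs_dir.
 * The "example" file name, the backing buffer, the init/exit pair and the
 * <linux/acpi.h> include carrying the acpi_debugfs_dir declaration are all
 * assumptions invented for this example.
 */
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/string.h>

static char example_buf[] = "hello from acpi debugfs\n";
static struct dentry *example_dentry;

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	/* Copy out of the static buffer, honouring offset and length. */
	return simple_read_from_buffer(buf, count, ppos, example_buf,
				       strlen(example_buf));
}

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.read  = example_read,
};

static int __init example_init(void)
{
	/* acpi_debugfs_dir may be NULL if debugfs registration failed. */
	if (!acpi_debugfs_dir)
		return -ENODEV;
	example_dentry = debugfs_create_file("example", S_IRUGO,
					     acpi_debugfs_dir, NULL,
					     &example_fops);
	return example_dentry ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	debugfs_remove(example_dentry);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");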
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * PACKET - implements raw packet sockets. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Fixes: * Alan Cox : verify_area() now used correctly * Alan Cox : new skbuff lists, look ma no backlogs! * Alan Cox : tidied skbuff lists. * Alan Cox : Now uses generic datagram routines I * added. Also fixed the peek/read crash * from all old Linux datagram code. * Alan Cox : Uses the improved datagram code. * Alan Cox : Added NULL's for socket options. * Alan Cox : Re-commented the code. * Alan Cox : Use new kernel side addressing * Rob Janssen : Correct MTU usage. * Dave Platt : Counter leaks caused by incorrect * interrupt locking and some slightly * dubious gcc output. Can you read * compiler: it said _VOLATILE_ * Richard Kooijman : Timestamp fixes. * Alan Cox : New buffers. Use sk->mac.raw. * Alan Cox : sendmsg/recvmsg support. * Alan Cox : Protocol setting support * Alexey Kuznetsov : Untied from IPv4 stack. * Cyrus Durgin : Fixed kerneld for kmod. * Michal Ostrowski : Module initialization cleanup. * Ulises Alonso : Frame number limit removal and * packet_set_ring memory leak. * Eric Biederman : Allow for > 8 byte hardware addresses. * The convention is that longer addresses * will simply extend the hardware address * byte arrays at the end of sockaddr_ll * and packet_mreq. * Johann Baudy : Added TX RING. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/types.h> #include <linux/mm.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_packet.h> #include <linux/wireless.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/if_vlan.h> #include <linux/virtio_net.h> #include <linux/errqueue.h> #include <linux/net_tstamp.h> #ifdef CONFIG_INET #include <net/inet_common.h> #endif /* Assumptions: - if device has no dev->hard_header routine, it adds and removes ll header inside itself. In this case ll header is invisible outside of device, but higher levels still should reserve dev->hard_header_len. Some devices are enough clever to reallocate skb, when header will not fit to reserved space (tunnel), another ones are silly (PPP). - packet socket receives packets with pulled ll header, so that SOCK_RAW should push it back. On receive: ----------- Incoming, dev->hard_header!=NULL mac_header -> ll header data -> data Outgoing, dev->hard_header!=NULL mac_header -> ll header data -> ll header Incoming, dev->hard_header==NULL mac_header -> UNKNOWN position. 
It is very likely that it points to the ll header.
   PPP does this, which is wrong, because it introduces asymmetry
   between the rx and tx paths.
     data       -> data

   Outgoing, dev->hard_header==NULL
     mac_header -> data. ll header is still not built!
     data       -> data

   Resume
     If dev->hard_header==NULL we are unlikely to restore sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   packet classifier depends on it.
 */

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring);

struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	atomic_t		pending;
};

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void packet_flush_mclist(struct sock *sk);

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
		return 0;
	}
}

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos,
frame_offset; union { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; void *raw; } h; pg_vec_pos = position / rb->frames_per_block; frame_offset = position % rb->frames_per_block; h.raw = rb->pg_vec[pg_vec_pos].buffer + (frame_offset * rb->frame_size); if (status != __packet_get_status(po, h.raw)) return NULL; return h.raw; } static inline void *packet_current_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { return packet_lookup_frame(po, rb, rb->head, status); } static inline void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; return packet_lookup_frame(po, rb, previous, status); } static inline void packet_increment_head(struct packet_ring_buffer *buff) { buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; } static inline struct packet_sock *pkt_sk(struct sock *sk) { return (struct packet_sock *)sk; } static void packet_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_error_queue); WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Attempt to release alive packet socket: %p\n", sk); return; } sk_refcnt_debug_dec(sk); } static const struct proto_ops packet_ops; static const struct proto_ops packet_ops_spkt; static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct sockaddr_pkt *spkt; /* * When we registered the protocol we saved the socket in the data * field for just this event. */ sk = pt->af_packet_priv; /* * Yank back the headers [hope the device set this * right or kerboom...] * * Incoming packets have ll header pulled, * push it back. * * For outgoing ones skb->data == skb_mac_header(skb) * so that this procedure is noop. */ if (skb->pkt_type == PACKET_LOOPBACK) goto out; if (!net_eq(dev_net(dev), sock_net(sk))) goto out; skb = skb_share_check(skb, GFP_ATOMIC); if (skb == NULL) goto oom; /* drop any routing info */ skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spkt = &PACKET_SKB_CB(skb)->sa.pkt; skb_push(skb, skb->data - skb_mac_header(skb)); /* * The SOCK_PACKET socket receives _all_ frames. */ spkt->spkt_family = dev->type; strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); spkt->spkt_protocol = skb->protocol; /* * Charge the memory to the socket. This is done specifically * to prevent sockets using all the memory up. */ if (sock_queue_rcv_skb(sk, skb) == 0) return 0; out: kfree_skb(skb); oom: return 0; } /* * Output a raw packet to a device layer. This bypasses all the other * protocol layers and you must therefore supply it with a complete frame */ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name; struct sk_buff *skb = NULL; struct net_device *dev; __be16 proto = 0; int err; /* * Get and verify the address. 
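	 *
	 *	SOCK_PACKET always requires a destination address: with no
	 *	sockaddr_pkt we fail with -ENOTCONN below, and the protocol
	 *	number is only taken from a full-length sockaddr_pkt.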
 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

static inline unsigned int run_filter(const struct sk_buff *skb,
				      const struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}

/*
 * This function makes lazy skb cloning in hope that most of packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return skb to original state on exit,
 * we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
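		 *
		 * Hence SOCK_RAW gets the ll header pushed back so the
		 * user sees the whole frame, while for SOCK_DGRAM an
		 * outgoing packet (whose data still points at the ll
		 * header) is pulled up to the network header.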
*/ if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (snaplen > res) snaplen = res; if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned)sk->sk_rcvbuf) goto drop_n_acct; if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (nskb == NULL) goto drop_n_acct; if (skb_head != skb->data) { skb->data = skb_head; skb->len = skb_len; } kfree_skb(skb); skb = nskb; } BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 > sizeof(skb->cb)); sll = &PACKET_SKB_CB(skb)->sa.ll; sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; sll->sll_halen = dev_parse_header(skb, sll->sll_addr); PACKET_SKB_CB(skb)->origlen = skb->len; if (pskb_trim(skb, snaplen)) goto drop_n_acct; skb_set_owner_r(skb, sk); skb->dev = NULL; skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spin_lock(&sk->sk_receive_queue.lock); po->stats.tp_packets++; skb->dropcount = atomic_read(&sk->sk_drops); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk, skb->len); return 0; drop_n_acct: po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: consume_skb(skb); return 0; } static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct packet_sock *po; struct sockaddr_ll *sll; union { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; void *raw; } h; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; unsigned short macoff, netoff, hdrlen; struct sk_buff *copy_skb = NULL; struct timeval tv; struct timespec ts; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; if (dev->header_ops) { if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } if (skb->ip_summed == CHECKSUM_PARTIAL) status |= TP_STATUS_CSUMNOTREADY; snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (snaplen > res) snaplen = res; if (sk->sk_type == SOCK_DGRAM) { macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + po->tp_reserve; } else { unsigned maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 
16 : maclen)) + po->tp_reserve; macoff = netoff - maclen; } if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && atomic_read(&sk->sk_rmem_alloc) + skb->truesize < (unsigned)sk->sk_rcvbuf) { if (skb_shared(skb)) { copy_skb = skb_clone(skb, GFP_ATOMIC); } else { copy_skb = skb_get(skb); skb_head = skb->data; } if (copy_skb) skb_set_owner_r(copy_skb, sk); } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) snaplen = 0; } spin_lock(&sk->sk_receive_queue.lock); h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL); if (!h.raw) goto ring_is_full; packet_increment_head(&po->rx_ring); po->stats.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; __skb_queue_tail(&sk->sk_receive_queue, copy_skb); } if (!po->stats.tp_drops) status &= ~TP_STATUS_LOSING; spin_unlock(&sk->sk_receive_queue.lock); skb_copy_bits(skb, 0, h.raw + macoff, snaplen); switch (po->tp_version) { case TPACKET_V1: h.h1->tp_len = skb->len; h.h1->tp_snaplen = snaplen; h.h1->tp_mac = macoff; h.h1->tp_net = netoff; if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) && shhwtstamps->syststamp.tv64) tv = ktime_to_timeval(shhwtstamps->syststamp); else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) && shhwtstamps->hwtstamp.tv64) tv = ktime_to_timeval(shhwtstamps->hwtstamp); else if (skb->tstamp.tv64) tv = ktime_to_timeval(skb->tstamp); else do_gettimeofday(&tv); h.h1->tp_sec = tv.tv_sec; h.h1->tp_usec = tv.tv_usec; hdrlen = sizeof(*h.h1); break; case TPACKET_V2: h.h2->tp_len = skb->len; h.h2->tp_snaplen = snaplen; h.h2->tp_mac = macoff; h.h2->tp_net = netoff; if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) && shhwtstamps->syststamp.tv64) ts = ktime_to_timespec(shhwtstamps->syststamp); else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) && shhwtstamps->hwtstamp.tv64) ts = ktime_to_timespec(shhwtstamps->hwtstamp); else if (skb->tstamp.tv64) ts = ktime_to_timespec(skb->tstamp); else getnstimeofday(&ts); h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; if (vlan_tx_tag_present(skb)) { h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); status |= TP_STATUS_VLAN_VALID; } else { h.h2->tp_vlan_tci = 0; } h.h2->tp_padding = 0; hdrlen = sizeof(*h.h2); break; default: BUG(); } sll = h.raw + TPACKET_ALIGN(hdrlen); sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; __packet_set_status(po, h.raw, status); smp_mb(); #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 { u8 *start, *end; end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen); for (start = h.raw; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); } #endif sk->sk_data_ready(sk, 0); drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: kfree_skb(skb); return 0; ring_is_full: po->stats.tp_drops++; spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk, 0); kfree_skb(copy_skb); goto drop_n_restore; } static void tpacket_destruct_skb(struct sk_buff *skb) { struct packet_sock *po = pkt_sk(skb->sk); void *ph; BUG_ON(skb == NULL); if (likely(po->tx_ring.pg_vec)) { ph = skb_shinfo(skb)->destructor_arg; BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING); BUG_ON(atomic_read(&po->tx_ring.pending) == 0); atomic_dec(&po->tx_ring.pending); __packet_set_status(po, ph, TP_STATUS_AVAILABLE); } sock_wfree(skb); } static int 
tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, int size_max, __be16 proto, unsigned char *addr) { union { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; void *raw; } ph; int to_write, offset, len, tp_len, nr_frags, len_max; struct socket *sock = po->sk.sk_socket; struct page *page; void *data; int err; ph.raw = frame; skb->protocol = proto; skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; skb_shinfo(skb)->destructor_arg = ph.raw; switch (po->tp_version) { case TPACKET_V2: tp_len = ph.h2->tp_len; break; default: tp_len = ph.h1->tp_len; break; } if (unlikely(tp_len > size_max)) { pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); return -EMSGSIZE; } skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); to_write = tp_len; if (sock->type == SOCK_DGRAM) { err = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, tp_len); if (unlikely(err < 0)) return -EINVAL; } else if (dev->hard_header_len) { /* net device doesn't like empty head */ if (unlikely(tp_len <= dev->hard_header_len)) { pr_err("packet size is too short (%d < %d)\n", tp_len, dev->hard_header_len); return -EINVAL; } skb_push(skb, dev->hard_header_len); err = skb_store_bits(skb, 0, data, dev->hard_header_len); if (unlikely(err)) return err; data += dev->hard_header_len; to_write -= dev->hard_header_len; } err = -EFAULT; offset = offset_in_page(data); len_max = PAGE_SIZE - offset; len = ((to_write > len_max) ? len_max : to_write); skb->data_len = to_write; skb->len += to_write; skb->truesize += to_write; atomic_add(to_write, &po->sk.sk_wmem_alloc); while (likely(to_write)) { nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { pr_err("Packet exceed the number of skb frags(%lu)\n", MAX_SKB_FRAGS); return -EFAULT; } page = pgv_to_page(data); data += len; flush_dcache_page(page); get_page(page); skb_fill_page_desc(skb, nr_frags, page, offset, len); to_write -= len; offset = 0; len_max = PAGE_SIZE; len = ((to_write > len_max) ? 
len_max : to_write); } return tp_len; } static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) { struct sk_buff *skb; struct net_device *dev; __be16 proto; int ifindex, err, reserve = 0; void *ph; struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; int tp_len, size_max; unsigned char *addr; int len_sum = 0; int status = 0; mutex_lock(&po->pg_vec_lock); err = -EBUSY; if (saddr == NULL) { ifindex = po->ifindex; proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; ifindex = saddr->sll_ifindex; proto = saddr->sll_protocol; addr = saddr->sll_addr; } dev = dev_get_by_index(sock_net(&po->sk), ifindex); err = -ENXIO; if (unlikely(dev == NULL)) goto out; reserve = dev->hard_header_len; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_put; size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); if (size_max > dev->mtu + reserve) size_max = dev->mtu + reserve; do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); if (unlikely(ph == NULL)) { schedule(); continue; } status = TP_STATUS_SEND_REQUEST; skb = sock_alloc_send_skb(&po->sk, LL_ALLOCATED_SPACE(dev) + sizeof(struct sockaddr_ll), 0, &err); if (unlikely(skb == NULL)) goto out_status; tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, addr); if (unlikely(tp_len < 0)) { if (po->tp_loss) { __packet_set_status(po, ph, TP_STATUS_AVAILABLE); packet_increment_head(&po->tx_ring); kfree_skb(skb); continue; } else { status = TP_STATUS_WRONG_FORMAT; err = tp_len; goto out_status; } } skb->destructor = tpacket_destruct_skb; __packet_set_status(po, ph, TP_STATUS_SENDING); atomic_inc(&po->tx_ring.pending); status = TP_STATUS_SEND_REQUEST; err = dev_queue_xmit(skb); if (unlikely(err > 0)) { err = net_xmit_errno(err); if (err && __packet_get_status(po, ph) == TP_STATUS_AVAILABLE) { /* skb was destructed already */ skb = NULL; goto out_status; } /* * skb was dropped but not destructed yet; * let's treat it like congestion or err < 0 */ err = 0; } packet_increment_head(&po->tx_ring); len_sum += tp_len; } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT)) && (atomic_read(&po->tx_ring.pending)))) ); err = len_sum; goto out_put; out_status: __packet_set_status(po, ph, status); kfree_skb(skb); out_put: dev_put(dev); out: mutex_unlock(&po->pg_vec_lock); return err; } static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, size_t reserve, size_t len, size_t linear, int noblock, int *err) { struct sk_buff *skb; /* Under a page? Don't bother with paged skb. */ if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, err); if (!skb) return NULL; skb_reserve(skb, reserve); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; struct sk_buff *skb; struct net_device *dev; __be16 proto; unsigned char *addr; int ifindex, err, reserve = 0; struct virtio_net_hdr vnet_hdr = { 0 }; int offset = 0; int vnet_hdr_len; struct packet_sock *po = pkt_sk(sk); unsigned short gso_type = 0; /* * Get and verify the address. 
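	 *
	 *	Unlike SOCK_PACKET, a bound PF_PACKET socket may omit the
	 *	address entirely; the ifindex and protocol number recorded
	 *	at bind time are then used instead.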
*/ if (saddr == NULL) { ifindex = po->ifindex; proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; ifindex = saddr->sll_ifindex; proto = saddr->sll_protocol; addr = saddr->sll_addr; } dev = dev_get_by_index(sock_net(sk), ifindex); err = -ENXIO; if (dev == NULL) goto out_unlock; if (sock->type == SOCK_RAW) reserve = dev->hard_header_len; err = -ENETDOWN; if (!(dev->flags & IFF_UP)) goto out_unlock; if (po->has_vnet_hdr) { vnet_hdr_len = sizeof(vnet_hdr); err = -EINVAL; if (len < vnet_hdr_len) goto out_unlock; len -= vnet_hdr_len; err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov, vnet_hdr_len); if (err < 0) goto out_unlock; if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 > vnet_hdr.hdr_len)) vnet_hdr.hdr_len = vnet_hdr.csum_start + vnet_hdr.csum_offset + 2; err = -EINVAL; if (vnet_hdr.hdr_len > len) goto out_unlock; if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; break; default: goto out_unlock; } if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) gso_type |= SKB_GSO_TCP_ECN; if (vnet_hdr.gso_size == 0) goto out_unlock; } } err = -EMSGSIZE; if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN)) goto out_unlock; err = -ENOBUFS; skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev), LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; skb_set_network_header(skb, reserve); err = -EINVAL; if (sock->type == SOCK_DGRAM && (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0) goto out_free; /* Returns -EFAULT on error */ err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); if (err) goto out_free; err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); if (err < 0) goto out_free; if (!gso_type && (len > dev->mtu + reserve)) { /* Earlier code assumed this would be a VLAN pkt, * double-check this now that we have the actual * packet in hand. */ struct ethhdr *ehdr; skb_reset_mac_header(skb); ehdr = eth_hdr(skb); if (ehdr->h_proto != htons(ETH_P_8021Q)) { err = -EMSGSIZE; goto out_free; } } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; if (po->has_vnet_hdr) { if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { if (!skb_partial_csum_set(skb, vnet_hdr.csum_start, vnet_hdr.csum_offset)) { err = -EINVAL; goto out_free; } } skb_shinfo(skb)->gso_size = vnet_hdr.gso_size; skb_shinfo(skb)->gso_type = gso_type; /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; len += vnet_hdr_len; } /* * Now send it */ err = dev_queue_xmit(skb); if (err > 0 && (err = net_xmit_errno(err)) != 0) goto out_unlock; dev_put(dev); return len; out_free: kfree_skb(skb); out_unlock: if (dev) dev_put(dev); out: return err; } static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); if (po->tx_ring.pg_vec) return tpacket_snd(po, msg); else return packet_snd(sock, msg, len); } /* * Close a PACKET socket. This is fairly simple. 
We immediately go * to 'closed' state and remove our protocol entry in the device list. */ static int packet_release(struct socket *sock) { struct sock *sk = sock->sk; struct packet_sock *po; struct net *net; struct tpacket_req req; if (!sk) return 0; net = sock_net(sk); po = pkt_sk(sk); spin_lock_bh(&net->packet.sklist_lock); sk_del_node_init_rcu(sk); sock_prot_inuse_add(net, sk->sk_prot, -1); spin_unlock_bh(&net->packet.sklist_lock); spin_lock(&po->bind_lock); if (po->running) { /* * Remove from protocol table */ po->running = 0; po->num = 0; __dev_remove_pack(&po->prot_hook); __sock_put(sk); } spin_unlock(&po->bind_lock); packet_flush_mclist(sk); memset(&req, 0, sizeof(req)); if (po->rx_ring.pg_vec) packet_set_ring(sk, &req, 1, 0); if (po->tx_ring.pg_vec) packet_set_ring(sk, &req, 1, 1); synchronize_net(); /* * Now the socket is dead. No more input will appear. */ sock_orphan(sk); sock->sk = NULL; /* Purge queues */ skb_queue_purge(&sk->sk_receive_queue); sk_refcnt_debug_release(sk); sock_put(sk); return 0; } /* * Attach a packet hook. */ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol) { struct packet_sock *po = pkt_sk(sk); /* * Detach an existing hook if present. */ lock_sock(sk); spin_lock(&po->bind_lock); if (po->running) { __sock_put(sk); po->running = 0; po->num = 0; spin_unlock(&po->bind_lock); dev_remove_pack(&po->prot_hook); spin_lock(&po->bind_lock); } po->num = protocol; po->prot_hook.type = protocol; po->prot_hook.dev = dev; po->ifindex = dev ? dev->ifindex : 0; if (protocol == 0) goto out_unlock; if (!dev || (dev->flags & IFF_UP)) { dev_add_pack(&po->prot_hook); sock_hold(sk); po->running = 1; } else { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } out_unlock: spin_unlock(&po->bind_lock); release_sock(sk); return 0; } /* * Bind a packet socket to a device */ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; char name[15]; struct net_device *dev; int err = -ENODEV; /* * Check legality */ if (addr_len != sizeof(struct sockaddr)) return -EINVAL; strlcpy(name, uaddr->sa_data, sizeof(name)); dev = dev_get_by_name(sock_net(sk), name); if (dev) { err = packet_do_bind(sk, dev, pkt_sk(sk)->num); dev_put(dev); } return err; } static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; struct sock *sk = sock->sk; struct net_device *dev = NULL; int err; /* * Check legality */ if (addr_len < sizeof(struct sockaddr_ll)) return -EINVAL; if (sll->sll_family != AF_PACKET) return -EINVAL; if (sll->sll_ifindex) { err = -ENODEV; dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex); if (dev == NULL) goto out; } err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num); if (dev) dev_put(dev); out: return err; } static struct proto packet_proto = { .name = "PACKET", .owner = THIS_MODULE, .obj_size = sizeof(struct packet_sock), }; /* * Create a packet of type SOCK_PACKET. 
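 *	(Despite the historical name, this creates every PF_PACKET socket
 *	type; SOCK_PACKET merely selects the legacy spkt ops.) The caller
 *	must have CAP_NET_RAW.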
*/ static int packet_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct packet_sock *po; __be16 proto = (__force __be16)protocol; /* weird, but documented */ int err; if (!capable(CAP_NET_RAW)) return -EPERM; if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && sock->type != SOCK_PACKET) return -ESOCKTNOSUPPORT; sock->state = SS_UNCONNECTED; err = -ENOBUFS; sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto); if (sk == NULL) goto out; sock->ops = &packet_ops; if (sock->type == SOCK_PACKET) sock->ops = &packet_ops_spkt; sock_init_data(sock, sk); po = pkt_sk(sk); sk->sk_family = PF_PACKET; po->num = proto; sk->sk_destruct = packet_sock_destruct; sk_refcnt_debug_inc(sk); /* * Attach a protocol block */ spin_lock_init(&po->bind_lock); mutex_init(&po->pg_vec_lock); po->prot_hook.func = packet_rcv; if (sock->type == SOCK_PACKET) po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; if (proto) { po->prot_hook.type = proto; dev_add_pack(&po->prot_hook); sock_hold(sk); po->running = 1; } spin_lock_bh(&net->packet.sklist_lock); sk_add_node_rcu(sk, &net->packet.sklist); sock_prot_inuse_add(net, &packet_proto, 1); spin_unlock_bh(&net->packet.sklist_lock); return 0; out: return err; } static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len) { struct sock_exterr_skb *serr; struct sk_buff *skb, *skb2; int copied, err; err = -EAGAIN; skb = skb_dequeue(&sk->sk_error_queue); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free_skb; sock_recv_timestamp(msg, sk, skb); serr = SKB_EXT_ERR(skb); put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP, sizeof(serr->ee), &serr->ee); msg->msg_flags |= MSG_ERRQUEUE; err = copied; /* Reset and regenerate socket error */ spin_lock_bh(&sk->sk_error_queue.lock); sk->sk_err = 0; if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; spin_unlock_bh(&sk->sk_error_queue.lock); sk->sk_error_report(sk); } else spin_unlock_bh(&sk->sk_error_queue.lock); out_free_skb: kfree_skb(skb); out: return err; } /* * Pull a packet from our receive queue and hand it to the user. * If necessary we block. */ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; struct sockaddr_ll *sll; int vnet_hdr_len = 0; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) goto out; #if 0 /* What error should we return now? EUNATTACH? */ if (pkt_sk(sk)->ifindex < 0) return -ENODEV; #endif if (flags & MSG_ERRQUEUE) { err = packet_recv_error(sk, msg, len); goto out; } /* * Call the generic datagram receiver. This handles all sorts * of horrible races and re-entrancy so we can forget about it * in the protocol layers. * * Now it will return ENETDOWN, if device have just gone down, * but then it will block. */ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); /* * An error occurred so return it. Because skb_recv_datagram() * handles the blocking we don't see and worry about blocking * retries. 
*/ if (skb == NULL) goto out; if (pkt_sk(sk)->has_vnet_hdr) { struct virtio_net_hdr vnet_hdr = { 0 }; err = -EINVAL; vnet_hdr_len = sizeof(vnet_hdr); if (len < vnet_hdr_len) goto out_free; len -= vnet_hdr_len; if (skb_is_gso(skb)) { struct skb_shared_info *sinfo = skb_shinfo(skb); /* This is a hint as to how much should be linear. */ vnet_hdr.hdr_len = skb_headlen(skb); vnet_hdr.gso_size = sinfo->gso_size; if (sinfo->gso_type & SKB_GSO_TCPV4) vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (sinfo->gso_type & SKB_GSO_UDP) vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else if (sinfo->gso_type & SKB_GSO_FCOE) goto out_free; else BUG(); if (sinfo->gso_type & SKB_GSO_TCP_ECN) vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; if (skb->ip_summed == CHECKSUM_PARTIAL) { vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; vnet_hdr.csum_start = skb_checksum_start_offset(skb); vnet_hdr.csum_offset = skb->csum_offset; } /* else everything is zero */ err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr, vnet_hdr_len); if (err < 0) goto out_free; } /* * If the address length field is there to be filled in, we fill * it in now. */ sll = &PACKET_SKB_CB(skb)->sa.ll; if (sock->type == SOCK_PACKET) msg->msg_namelen = sizeof(struct sockaddr_pkt); else msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); /* * You lose any data beyond the buffer you gave. If it worries a * user program they can ask the device for its MTU anyway. */ copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, msg->msg_namelen); if (pkt_sk(sk)->auxdata) { struct tpacket_auxdata aux; aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; aux.tp_len = PACKET_SKB_CB(skb)->origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); if (vlan_tx_tag_present(skb)) { aux.tp_vlan_tci = vlan_tx_tag_get(skb); aux.tp_status |= TP_STATUS_VLAN_VALID; } else { aux.tp_vlan_tci = 0; } aux.tp_padding = 0; put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } /* * Free or return the buffer as appropriate. Again this * hides all the races and re-entrancy issues from us. */ err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); out_free: skb_free_datagram(sk, skb); out: return err; } static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; if (peer) return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) strncpy(uaddr->sa_data, dev->name, 14); else memset(uaddr->sa_data, 0, 14); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr); return 0; } static int packet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); if (peer) return -EOPNOTSUPP; sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; sll->sll_pkttype = 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); } else { sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ sll->sll_halen = 0; } rcu_read_unlock(); *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; return 0; } static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) { switch (i->type) { case PACKET_MR_MULTICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_mc_add(dev, i->addr); else return dev_mc_del(dev, i->addr); break; case PACKET_MR_PROMISC: return dev_set_promiscuity(dev, what); break; case PACKET_MR_ALLMULTI: return dev_set_allmulti(dev, what); break; case PACKET_MR_UNICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_uc_add(dev, i->addr); else return dev_uc_del(dev, i->addr); break; default: break; } return 0; } static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) { for ( ; i; i = i->next) { if (i->ifindex == dev->ifindex) packet_dev_mc(dev, i, what); } } static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml, *i; struct net_device *dev; int err; rtnl_lock(); err = -ENODEV; dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); if (!dev) goto done; err = -EINVAL; if (mreq->mr_alen > dev->addr_len) goto done; err = -ENOBUFS; i = kmalloc(sizeof(*i), GFP_KERNEL); if (i == NULL) goto done; err = 0; for (ml = po->mclist; ml; ml = ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { ml->count++; /* Free the new element ... 
*/ kfree(i); goto done; } } i->type = mreq->mr_type; i->ifindex = mreq->mr_ifindex; i->alen = mreq->mr_alen; memcpy(i->addr, mreq->mr_address, i->alen); i->count = 1; i->next = po->mclist; po->mclist = i; err = packet_dev_mc(dev, i, 1); if (err) { po->mclist = i->next; kfree(i); } done: rtnl_unlock(); return err; } static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_mclist *ml, **mlp; rtnl_lock(); for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { if (--ml->count == 0) { struct net_device *dev; *mlp = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev) packet_dev_mc(dev, ml, -1); kfree(ml); } rtnl_unlock(); return 0; } } rtnl_unlock(); return -EADDRNOTAVAIL; } static void packet_flush_mclist(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml; if (!po->mclist) return; rtnl_lock(); while ((ml = po->mclist) != NULL) { struct net_device *dev; po->mclist = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev != NULL) packet_dev_mc(dev, ml, -1); kfree(ml); } rtnl_unlock(); } static int packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); int ret; if (level != SOL_PACKET) return -ENOPROTOOPT; switch (optname) { case PACKET_ADD_MEMBERSHIP: case PACKET_DROP_MEMBERSHIP: { struct packet_mreq_max mreq; int len = optlen; memset(&mreq, 0, sizeof(mreq)); if (len < sizeof(struct packet_mreq)) return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); if (copy_from_user(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; if (optname == PACKET_ADD_MEMBERSHIP) ret = packet_mc_add(sk, &mreq); else ret = packet_mc_drop(sk, &mreq); return ret; } case PACKET_RX_RING: case PACKET_TX_RING: { struct tpacket_req req; if (optlen < sizeof(req)) return -EINVAL; if (pkt_sk(sk)->has_vnet_hdr) return -EINVAL; if (copy_from_user(&req, optval, sizeof(req))) return -EFAULT; return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); } case PACKET_COPY_THRESH: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; return 0; } case PACKET_VERSION: { int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: po->tp_version = val; return 0; default: return -EINVAL; } } case PACKET_RESERVE: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_reserve = val; return 0; } case PACKET_LOSS: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_loss = !!val; return 0; } case PACKET_AUXDATA: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->auxdata = !!val; return 0; } case PACKET_ORIGDEV: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, 
sizeof(val))) return -EFAULT; po->origdev = !!val; return 0; } case PACKET_VNET_HDR: { int val; if (sock->type != SOCK_RAW) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->has_vnet_hdr = !!val; return 0; } case PACKET_TIMESTAMP: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; return 0; } default: return -ENOPROTOOPT; } } static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int len; int val; struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); void *data; struct tpacket_stats st; if (level != SOL_PACKET) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case PACKET_STATISTICS: if (len > sizeof(struct tpacket_stats)) len = sizeof(struct tpacket_stats); spin_lock_bh(&sk->sk_receive_queue.lock); st = po->stats; memset(&po->stats, 0, sizeof(st)); spin_unlock_bh(&sk->sk_receive_queue.lock); st.tp_packets += st.tp_drops; data = &st; break; case PACKET_AUXDATA: if (len > sizeof(int)) len = sizeof(int); val = po->auxdata; data = &val; break; case PACKET_ORIGDEV: if (len > sizeof(int)) len = sizeof(int); val = po->origdev; data = &val; break; case PACKET_VNET_HDR: if (len > sizeof(int)) len = sizeof(int); val = po->has_vnet_hdr; data = &val; break; case PACKET_VERSION: if (len > sizeof(int)) len = sizeof(int); val = po->tp_version; data = &val; break; case PACKET_HDRLEN: if (len > sizeof(int)) len = sizeof(int); if (copy_from_user(&val, optval, len)) return -EFAULT; switch (val) { case TPACKET_V1: val = sizeof(struct tpacket_hdr); break; case TPACKET_V2: val = sizeof(struct tpacket2_hdr); break; default: return -EINVAL; } data = &val; break; case PACKET_RESERVE: if (len > sizeof(unsigned int)) len = sizeof(unsigned int); val = po->tp_reserve; data = &val; break; case PACKET_LOSS: if (len > sizeof(unsigned int)) len = sizeof(unsigned int); val = po->tp_loss; data = &val; break; case PACKET_TIMESTAMP: if (len > sizeof(int)) len = sizeof(int); val = po->tp_tstamp; data = &val; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, data, len)) return -EFAULT; return 0; } static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) { struct sock *sk; struct hlist_node *node; struct net_device *dev = data; struct net *net = dev_net(dev); rcu_read_lock(); sk_for_each_rcu(sk, node, &net->packet.sklist) { struct packet_sock *po = pkt_sk(sk); switch (msg) { case NETDEV_UNREGISTER: if (po->mclist) packet_dev_mclist(dev, po->mclist, -1); /* fallthrough */ case NETDEV_DOWN: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->running) { __dev_remove_pack(&po->prot_hook); __sock_put(sk); po->running = 0; sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } if (msg == NETDEV_UNREGISTER) { po->ifindex = -1; po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); } break; case NETDEV_UP: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->num && !po->running) { dev_add_pack(&po->prot_hook); sock_hold(sk); po->running = 1; } spin_unlock(&po->bind_lock); } break; } } rcu_read_unlock(); return NOTIFY_DONE; } static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; 
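	/*
	 * Queue-occupancy and timestamp ioctls are handled here; the
	 * routing, ARP and interface address ioctls below are passed
	 * through to the inet layer when CONFIG_INET is enabled.
	 */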
switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *)arg); #ifdef CONFIG_INET case SIOCADDRT: case SIOCDELRT: case SIOCDARP: case SIOCGARP: case SIOCSARP: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: return inet_dgram_ops.ioctl(sock, cmd, arg); #endif default: return -ENOIOCTLCMD; } return 0; } static unsigned int packet_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned int mask = datagram_poll(file, sock, wait); spin_lock_bh(&sk->sk_receive_queue.lock); if (po->rx_ring.pg_vec) { if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL)) mask |= POLLIN | POLLRDNORM; } spin_unlock_bh(&sk->sk_receive_queue.lock); spin_lock_bh(&sk->sk_write_queue.lock); if (po->tx_ring.pg_vec) { if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) mask |= POLLOUT | POLLWRNORM; } spin_unlock_bh(&sk->sk_write_queue.lock); return mask; } /* Dirty? Well, I still did not learn better way to account * for user mmaps. */ static void packet_mm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_inc(&pkt_sk(sk)->mapped); } static void packet_mm_close(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_dec(&pkt_sk(sk)->mapped); } static const struct vm_operations_struct packet_mmap_ops = { .open = packet_mm_open, .close = packet_mm_close, }; static void free_pg_vec(struct pgv *pg_vec, unsigned int order, unsigned int len) { int i; for (i = 0; i < len; i++) { if (likely(pg_vec[i].buffer)) { if (is_vmalloc_addr(pg_vec[i].buffer)) vfree(pg_vec[i].buffer); else free_pages((unsigned long)pg_vec[i].buffer, order); pg_vec[i].buffer = NULL; } } kfree(pg_vec); } static inline char *alloc_one_pg_vec_page(unsigned long order) { char *buffer = NULL; gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* * __get_free_pages failed, fall back to vmalloc */ buffer = vzalloc((1 << order) * PAGE_SIZE); if (buffer) return buffer; /* * vmalloc failed, lets dig into swap here */ gfp_flags &= ~__GFP_NORETRY; buffer = (char *)__get_free_pages(gfp_flags, order); if (buffer) return buffer; /* * complete and utter failure */ return NULL; } static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) { unsigned int block_nr = req->tp_block_nr; struct pgv *pg_vec; int i; pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); if (unlikely(!pg_vec)) goto out; for (i = 0; i < block_nr; i++) { pg_vec[i].buffer = alloc_one_pg_vec_page(order); if (unlikely(!pg_vec[i].buffer)) goto out_free_pgvec; } out: return pg_vec; out_free_pgvec: free_pg_vec(pg_vec, order, block_nr); pg_vec = NULL; goto out; } static int packet_set_ring(struct sock 
*sk, struct tpacket_req *req, int closing, int tx_ring) { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; __be16 num; int err; rb = tx_ring ? &po->tx_ring : &po->rx_ring; rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; err = -EBUSY; if (!closing) { if (atomic_read(&po->mapped)) goto out; if (atomic_read(&rb->pending)) goto out; } if (req->tp_block_nr) { /* Sanity tests and some calculations */ err = -EBUSY; if (unlikely(rb->pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V1: po->tp_hdrlen = TPACKET_HDRLEN; break; case TPACKET_V2: po->tp_hdrlen = TPACKET2_HDRLEN; break; } err = -EINVAL; if (unlikely((int)req->tp_block_size <= 0)) goto out; if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) goto out; if (unlikely(req->tp_frame_size < po->tp_hdrlen + po->tp_reserve)) goto out; if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) goto out; rb->frames_per_block = req->tp_block_size/req->tp_frame_size; if (unlikely(rb->frames_per_block <= 0)) goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) goto out; err = -ENOMEM; order = get_order(req->tp_block_size); pg_vec = alloc_pg_vec(req, order); if (unlikely(!pg_vec)) goto out; } /* Done */ else { err = -EINVAL; if (unlikely(req->tp_frame_nr)) goto out; } lock_sock(sk); /* Detach socket from network */ spin_lock(&po->bind_lock); was_running = po->running; num = po->num; if (was_running) { __dev_remove_pack(&po->prot_hook); po->num = 0; po->running = 0; __sock_put(sk); } spin_unlock(&po->bind_lock); synchronize_net(); err = -EBUSY; mutex_lock(&po->pg_vec_lock); if (closing || atomic_read(&po->mapped) == 0) { err = 0; spin_lock_bh(&rb_queue->lock); swap(rb->pg_vec, pg_vec); rb->frame_max = (req->tp_frame_nr - 1); rb->head = 0; rb->frame_size = req->tp_frame_size; spin_unlock_bh(&rb_queue->lock); swap(rb->pg_vec_order, order); swap(rb->pg_vec_len, req->tp_block_nr); rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; po->prot_hook.func = (po->rx_ring.pg_vec) ? 
tpacket_rcv : packet_rcv; skb_queue_purge(rb_queue); if (atomic_read(&po->mapped)) pr_err("packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); } mutex_unlock(&po->pg_vec_lock); spin_lock(&po->bind_lock); if (was_running && !po->running) { sock_hold(sk); po->running = 1; po->num = num; dev_add_pack(&po->prot_hook); } spin_unlock(&po->bind_lock); release_sock(sk); if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: return err; } static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned long size, expected_size; struct packet_ring_buffer *rb; unsigned long start; int err = -EINVAL; int i; if (vma->vm_pgoff) return -EINVAL; mutex_lock(&po->pg_vec_lock); expected_size = 0; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec) { expected_size += rb->pg_vec_len * rb->pg_vec_pages * PAGE_SIZE; } } if (expected_size == 0) goto out; size = vma->vm_end - vma->vm_start; if (size != expected_size) goto out; start = vma->vm_start; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec == NULL) continue; for (i = 0; i < rb->pg_vec_len; i++) { struct page *page; void *kaddr = rb->pg_vec[i].buffer; int pg_num; for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { page = pgv_to_page(kaddr); err = vm_insert_page(vma, start, page); if (unlikely(err)) goto out; start += PAGE_SIZE; kaddr += PAGE_SIZE; } } } atomic_inc(&po->mapped); vma->vm_ops = &packet_mmap_ops; err = 0; out: mutex_unlock(&po->pg_vec_lock); return err; } static const struct proto_ops packet_ops_spkt = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind_spkt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname_spkt, .poll = datagram_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = packet_sendmsg_spkt, .recvmsg = packet_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct proto_ops packet_ops = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname, .poll = packet_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = packet_setsockopt, .getsockopt = packet_getsockopt, .sendmsg = packet_sendmsg, .recvmsg = packet_recvmsg, .mmap = packet_mmap, .sendpage = sock_no_sendpage, }; static const struct net_proto_family packet_family_ops = { .family = PF_PACKET, .create = packet_create, .owner = THIS_MODULE, }; static struct notifier_block packet_netdev_notifier = { .notifier_call = packet_notifier, }; #ifdef CONFIG_PROC_FS static void *packet_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct net *net = seq_file_net(seq); rcu_read_lock(); return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); } static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); return seq_hlist_next_rcu(v, &net->packet.sklist, pos); } static void packet_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int packet_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); else { struct sock *s = sk_entry(v); 
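/* An illustrative /proc/net/packet entry (hypothetical values), matching
 * the header printed above and the seq_printf() format used below:
 *
 *   ffff88003674f000 3      3    0003  2     1 0      0      9201
 */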
const struct packet_sock *po = pkt_sk(s); seq_printf(seq, "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", s, atomic_read(&s->sk_refcnt), s->sk_type, ntohs(po->num), po->ifindex, po->running, atomic_read(&s->sk_rmem_alloc), sock_i_uid(s), sock_i_ino(s)); } return 0; } static const struct seq_operations packet_seq_ops = { .start = packet_seq_start, .next = packet_seq_next, .stop = packet_seq_stop, .show = packet_seq_show, }; static int packet_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &packet_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations packet_seq_fops = { .owner = THIS_MODULE, .open = packet_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif static int __net_init packet_net_init(struct net *net) { spin_lock_init(&net->packet.sklist_lock); INIT_HLIST_HEAD(&net->packet.sklist); if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) return -ENOMEM; return 0; } static void __net_exit packet_net_exit(struct net *net) { proc_net_remove(net, "packet"); } static struct pernet_operations packet_net_ops = { .init = packet_net_init, .exit = packet_net_exit, }; static void __exit packet_exit(void) { unregister_netdevice_notifier(&packet_netdev_notifier); unregister_pernet_subsys(&packet_net_ops); sock_unregister(PF_PACKET); proto_unregister(&packet_proto); } static int __init packet_init(void) { int rc = proto_register(&packet_proto, 0); if (rc != 0) goto out; sock_register(&packet_family_ops); register_pernet_subsys(&packet_net_ops); register_netdevice_notifier(&packet_netdev_notifier); out: return rc; } module_init(packet_init); module_exit(packet_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_PACKET);
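/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * PACKET_RX_RING setup that satisfies the checks in packet_set_ring()
 * and packet_mmap() above, assuming 4 KiB pages and the default
 * TPACKET_V1 header format.  tp_block_size must be a multiple of
 * PAGE_SIZE, tp_frame_size must be TPACKET_ALIGNMENT-aligned and large
 * enough for the frame header, tp_frame_nr must equal
 * (tp_block_size / tp_frame_size) * tp_block_nr, and the mmap() length
 * must match the total ring size, with offset 0.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,     (one page)
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,     (two frames per block)
 *		.tp_frame_nr   = 2 * 64,   (frames_per_block * tp_block_nr)
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */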
/* * linux/drivers/block/floppy.c * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1993, 1994 Alain Knaff * Copyright (C) 1998 Alan Cox */ /* * 02.12.91 - Changed to static variables to indicate need for reset * and recalibrate. This makes some things easier (output_byte reset * checking etc), and means less interrupt jumping in case of errors, * so the code is hopefully easier to understand. */ /* * This file is certainly a mess. I've tried my best to get it working, * but I don't like programming floppies, and I have only one anyway. * Urgel. I should check for more errors, and do more graceful error * recovery. Seems there are problems with several drives. I've tried to * correct them. No promises. */ /* * As with hd.c, all routines within this file can (and will) be called * by interrupts, so extreme caution is needed. A hardware interrupt * handler may not sleep, or a kernel panic will happen. Thus I cannot * call "floppy-on" directly, but have to set a special timer interrupt * etc. */ /* * 28.02.92 - made track-buffering routines, based on the routines written * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus. */ /* * Automatic floppy-detection and formatting written by Werner Almesberger * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with * the floppy-change signal detection. */ /* * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed * FDC data overrun bug, added some preliminary stuff for vertical * recording support. * * 1992/9/17: Added DMA allocation & DMA functions. -- hhb. * * TODO: Errors are still not counted properly. */ /* 1992/9/20 * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl) * modeled after the freeware MS-DOS program fdformat/88 V1.8 by * Christoph H. Hochst\"atter. * I have fixed the shift values to the ones I always use. Maybe a new * ioctl() should be created to be able to modify them. * There is a bug in the driver that makes it impossible to format a * floppy as the first thing after bootup. */ /* * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and * this helped the floppy driver as well. Much cleaner, and still seems to * work. */ /* 1994/6/24 --bbroad-- added the floppy table entries and made * minor modifications to allow 2.88 floppies to be run. */ /* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more * disk types. */ /* * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger * format bug fixes, but unfortunately some new bugs too... */ /* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write * errors to allow safe writing by specialized programs. */ /* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks * by defining bit 1 of the "stretch" parameter to mean put sectors on the * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's * drives are "upside-down"). */ /* * 1995/8/26 -- Andreas Busse -- added Mips support. */ /* * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent * features to asm/floppy.h. */ /* * 1998/1/21 -- Richard Gooch <rgooch@atnf.csiro.au> -- devfs support */ /* * 1998/05/07 -- Russell King -- More portability cleanups; moved definition of * interrupt and dma channel to asm/floppy.h. Cleaned up some formatting & * use of '0' for NULL. */ /* * 1998/06/07 -- Alan Cox -- Merged the 2.0.34 fixes for resource allocation * failures. */ /* * 1998/09/20 -- David Weinehall -- Added slow-down code for buggy PS/2-drives. 
*/ /* * 1999/08/13 -- Paul Slootman -- floppy stopped working on Alpha after 24 * days, 6 hours, 32 minutes and 32 seconds (i.e. MAXINT jiffies; ints were * being used to store jiffies, which are unsigned longs). */ /* * 2000/08/28 -- Arnaldo Carvalho de Melo <acme@conectiva.com.br> * - get rid of check_region * - s/suser/capable/ */ /* * 2001/08/26 -- Paul Gortmaker - fix insmod oops on machines with no * floppy controller (lingering task on list after module is gone... boom.) */ /* * 2002/02/07 -- Anton Altaparmakov - Fix io ports reservation to correct range * (0x3f2-0x3f5, 0x3f7). This fix is a bit of a hack but the proper fix * requires many non-obvious changes in arch dependent code. */ /* 2003/07/28 -- Daniele Bellucci <bellucda@tiscali.it>. * Better audit of register_blkdev. */ #undef FLOPPY_SILENT_DCL_CLEAR #define REALLY_SLOW_IO #define DEBUGT 2 #define DPRINT(format, args...) \ pr_info("floppy%d: " format, current_drive, ##args) #define DCL_DEBUG /* debug disk change line */ #ifdef DCL_DEBUG #define debug_dcl(test, fmt, args...) \ do { if ((test) & FD_DEBUG) DPRINT(fmt, ##args); } while (0) #else #define debug_dcl(test, fmt, args...) \ do { if (0) DPRINT(fmt, ##args); } while (0) #endif /* do print messages for unexpected interrupts */ static int print_unex = 1; #include <linux/module.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/workqueue.h> #define FDPATCHES #include <linux/fdreg.h> #include <linux/fd.h> #include <linux/hdreg.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/bio.h> #include <linux/string.h> #include <linux/jiffies.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/mc146818rtc.h> /* CMOS defines */ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mod_devicetable.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/async.h> /* * PS/2 floppies have much slower step rates than regular floppies. * It's been recommended to take about 1/4 of the default speed * in some more extreme cases. */ static DEFINE_MUTEX(floppy_mutex); static int slow_floppy; #include <asm/dma.h> #include <asm/irq.h> static int FLOPPY_IRQ = 6; static int FLOPPY_DMA = 2; static int can_use_virtual_dma = 2; /* ======= * can use virtual DMA: * 0 = use of virtual DMA disallowed by config * 1 = use of virtual DMA prescribed by config * 2 = no virtual DMA preference configured. By default try hard DMA, * but fall back on virtual DMA when not enough memory available */ static int use_virtual_dma; /* ======= * use virtual DMA * 0 using hard DMA * 1 using virtual DMA * This variable is set to virtual when a DMA mem problem arises, and * reset back in floppy_grab_irq_and_dma. * It is not safe to reset it in other circumstances, because the floppy * driver may have several buffers in use at once, and we currently do not * record each buffer's capabilities */ static DEFINE_SPINLOCK(floppy_lock); static unsigned short virtual_dma_port = 0x3f0; irqreturn_t floppy_interrupt(int irq, void *dev_id); static int set_dor(int fdc, char mask, char data); #define K_64 0x10000 /* 64KB */ /* the following is the mask of allowed drives. By default units 2 and * 3 of both floppy controllers are disabled, because switching on the * motor of these drives causes system hangs on some PCI computers. drive * 0 is the low bit (0x1), and drive 7 is the high bit (0x80).
Bits are on if * a drive is allowed. * * NOTE: This must come before we include the arch floppy header because * some ports reference this variable from there. -DaveM */ static int allowed_drive_mask = 0x33; #include <asm/floppy.h> static int irqdma_allocated; #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/cdrom.h> /* for the compatibility eject ioctl */ #include <linux/completion.h> static struct request *current_req; static void do_fd_request(struct request_queue *q); static int set_next_request(void); #ifndef fd_get_dma_residue #define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA) #endif /* Dma Memory related stuff */ #ifndef fd_dma_mem_free #define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size)) #endif #ifndef fd_dma_mem_alloc #define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size)) #endif static inline void fallback_on_nodma_alloc(char **addr, size_t l) { #ifdef FLOPPY_CAN_FALLBACK_ON_NODMA if (*addr) return; /* we have the memory */ if (can_use_virtual_dma != 2) return; /* no fallback allowed */ pr_info("DMA memory shortage. Temporarily falling back on virtual DMA\n"); *addr = (char *)nodma_mem_alloc(l); #else return; #endif } /* End dma memory related stuff */ static unsigned long fake_change; static bool initialized; #define ITYPE(x) (((x) >> 2) & 0x1f) #define TOMINOR(x) ((x & 3) | ((x & 4) << 5)) #define UNIT(x) ((x) & 0x03) /* drive on fdc */ #define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */ /* reverse mapping from unit and fdc to drive */ #define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2)) #define DP (&drive_params[current_drive]) #define DRS (&drive_state[current_drive]) #define DRWE (&write_errors[current_drive]) #define FDCS (&fdc_state[fdc]) #define UDP (&drive_params[drive]) #define UDRS (&drive_state[drive]) #define UDRWE (&write_errors[drive]) #define UFDCS (&fdc_state[FDC(drive)]) #define PH_HEAD(floppy, head) (((((floppy)->stretch & 2) >> 1) ^ head) << 2) #define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH) /* read/write */ #define COMMAND (raw_cmd->cmd[0]) #define DR_SELECT (raw_cmd->cmd[1]) #define TRACK (raw_cmd->cmd[2]) #define HEAD (raw_cmd->cmd[3]) #define SECTOR (raw_cmd->cmd[4]) #define SIZECODE (raw_cmd->cmd[5]) #define SECT_PER_TRACK (raw_cmd->cmd[6]) #define GAP (raw_cmd->cmd[7]) #define SIZECODE2 (raw_cmd->cmd[8]) #define NR_RW 9 /* format */ #define F_SIZECODE (raw_cmd->cmd[2]) #define F_SECT_PER_TRACK (raw_cmd->cmd[3]) #define F_GAP (raw_cmd->cmd[4]) #define F_FILL (raw_cmd->cmd[5]) #define NR_F 6 /* * Maximum disk size (in kilobytes). * This default is used whenever the current disk size is unknown. * [Now it is rather a minimum] */ #define MAX_DISK_SIZE 4 /* 3984 */ /* * globals used by 'result()' */ #define MAX_REPLIES 16 static unsigned char reply_buffer[MAX_REPLIES]; static int inr; /* size of reply buffer, when called from interrupt */ #define ST0 (reply_buffer[0]) #define ST1 (reply_buffer[1]) #define ST2 (reply_buffer[2]) #define ST3 (reply_buffer[0]) /* result of GETSTATUS */ #define R_TRACK (reply_buffer[3]) #define R_HEAD (reply_buffer[4]) #define R_SECTOR (reply_buffer[5]) #define R_SIZECODE (reply_buffer[6]) #define SEL_DLY (2 * HZ / 100) /* * this struct defines the different floppy drive types. */ static struct { struct floppy_drive_params params; const char *name; /* name printed while booting */ } default_drive_params[] = { /* NOTE: the time values in jiffies should be in msec! 
 CMOS drive type
  |     Maximum data rate supported by drive type
  |     |    Head load time, msec
  |     |    |   Head unload time, msec (not used)
  |     |    |   |     Step rate interval, usec
  |     |    |   |     |       Time needed for spinup time (jiffies)
  |     |    |   |     |       |      Timeout for spinning down (jiffies)
  |     |    |   |     |       |      |   Spindown offset (where disk stops)
  |     |    |   |     |       |      |   |     Select delay
  |     |    |   |     |       |      |   |     |     RPS
  |     |    |   |     |       |      |   |     |     |    Max number of tracks
  |     |    |   |     |       |      |   |     |     |    |     Interrupt timeout
  |     |    |   |     |       |      |   |     |     |    |     |   Max nonintlv. sectors
  |     |    |   |     |       |      |   |     |     |    |     |   |  -Max Errors- flags */
{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0, 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0, 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0, 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0, 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0, 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0, 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0, 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
/*  |  --autodetected formats---   |  |  |
 *  read_track                     |  |  Name printed when booting
 *                                 |  Native format
 *                 Frequency of disk change checks */
};
static struct floppy_drive_params drive_params[N_DRIVE];
static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct gendisk *disks[N_DRIVE];
static struct block_device *opened_bdev[N_DRIVE];
static DEFINE_MUTEX(open_lock);
static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
static int fdc_queue;

/*
 * This struct defines the different floppy types.
 *
 * Bit 0 of 'stretch' tells if the tracks need to be doubled for some
 * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
 * tells if the disk is in Commodore 1581 format, which means side 0 sectors
 * are located on side 1 of the disk but with a side 0 ID, and vice-versa.
 * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
 * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
 * side 0 is on physical side 0 (but with the misnamed sector IDs).
 * 'stretch' should probably be renamed to something more general, like
 * 'options'.
 *
 * Bits 2 through 9 of 'stretch' tell the number of the first sector.
 * The LSB (bit 2) is flipped. For most disks, the first sector
 * is 1 (represented by 0x00<<2). For some CP/M and music sampler
 * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
 * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
 *
 * Other parameters should be self-explanatory (see also setfdprm(8)).
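 *
 * Worked example of the sector-base encoding (illustrative): for an
 * Amstrad CPC disk whose first sector ID is 0xC1, bits 2 through 9 hold
 * 0xC1 ^ 1 = 0xC0 (hence "represented as 0xC0<<2" above), and
 * FD_SECTBASE(), used by setup_format_params() below, recovers
 * ((stretch & FD_SECTBASEMASK) >> 2) ^ 1 == 0xC1.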
*/
/* Size
 |  Sectors per track
 |  |  Head
 |  |  |  Tracks
 |  |  |  |  Stretch
 |  |  |  |  |  Gap 1 size
 |  |  |  |  |  |    Data rate, | 0x40 for perp
 |  |  |  |  |  |    |    Spec1 (stepping rate, head unload
 |  |  |  |  |  |    |    |    /fmt gap (gap2) */
static struct floppy_struct floppy_type[32] = {
	{ 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
	{ 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
	{ 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
	{ 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
	{ 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
	{ 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
	{ 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
	{ 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
	{ 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120" }, /* 9 3.12MB 3.5" */
	{ 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
	{ 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
	{ 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
	{ 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
	{ 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
	{ 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
	{ 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
	{ 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
	{ 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
	{ 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
	{ 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
	{ 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
	{ 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
	{ 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
	{ 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
	{ 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
	{ 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
	{ 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
	{ 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
	{ 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
	{ 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
	{ 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
};

#define SECTSIZE (_FD_SECTSIZE(*floppy))

/* Auto-detection: Disk type used until the next media change occurs. */
static struct floppy_struct *current_type[N_DRIVE];

/*
 * User-provided type information. current_type points to
 * the respective entry of this array.
 */
static struct floppy_struct user_params[N_DRIVE];

static sector_t floppy_sizes[256];

static char floppy_device_name[] = "floppy";

/*
 * The driver is trying to determine the correct media format
 * while probing is set. rw_interrupt() clears it after a
 * successful access.
 */
static int probing;

/* Synchronization of FDC access. */
#define FD_COMMAND_NONE -1
#define FD_COMMAND_ERROR 2
#define FD_COMMAND_OKAY 3

static volatile int command_status = FD_COMMAND_NONE;
static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);

/* Errors during formatting are counted here. */
static int format_errors;

/* Format request descriptor.
*/ static struct format_descr format_req; /* * Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc), * H is head unload time (1=16ms, 2=32ms, etc) */ /* * Track buffer * Because these are written to by the DMA controller, they must * not contain a 64k byte boundary crossing, or data will be * corrupted/lost. */ static char *floppy_track_buffer; static int max_buffer_sectors; static int *errors; typedef void (*done_f)(int); static const struct cont_t { void (*interrupt)(void); /* this is called after the interrupt of the * main command */ void (*redo)(void); /* this is called to retry the operation */ void (*error)(void); /* this is called to tally an error */ done_f done; /* this is called to say if the operation has * succeeded/failed */ } *cont; static void floppy_ready(void); static void floppy_start(void); static void process_fd_request(void); static void recalibrate_floppy(void); static void floppy_shutdown(struct work_struct *); static int floppy_request_regions(int); static void floppy_release_regions(int); static int floppy_grab_irq_and_dma(void); static void floppy_release_irq_and_dma(void); /* * The "reset" variable should be tested whenever an interrupt is scheduled, * after the commands have been sent. This is to ensure that the driver doesn't * get wedged when the interrupt doesn't come because of a failed command. * reset doesn't need to be tested before sending commands, because * output_byte is automatically disabled when reset is set. */ static void reset_fdc(void); /* * These are global variables, as that's the easiest way to give * information to interrupts. They are the data used for the current * request. */ #define NO_TRACK -1 #define NEED_1_RECAL -2 #define NEED_2_RECAL -3 static atomic_t usage_count = ATOMIC_INIT(0); /* buffer related variables */ static int buffer_track = -1; static int buffer_drive = -1; static int buffer_min = -1; static int buffer_max = -1; /* fdc related variables, should end up in a struct */ static struct floppy_fdc_state fdc_state[N_FDC]; static int fdc; /* current fdc */ static struct workqueue_struct *floppy_wq; static struct floppy_struct *_floppy = floppy_type; static unsigned char current_drive; static long current_count_sectors; static unsigned char fsector_t; /* sector in track */ static unsigned char in_sector_offset; /* offset within physical sector, * expressed in units of 512 bytes */ static inline bool drive_no_geom(int drive) { return !current_type[drive] && !ITYPE(UDRS->fd_device); } #ifndef fd_eject static inline int fd_eject(int drive) { return -EINVAL; } #endif /* * Debugging * ========= */ #ifdef DEBUGT static long unsigned debugtimer; static inline void set_debugt(void) { debugtimer = jiffies; } static inline void debugt(const char *func, const char *msg) { if (DP->flags & DEBUGT) pr_info("%s:%s dtime=%lu\n", func, msg, jiffies - debugtimer); } #else static inline void set_debugt(void) { } static inline void debugt(const char *func, const char *msg) { } #endif /* DEBUGT */ static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown); static const char *timeout_message; static void is_alive(const char *func, const char *message) { /* this routine checks whether the floppy driver is "alive" */ if (test_bit(0, &fdc_busy) && command_status < 2 && !delayed_work_pending(&fd_timeout)) { DPRINT("%s: timeout handler died. 
%s\n", func, message); } } static void (*do_floppy)(void) = NULL; #define OLOGSIZE 20 static void (*lasthandler)(void); static unsigned long interruptjiffies; static unsigned long resultjiffies; static int resultsize; static unsigned long lastredo; static struct output_log { unsigned char data; unsigned char status; unsigned long jiffies; } output_log[OLOGSIZE]; static int output_log_pos; #define current_reqD -1 #define MAXTIMEOUT -2 static void __reschedule_timeout(int drive, const char *message) { unsigned long delay; if (drive == current_reqD) drive = current_drive; if (drive < 0 || drive >= N_DRIVE) { delay = 20UL * HZ; drive = 0; } else delay = UDP->timeout; mod_delayed_work(floppy_wq, &fd_timeout, delay); if (UDP->flags & FD_DEBUG) DPRINT("reschedule timeout %s\n", message); timeout_message = message; } static void reschedule_timeout(int drive, const char *message) { unsigned long flags; spin_lock_irqsave(&floppy_lock, flags); __reschedule_timeout(drive, message); spin_unlock_irqrestore(&floppy_lock, flags); } #define INFBOUND(a, b) (a) = max_t(int, a, b) #define SUPBOUND(a, b) (a) = min_t(int, a, b) /* * Bottom half floppy driver. * ========================== * * This part of the file contains the code talking directly to the hardware, * and also the main service loop (seek-configure-spinup-command) */ /* * disk change. * This routine is responsible for maintaining the FD_DISK_CHANGE flag, * and the last_checked date. * * last_checked is the date of the last check which showed 'no disk change' * FD_DISK_CHANGE is set under two conditions: * 1. The floppy has been changed after some i/o to that floppy already * took place. * 2. No floppy disk is in the drive. This is done in order to ensure that * requests are quickly flushed in case there is no disk in the drive. It * follows that FD_DISK_CHANGE can only be cleared if there is a disk in * the drive. * * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet. * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on * each seek. If a disk is present, the disk change line should also be * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk * change line is set, this means either that no disk is in the drive, or * that it has been removed since the last seek. * * This means that we really have a third possibility too: * The floppy has been changed after the last seek. 
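 *
 * In summary (DCL = raw disk change line, read back as bit 7 of the DIR
 * register in disk_change() below):
 *
 *	DCL clear                        => disk present, no change seen
 *	DCL set, FD_DISK_NEWCHANGE clear => no disk, or removed since last seek
 *	DCL set, FD_DISK_NEWCHANGE set   => possibly changed after the last seek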
*/ static int disk_change(int drive) { int fdc = FDC(drive); if (time_before(jiffies, UDRS->select_date + UDP->select_delay)) DPRINT("WARNING disk change called early\n"); if (!(FDCS->dor & (0x10 << UNIT(drive))) || (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) { DPRINT("probing disk change on unselected drive\n"); DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive), (unsigned int)FDCS->dor); } debug_dcl(UDP->flags, "checking disk change line for drive %d\n", drive); debug_dcl(UDP->flags, "jiffies=%lu\n", jiffies); debug_dcl(UDP->flags, "disk change line=%x\n", fd_inb(FD_DIR) & 0x80); debug_dcl(UDP->flags, "flags=%lx\n", UDRS->flags); if (UDP->flags & FD_BROKEN_DCL) return test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) { set_bit(FD_VERIFY_BIT, &UDRS->flags); /* verify write protection */ if (UDRS->maxblock) /* mark it changed */ set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); /* invalidate its geometry */ if (UDRS->keep_data >= 0) { if ((UDP->flags & FTD_MSG) && current_type[drive] != NULL) DPRINT("Disk type is undefined after disk change\n"); current_type[drive] = NULL; floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE << 1; } return 1; } else { UDRS->last_checked = jiffies; clear_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags); } return 0; } static inline int is_selected(int dor, int unit) { return ((dor & (0x10 << unit)) && (dor & 3) == unit); } static bool is_ready_state(int status) { int state = status & (STATUS_READY | STATUS_DIR | STATUS_DMA); return state == STATUS_READY; } static int set_dor(int fdc, char mask, char data) { unsigned char unit; unsigned char drive; unsigned char newdor; unsigned char olddor; if (FDCS->address == -1) return -1; olddor = FDCS->dor; newdor = (olddor & mask) | data; if (newdor != olddor) { unit = olddor & 0x3; if (is_selected(olddor, unit) && !is_selected(newdor, unit)) { drive = REVDRIVE(fdc, unit); debug_dcl(UDP->flags, "calling disk change from set_dor\n"); disk_change(drive); } FDCS->dor = newdor; fd_outb(newdor, FD_DOR); unit = newdor & 0x3; if (!is_selected(olddor, unit) && is_selected(newdor, unit)) { drive = REVDRIVE(fdc, unit); UDRS->select_date = jiffies; } } return olddor; } static void twaddle(void) { if (DP->select_delay) return; fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR); fd_outb(FDCS->dor, FD_DOR); DRS->select_date = jiffies; } /* * Reset all driver information about the current fdc. * This is needed after a reset, and after a raw command. */ static void reset_fdc_info(int mode) { int drive; FDCS->spec1 = FDCS->spec2 = -1; FDCS->need_configure = 1; FDCS->perp_mode = 1; FDCS->rawcmd = 0; for (drive = 0; drive < N_DRIVE; drive++) if (FDC(drive) == fdc && (mode || UDRS->track != NEED_1_RECAL)) UDRS->track = NEED_2_RECAL; } /* selects the fdc and drive, and enables the fdc's input/dma. 
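 *
 * DOR bit layout assumed throughout (standard 8272A/82077-compatible
 * parts): bits 0-1 drive select, bit 2 active-low reset, bit 3 DMA/IRQ
 * enable (the 8 ORed in below), bits 4-7 per-unit motor enable (the
 * 0x10 << UNIT(drive) masks used elsewhere in this driver).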
*/ static void set_fdc(int drive) { if (drive >= 0 && drive < N_DRIVE) { fdc = FDC(drive); current_drive = drive; } if (fdc != 1 && fdc != 0) { pr_info("bad fdc value\n"); return; } set_dor(fdc, ~0, 8); #if N_FDC > 1 set_dor(1 - fdc, ~8, 0); #endif if (FDCS->rawcmd == 2) reset_fdc_info(1); if (fd_inb(FD_STATUS) != STATUS_READY) FDCS->reset = 1; } /* locks the driver */ static int lock_fdc(int drive, bool interruptible) { if (WARN(atomic_read(&usage_count) == 0, "Trying to lock fdc while usage count=0\n")) return -1; if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy))) return -EINTR; command_status = FD_COMMAND_NONE; reschedule_timeout(drive, "lock fdc"); set_fdc(drive); return 0; } /* unlocks the driver */ static void unlock_fdc(void) { if (!test_bit(0, &fdc_busy)) DPRINT("FDC access conflict!\n"); raw_cmd = NULL; command_status = FD_COMMAND_NONE; cancel_delayed_work(&fd_timeout); do_floppy = NULL; cont = NULL; clear_bit(0, &fdc_busy); wake_up(&fdc_wait); } /* switches the motor off after a given timeout */ static void motor_off_callback(unsigned long nr) { unsigned char mask = ~(0x10 << UNIT(nr)); set_dor(FDC(nr), mask, 0); } /* schedules motor off */ static void floppy_off(unsigned int drive) { unsigned long volatile delta; int fdc = FDC(drive); if (!(FDCS->dor & (0x10 << UNIT(drive)))) return; del_timer(motor_off_timer + drive); /* make spindle stop in a position which minimizes spinup time * next time */ if (UDP->rps) { delta = jiffies - UDRS->first_read_date + HZ - UDP->spindown_offset; delta = ((delta * UDP->rps) % HZ) / UDP->rps; motor_off_timer[drive].expires = jiffies + UDP->spindown - delta; } add_timer(motor_off_timer + drive); } /* * cycle through all N_DRIVE floppy drives, for disk change testing. * stopping at current drive. This is done before any long operation, to * be sure to have up to date disk change information. 
*/ static void scandrives(void) { int i; int drive; int saved_drive; if (DP->select_delay) return; saved_drive = current_drive; for (i = 0; i < N_DRIVE; i++) { drive = (saved_drive + i + 1) % N_DRIVE; if (UDRS->fd_ref == 0 || UDP->select_delay != 0) continue; /* skip closed drives */ set_fdc(drive); if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) & (0x10 << UNIT(drive)))) /* switch the motor off again, if it was off to * begin with */ set_dor(fdc, ~(0x10 << UNIT(drive)), 0); } set_fdc(saved_drive); } static void empty(void) { } static void (*floppy_work_fn)(void); static void floppy_work_workfn(struct work_struct *work) { floppy_work_fn(); } static DECLARE_WORK(floppy_work, floppy_work_workfn); static void schedule_bh(void (*handler)(void)) { WARN_ON(work_pending(&floppy_work)); floppy_work_fn = handler; queue_work(floppy_wq, &floppy_work); } static void (*fd_timer_fn)(void) = NULL; static void fd_timer_workfn(struct work_struct *work) { fd_timer_fn(); } static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn); static void cancel_activity(void) { do_floppy = NULL; cancel_delayed_work_sync(&fd_timer); cancel_work_sync(&floppy_work); } /* this function makes sure that the disk stays in the drive during the * transfer */ static void fd_watchdog(void) { debug_dcl(DP->flags, "calling disk change from watchdog\n"); if (disk_change(current_drive)) { DPRINT("disk removed during i/o\n"); cancel_activity(); cont->done(0); reset_fdc(); } else { cancel_delayed_work(&fd_timer); fd_timer_fn = fd_watchdog; queue_delayed_work(floppy_wq, &fd_timer, HZ / 10); } } static void main_command_interrupt(void) { cancel_delayed_work(&fd_timer); cont->interrupt(); } /* waits for a delay (spinup or select) to pass */ static int fd_wait_for_completion(unsigned long expires, void (*function)(void)) { if (FDCS->reset) { reset_fdc(); /* do the reset during sleep to win time * if we don't need to sleep, it's a good * occasion anyways */ return 1; } if (time_before(jiffies, expires)) { cancel_delayed_work(&fd_timer); fd_timer_fn = function; queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies); return 1; } return 0; } static void setup_DMA(void) { unsigned long f; if (raw_cmd->length == 0) { int i; pr_info("zero dma transfer size:"); for (i = 0; i < raw_cmd->cmd_count; i++) pr_cont("%x,", raw_cmd->cmd[i]); pr_cont("\n"); cont->done(0); FDCS->reset = 1; return; } if (((unsigned long)raw_cmd->kernel_data) % 512) { pr_info("non aligned address: %p\n", raw_cmd->kernel_data); cont->done(0); FDCS->reset = 1; return; } f = claim_dma_lock(); fd_disable_dma(); #ifdef fd_dma_setup if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length, (raw_cmd->flags & FD_RAW_READ) ? DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) { release_dma_lock(f); cont->done(0); FDCS->reset = 1; return; } release_dma_lock(f); #else fd_clear_dma_ff(); fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length); fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ) ? 
DMA_MODE_READ : DMA_MODE_WRITE); fd_set_dma_addr(raw_cmd->kernel_data); fd_set_dma_count(raw_cmd->length); virtual_dma_port = FDCS->address; fd_enable_dma(); release_dma_lock(f); #endif } static void show_floppy(void); /* waits until the fdc becomes ready */ static int wait_til_ready(void) { int status; int counter; if (FDCS->reset) return -1; for (counter = 0; counter < 10000; counter++) { status = fd_inb(FD_STATUS); if (status & STATUS_READY) return status; } if (initialized) { DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc); show_floppy(); } FDCS->reset = 1; return -1; } /* sends a command byte to the fdc */ static int output_byte(char byte) { int status = wait_til_ready(); if (status < 0) return -1; if (is_ready_state(status)) { fd_outb(byte, FD_DATA); output_log[output_log_pos].data = byte; output_log[output_log_pos].status = status; output_log[output_log_pos].jiffies = jiffies; output_log_pos = (output_log_pos + 1) % OLOGSIZE; return 0; } FDCS->reset = 1; if (initialized) { DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n", byte, fdc, status); show_floppy(); } return -1; } /* gets the response from the fdc */ static int result(void) { int i; int status = 0; for (i = 0; i < MAX_REPLIES; i++) { status = wait_til_ready(); if (status < 0) break; status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA; if ((status & ~STATUS_BUSY) == STATUS_READY) { resultjiffies = jiffies; resultsize = i; return i; } if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY)) reply_buffer[i] = fd_inb(FD_DATA); else break; } if (initialized) { DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n", fdc, status, i); show_floppy(); } FDCS->reset = 1; return -1; } #define MORE_OUTPUT -2 /* does the fdc need more output? */ static int need_more_output(void) { int status = wait_til_ready(); if (status < 0) return -1; if (is_ready_state(status)) return MORE_OUTPUT; return result(); } /* Set perpendicular mode as required, based on data rate, if supported. * 82077 Now tested. 1Mbps data rate only possible with 82077-1. */ static void perpendicular_mode(void) { unsigned char perp_mode; if (raw_cmd->rate & 0x40) { switch (raw_cmd->rate & 3) { case 0: perp_mode = 2; break; case 3: perp_mode = 3; break; default: DPRINT("Invalid data rate for perpendicular mode!\n"); cont->done(0); FDCS->reset = 1; /* * convenient way to return to * redo without too much hassle * (deep stack et al.) */ return; } } else perp_mode = 0; if (FDCS->perp_mode == perp_mode) return; if (FDCS->version >= FDC_82077_ORIG) { output_byte(FD_PERPENDICULAR); output_byte(perp_mode); FDCS->perp_mode = perp_mode; } else if (perp_mode) { DPRINT("perpendicular mode not supported by this FDC.\n"); } } /* perpendicular_mode */ static int fifo_depth = 0xa; static int no_fifo; static int fdc_configure(void) { /* Turn on FIFO */ output_byte(FD_CONFIGURE); if (need_more_output() != MORE_OUTPUT) return 0; output_byte(0); output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf)); output_byte(0); /* pre-compensation from track 0 upwards */ return 1; } #define NOMINAL_DTR 500 /* Issue a "SPECIFY" command to set the step rate time, head unload time, * head load time, and DMA disable flag to values needed by floppy. * * The value "dtr" is the data transfer rate in Kbps. It is needed * to account for the data rate-based scaling done by the 82072 and 82077 * FDC types. This parameter is ignored for other types of FDCs (i.e. * 8272a). 
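 *
 * Worked example (illustrative, using the 1.44M entry of
 * default_drive_params above: srt = 4000 usec, hlt = 16 msec,
 * hut = 16 msec, at the nominal 500 kbps rate where
 * scale_dtr == NOMINAL_DTR == 500):
 *
 *	srt code = 16 - DIV_ROUND_UP(4000 * 500 / 1000, 500) = 16 - 4 = 0xc
 *	hut code = DIV_ROUND_UP(16 * 500 / 16, 500) = 1
 *	hlt code = DIV_ROUND_UP(16 * 500 / 2, 500) = 8
 *	spec1 = (0xc << 4) | 1 = 0xc1, spec2 = (8 << 1) | use_virtual_dma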
* * Note that changing the data transfer rate has a (probably deleterious) * effect on the parameters subject to scaling for 82072/82077 FDCs, so * fdc_specify is called again after each data transfer rate * change. * * srt: 1000 to 16000 in microseconds * hut: 16 to 240 milliseconds * hlt: 2 to 254 milliseconds * * These values are rounded up to the next highest available delay time. */ static void fdc_specify(void) { unsigned char spec1; unsigned char spec2; unsigned long srt; unsigned long hlt; unsigned long hut; unsigned long dtr = NOMINAL_DTR; unsigned long scale_dtr = NOMINAL_DTR; int hlt_max_code = 0x7f; int hut_max_code = 0xf; if (FDCS->need_configure && FDCS->version >= FDC_82072A) { fdc_configure(); FDCS->need_configure = 0; } switch (raw_cmd->rate & 0x03) { case 3: dtr = 1000; break; case 1: dtr = 300; if (FDCS->version >= FDC_82078) { /* choose the default rate table, not the one * where 1 = 2 Mbps */ output_byte(FD_DRIVESPEC); if (need_more_output() == MORE_OUTPUT) { output_byte(UNIT(current_drive)); output_byte(0xc0); } } break; case 2: dtr = 250; break; } if (FDCS->version >= FDC_82072) { scale_dtr = dtr; hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */ hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */ } /* Convert step rate from microseconds to milliseconds and 4 bits */ srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR); if (slow_floppy) srt = srt / 4; SUPBOUND(srt, 0xf); INFBOUND(srt, 0); hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR); if (hlt < 0x01) hlt = 0x01; else if (hlt > 0x7f) hlt = hlt_max_code; hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR); if (hut < 0x1) hut = 0x1; else if (hut > 0xf) hut = hut_max_code; spec1 = (srt << 4) | hut; spec2 = (hlt << 1) | (use_virtual_dma & 1); /* If these parameters did not change, just return with success */ if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) { /* Go ahead and set spec1 and spec2 */ output_byte(FD_SPECIFY); output_byte(FDCS->spec1 = spec1); output_byte(FDCS->spec2 = spec2); } } /* fdc_specify */ /* Set the FDC's data transfer rate on behalf of the specified drive. * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue * of the specify command (i.e. using the fdc_specify function). */ static int fdc_dtr(void) { /* If data rate not already set to desired value, set it. */ if ((raw_cmd->rate & 3) == FDCS->dtr) return 0; /* Set dtr */ fd_outb(raw_cmd->rate & 3, FD_DCR); /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB) * need a stabilization period of several milliseconds to be * enforced after data rate changes before R/W operations. * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies) */ FDCS->dtr = raw_cmd->rate & 3; return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready); } /* fdc_dtr */ static void tell_sector(void) { pr_cont(": track %d, head %d, sector %d, size %d", R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE); } /* tell_sector */ static void print_errors(void) { DPRINT(""); if (ST0 & ST0_ECE) { pr_cont("Recalibrate failed!"); } else if (ST2 & ST2_CRC) { pr_cont("data CRC error"); tell_sector(); } else if (ST1 & ST1_CRC) { pr_cont("CRC error"); tell_sector(); } else if ((ST1 & (ST1_MAM | ST1_ND)) || (ST2 & ST2_MAM)) { if (!probing) { pr_cont("sector not found"); tell_sector(); } else pr_cont("probe failed..."); } else if (ST2 & ST2_WC) { /* seek error */ pr_cont("wrong cylinder"); } else if (ST2 & ST2_BC) { /* cylinder marked as bad */ pr_cont("bad cylinder"); } else { pr_cont("unknown error.
ST[0..2] are: 0x%x 0x%x 0x%x", ST0, ST1, ST2); tell_sector(); } pr_cont("\n"); } /* * OK, this error interpreting routine is called after a * DMA read/write has succeeded * or failed, so we check the results, and copy any buffers. * hhb: Added better error reporting. * ak: Made this into a separate routine. */ static int interpret_errors(void) { char bad; if (inr != 7) { DPRINT("-- FDC reply error\n"); FDCS->reset = 1; return 1; } /* check IC to find cause of interrupt */ switch (ST0 & ST0_INTR) { case 0x40: /* error occurred during command execution */ if (ST1 & ST1_EOC) return 0; /* occurs with pseudo-DMA */ bad = 1; if (ST1 & ST1_WP) { DPRINT("Drive is write protected\n"); clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags); cont->done(0); bad = 2; } else if (ST1 & ST1_ND) { set_bit(FD_NEED_TWADDLE_BIT, &DRS->flags); } else if (ST1 & ST1_OR) { if (DP->flags & FTD_MSG) DPRINT("Over/Underrun - retrying\n"); bad = 0; } else if (*errors >= DP->max_errors.reporting) { print_errors(); } if (ST2 & ST2_WC || ST2 & ST2_BC) /* wrong cylinder => recal */ DRS->track = NEED_2_RECAL; return bad; case 0x80: /* invalid command given */ DPRINT("Invalid FDC command given!\n"); cont->done(0); return 2; case 0xc0: DPRINT("Abnormal termination caused by polling\n"); cont->error(); return 2; default: /* (0) Normal command termination */ return 0; } } /* * This routine is called when everything should be correctly set up * for the transfer (i.e. floppy motor is on, the correct floppy is * selected, and the head is sitting on the right track). */ static void setup_rw_floppy(void) { int i; int r; int flags; int dflags; unsigned long ready_date; void (*function)(void); flags = raw_cmd->flags; if (flags & (FD_RAW_READ | FD_RAW_WRITE)) flags |= FD_RAW_INTR; if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) { ready_date = DRS->spinup_date + DP->spinup; /* If spinup will take a long time, rerun scandrives * again just before spinup completion. Beware that * after scandrives, we must again wait for selection. */ if (time_after(ready_date, jiffies + DP->select_delay)) { ready_date -= DP->select_delay; function = floppy_start; } else function = setup_rw_floppy; /* wait until the floppy is spinning fast enough */ if (fd_wait_for_completion(ready_date, function)) return; } dflags = DRS->flags; if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE)) setup_DMA(); if (flags & FD_RAW_INTR) do_floppy = main_command_interrupt; r = 0; for (i = 0; i < raw_cmd->cmd_count; i++) r |= output_byte(raw_cmd->cmd[i]); debugt(__func__, "rw_command"); if (r) { cont->error(); reset_fdc(); return; } if (!(flags & FD_RAW_INTR)) { inr = result(); cont->interrupt(); } else if (flags & FD_RAW_NEED_DISK) fd_watchdog(); } static int blind_seek; /* * This is the routine called after every seek (or recalibrate) interrupt * from the floppy controller. 
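 *
 * After a seek, the SENSE INTERRUPT STATUS issued from floppy_interrupt()
 * leaves two result bytes (hence the inr == 2 check below): ST0 and the
 * present cylinder number, which reads back as reply_buffer[1] (the ST1
 * macro). A normally completed seek reports (ST0 & 0xF8) == 0x20, i.e.
 * the Seek End bit set and no error bits.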
*/ static void seek_interrupt(void) { debugt(__func__, ""); if (inr != 2 || (ST0 & 0xF8) != 0x20) { DPRINT("seek failed\n"); DRS->track = NEED_2_RECAL; cont->error(); cont->redo(); return; } if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) { debug_dcl(DP->flags, "clearing NEWCHANGE flag because of effective seek\n"); debug_dcl(DP->flags, "jiffies=%lu\n", jiffies); clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags); /* effective seek */ DRS->select_date = jiffies; } DRS->track = ST1; floppy_ready(); } static void check_wp(void) { if (test_bit(FD_VERIFY_BIT, &DRS->flags)) { /* check write protection */ output_byte(FD_GETSTATUS); output_byte(UNIT(current_drive)); if (result() != 1) { FDCS->reset = 1; return; } clear_bit(FD_VERIFY_BIT, &DRS->flags); clear_bit(FD_NEED_TWADDLE_BIT, &DRS->flags); debug_dcl(DP->flags, "checking whether disk is write protected\n"); debug_dcl(DP->flags, "wp=%x\n", ST3 & 0x40); if (!(ST3 & 0x40)) set_bit(FD_DISK_WRITABLE_BIT, &DRS->flags); else clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags); } } static void seek_floppy(void) { int track; blind_seek = 0; debug_dcl(DP->flags, "calling disk change from %s\n", __func__); if (!test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) && disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) { /* the media changed flag should be cleared after the seek. * If it isn't, this means that there is really no disk in * the drive. */ set_bit(FD_DISK_CHANGED_BIT, &DRS->flags); cont->done(0); cont->redo(); return; } if (DRS->track <= NEED_1_RECAL) { recalibrate_floppy(); return; } else if (test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) && (raw_cmd->flags & FD_RAW_NEED_DISK) && (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) { /* we seek to clear the media-changed condition. Does anybody * know a more elegant way, which works on all drives? */ if (raw_cmd->track) track = raw_cmd->track - 1; else { if (DP->flags & FD_SILENT_DCL_CLEAR) { set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0); blind_seek = 1; raw_cmd->flags |= FD_RAW_NEED_SEEK; } track = 1; } } else { check_wp(); if (raw_cmd->track != DRS->track && (raw_cmd->flags & FD_RAW_NEED_SEEK)) track = raw_cmd->track; else { setup_rw_floppy(); return; } } do_floppy = seek_interrupt; output_byte(FD_SEEK); output_byte(UNIT(current_drive)); if (output_byte(track) < 0) { reset_fdc(); return; } debugt(__func__, ""); } static void recal_interrupt(void) { debugt(__func__, ""); if (inr != 2) FDCS->reset = 1; else if (ST0 & ST0_ECE) { switch (DRS->track) { case NEED_1_RECAL: debugt(__func__, "need 1 recal"); /* after a second recalibrate, we still haven't * reached track 0. Probably no drive. Raise an * error, as failing immediately might upset * computers possessed by the Devil :-) */ cont->error(); cont->redo(); return; case NEED_2_RECAL: debugt(__func__, "need 2 recal"); /* If we already did a recalibrate, * and we are not at track 0, this * means we have moved. (The only way * not to move at recalibration is to * be already at track 0.) Clear the * new change flag */ debug_dcl(DP->flags, "clearing NEWCHANGE flag because of second recalibrate\n"); clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags); DRS->select_date = jiffies; /* fall through */ default: debugt(__func__, "default"); /* Recalibrate moves the head by at * most 80 steps. If after one * recalibrate we haven't reached * track 0, this might mean that we * started beyond track 80. Try * again.
*/ DRS->track = NEED_1_RECAL; break; } } else DRS->track = ST1; floppy_ready(); } static void print_result(char *message, int inr) { int i; DPRINT("%s ", message); if (inr >= 0) for (i = 0; i < inr; i++) pr_cont("repl[%d]=%x ", i, reply_buffer[i]); pr_cont("\n"); } /* interrupt handler. Note that this can be called externally on the Sparc */ irqreturn_t floppy_interrupt(int irq, void *dev_id) { int do_print; unsigned long f; void (*handler)(void) = do_floppy; lasthandler = handler; interruptjiffies = jiffies; f = claim_dma_lock(); fd_disable_dma(); release_dma_lock(f); do_floppy = NULL; if (fdc >= N_FDC || FDCS->address == -1) { /* we don't even know which FDC is the culprit */ pr_info("DOR0=%x\n", fdc_state[0].dor); pr_info("floppy interrupt on bizarre fdc %d\n", fdc); pr_info("handler=%pf\n", handler); is_alive(__func__, "bizarre fdc"); return IRQ_NONE; } FDCS->reset = 0; /* We have to clear the reset flag here, because apparently on boxes * with level triggered interrupts (PS/2, Sparc, ...), it is needed to * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the * emission of the SENSEI's. * It is OK to emit floppy commands because we are in an interrupt * handler here, and thus we have to fear no interference of other * activity. */ do_print = !handler && print_unex && initialized; inr = result(); if (do_print) print_result("unexpected interrupt", inr); if (inr == 0) { int max_sensei = 4; do { output_byte(FD_SENSEI); inr = result(); if (do_print) print_result("sensei", inr); max_sensei--; } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2 && max_sensei); } if (!handler) { FDCS->reset = 1; return IRQ_NONE; } schedule_bh(handler); is_alive(__func__, "normal interrupt end"); /* FIXME! Was it really for us? */ return IRQ_HANDLED; } static void recalibrate_floppy(void) { debugt(__func__, ""); do_floppy = recal_interrupt; output_byte(FD_RECALIBRATE); if (output_byte(UNIT(current_drive)) < 0) reset_fdc(); } /* * Must do 4 FD_SENSEIs after reset because of ``drive polling''. */ static void reset_interrupt(void) { debugt(__func__, ""); result(); /* get the status ready for set_fdc */ if (FDCS->reset) { pr_info("reset set in interrupt, calling %pf\n", cont->error); cont->error(); /* a reset just after a reset. BAD! */ } cont->redo(); } /* * reset is done by pulling bit 2 of DOR low for a while (old FDCs), * or by setting the self clearing bit 7 of STATUS (newer FDCs) */ static void reset_fdc(void) { unsigned long flags; do_floppy = reset_interrupt; FDCS->reset = 0; reset_fdc_info(0); /* Pseudo-DMA may intercept 'reset finished' interrupt. */ /* Irrelevant for systems with true DMA (i386). 
*/ flags = claim_dma_lock(); fd_disable_dma(); release_dma_lock(flags); if (FDCS->version >= FDC_82072A) fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS); else { fd_outb(FDCS->dor & ~0x04, FD_DOR); udelay(FD_RESET_DELAY); fd_outb(FDCS->dor, FD_DOR); } } static void show_floppy(void) { int i; pr_info("\n"); pr_info("floppy driver state\n"); pr_info("-------------------\n"); pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%pf\n", jiffies, interruptjiffies, jiffies - interruptjiffies, lasthandler); pr_info("timeout_message=%s\n", timeout_message); pr_info("last output bytes:\n"); for (i = 0; i < OLOGSIZE; i++) pr_info("%2x %2x %lu\n", output_log[(i + output_log_pos) % OLOGSIZE].data, output_log[(i + output_log_pos) % OLOGSIZE].status, output_log[(i + output_log_pos) % OLOGSIZE].jiffies); pr_info("last result at %lu\n", resultjiffies); pr_info("last redo_fd_request at %lu\n", lastredo); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, reply_buffer, resultsize, true); pr_info("status=%x\n", fd_inb(FD_STATUS)); pr_info("fdc_busy=%lu\n", fdc_busy); if (do_floppy) pr_info("do_floppy=%pf\n", do_floppy); if (work_pending(&floppy_work)) pr_info("floppy_work.func=%pf\n", floppy_work.func); if (delayed_work_pending(&fd_timer)) pr_info("delayed work.function=%p expires=%ld\n", fd_timer.work.func, fd_timer.timer.expires - jiffies); if (delayed_work_pending(&fd_timeout)) pr_info("timer_function=%p expires=%ld\n", fd_timeout.work.func, fd_timeout.timer.expires - jiffies); pr_info("cont=%p\n", cont); pr_info("current_req=%p\n", current_req); pr_info("command_status=%d\n", command_status); pr_info("\n"); } static void floppy_shutdown(struct work_struct *arg) { unsigned long flags; if (initialized) show_floppy(); cancel_activity(); flags = claim_dma_lock(); fd_disable_dma(); release_dma_lock(flags); /* avoid dma going to a random drive after shutdown */ if (initialized) DPRINT("floppy timeout called\n"); FDCS->reset = 1; if (cont) { cont->done(0); cont->redo(); /* this will recall reset when needed */ } else { pr_info("no cont in shutdown!\n"); process_fd_request(); } is_alive(__func__, ""); } /* start motor, check media-changed condition and write protection */ static int start_motor(void (*function)(void)) { int mask; int data; mask = 0xfc; data = UNIT(current_drive); if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) { if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) { set_debugt(); /* no read since this drive is running */ DRS->first_read_date = 0; /* note motor start time if motor is not yet running */ DRS->spinup_date = jiffies; data |= (0x10 << UNIT(current_drive)); } } else if (FDCS->dor & (0x10 << UNIT(current_drive))) mask &= ~(0x10 << UNIT(current_drive)); /* starts motor and selects floppy */ del_timer(motor_off_timer + current_drive); set_dor(fdc, mask, data); /* wait_for_completion also schedules reset if needed. 
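 *
 * Example of the DOR update performed above (illustrative): starting
 * unit 1 with its motor currently off uses mask 0xfc and data
 * UNIT(1) | (0x10 << UNIT(1)) == 0x21, so newdor = (olddor & 0xfc) | 0x21,
 * which both selects drive 1 and raises its motor-enable bit.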
*/ return fd_wait_for_completion(DRS->select_date + DP->select_delay, function); } static void floppy_ready(void) { if (FDCS->reset) { reset_fdc(); return; } if (start_motor(floppy_ready)) return; if (fdc_dtr()) return; debug_dcl(DP->flags, "calling disk change from floppy_ready\n"); if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) && disk_change(current_drive) && !DP->select_delay) twaddle(); /* this clears the dcl on certain * drive/controller combinations */ #ifdef fd_chose_dma_mode if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) { unsigned long flags = claim_dma_lock(); fd_chose_dma_mode(raw_cmd->kernel_data, raw_cmd->length); release_dma_lock(flags); } #endif if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) { perpendicular_mode(); fdc_specify(); /* must be done here because of hut, hlt ... */ seek_floppy(); } else { if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) fdc_specify(); setup_rw_floppy(); } } static void floppy_start(void) { reschedule_timeout(current_reqD, "floppy start"); scandrives(); debug_dcl(DP->flags, "setting NEWCHANGE in floppy_start\n"); set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags); floppy_ready(); } /* * ======================================================================== * here ends the bottom half. Exported routines are: * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc, * start_motor, reset_fdc, reset_fdc_info, interpret_errors. * Initialization also uses output_byte, result, set_dor * and floppy_interrupt. * ======================================================================== */ /* * General purpose continuations. * ============================== */ static void do_wakeup(void) { reschedule_timeout(MAXTIMEOUT, "do wakeup"); cont = NULL; command_status += 2; wake_up(&command_done); } static const struct cont_t wakeup_cont = { .interrupt = empty, .redo = do_wakeup, .error = empty, .done = (done_f)empty }; static const struct cont_t intr_cont = { .interrupt = empty, .redo = process_fd_request, .error = empty, .done = (done_f)empty }; static int wait_til_done(void (*handler)(void), bool interruptible) { int ret; schedule_bh(handler); if (interruptible) wait_event_interruptible(command_done, command_status >= 2); else wait_event(command_done, command_status >= 2); if (command_status < 2) { cancel_activity(); cont = &intr_cont; reset_fdc(); return -EINTR; } if (FDCS->reset) command_status = FD_COMMAND_ERROR; if (command_status == FD_COMMAND_OKAY) ret = 0; else ret = -EIO; command_status = FD_COMMAND_NONE; return ret; } static void generic_done(int result) { command_status = result; cont = &wakeup_cont; } static void generic_success(void) { cont->done(1); } static void generic_failure(void) { cont->done(0); } static void success_and_wakeup(void) { generic_success(); cont->redo(); } /* * formatting and rw support.
* ========================== */ static int next_valid_format(void) { int probed_format; probed_format = DRS->probed_format; while (1) { if (probed_format >= 8 || !DP->autodetect[probed_format]) { DRS->probed_format = 0; return 1; } if (floppy_type[DP->autodetect[probed_format]].sect) { DRS->probed_format = probed_format; return 0; } probed_format++; } } static void bad_flp_intr(void) { int err_count; if (probing) { DRS->probed_format++; if (!next_valid_format()) return; } err_count = ++(*errors); INFBOUND(DRWE->badness, err_count); if (err_count > DP->max_errors.abort) cont->done(0); if (err_count > DP->max_errors.reset) FDCS->reset = 1; else if (err_count > DP->max_errors.recal) DRS->track = NEED_2_RECAL; } static void set_floppy(int drive) { int type = ITYPE(UDRS->fd_device); if (type) _floppy = floppy_type + type; else _floppy = current_type[drive]; } /* * formatting support. * =================== */ static void format_interrupt(void) { switch (interpret_errors()) { case 1: cont->error(); case 2: break; case 0: cont->done(1); } cont->redo(); } #define FM_MODE(x, y) ((y) & ~(((x)->rate & 0x80) >> 1)) #define CT(x) ((x) | 0xc0) static void setup_format_params(int track) { int n; int il; int count; int head_shift; int track_shift; struct fparm { unsigned char track, head, sect, size; } *here = (struct fparm *)floppy_track_buffer; raw_cmd = &default_raw_cmd; raw_cmd->track = track; raw_cmd->flags = (FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK); raw_cmd->rate = _floppy->rate & 0x43; raw_cmd->cmd_count = NR_F; COMMAND = FM_MODE(_floppy, FD_FORMAT); DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head); F_SIZECODE = FD_SIZECODE(_floppy); F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE; F_GAP = _floppy->fmt_gap; F_FILL = FD_FILL_BYTE; raw_cmd->kernel_data = floppy_track_buffer; raw_cmd->length = 4 * F_SECT_PER_TRACK; /* allow for about 30ms for data transport per track */ head_shift = (F_SECT_PER_TRACK + 5) / 6; /* a ``cylinder'' is two tracks plus a little stepping time */ track_shift = 2 * head_shift + 3; /* position of logical sector 1 on this track */ n = (track_shift * format_req.track + head_shift * format_req.head) % F_SECT_PER_TRACK; /* determine interleave */ il = 1; if (_floppy->fmt_gap < 0x22) il++; /* initialize field */ for (count = 0; count < F_SECT_PER_TRACK; ++count) { here[count].track = format_req.track; here[count].head = format_req.head; here[count].sect = 0; here[count].size = F_SIZECODE; } /* place logical sectors */ for (count = 1; count <= F_SECT_PER_TRACK; ++count) { here[n].sect = count; n = (n + il) % F_SECT_PER_TRACK; if (here[n].sect) { /* sector busy, find next free sector */ ++n; if (n >= F_SECT_PER_TRACK) { n -= F_SECT_PER_TRACK; while (here[n].sect) ++n; } } } if (_floppy->stretch & FD_SECTBASEMASK) { for (count = 0; count < F_SECT_PER_TRACK; count++) here[count].sect += FD_SECTBASE(_floppy) - 1; } } static void redo_format(void) { buffer_track = -1; setup_format_params(format_req.track << STRETCH(_floppy)); floppy_start(); debugt(__func__, "queue format request"); } static const struct cont_t format_cont = { .interrupt = format_interrupt, .redo = redo_format, .error = bad_flp_intr, .done = generic_done }; static int do_format(int drive, struct format_descr *tmp_format_req) { int ret; if (lock_fdc(drive, true)) return -EINTR; set_floppy(drive); if (!_floppy || _floppy->track > DP->tracks || tmp_format_req->track >= _floppy->track || tmp_format_req->head >= _floppy->head || (_floppy->sect << 2) % (1 << 
	     FD_SIZECODE(_floppy)) ||
	    !_floppy->fmt_gap) {
		process_fd_request();
		return -EINVAL;
	}
	format_req = *tmp_format_req;
	format_errors = 0;
	cont = &format_cont;
	errors = &format_errors;
	ret = wait_til_done(redo_format, true);
	if (ret == -EINTR)
		return -EINTR;
	process_fd_request();
	return ret;
}

/*
 * Buffer read/write and support
 * =============================
 */

static void floppy_end_request(struct request *req, int error)
{
	unsigned int nr_sectors = current_count_sectors;
	unsigned int drive = (unsigned long)req->rq_disk->private_data;

	/* current_count_sectors can be zero if transfer failed */
	if (error)
		nr_sectors = blk_rq_cur_sectors(req);
	if (__blk_end_request(req, error, nr_sectors << 9))
		return;

	/* We're done with the request */
	floppy_off(drive);
	current_req = NULL;
}

/* new request_done. Can handle physical sectors which are smaller than a
 * logical buffer */
static void request_done(int uptodate)
{
	struct request *req = current_req;
	struct request_queue *q;
	unsigned long flags;
	int block;
	char msg[sizeof("request done ") + sizeof(int) * 3];

	probing = 0;
	snprintf(msg, sizeof(msg), "request done %d", uptodate);
	reschedule_timeout(MAXTIMEOUT, msg);

	if (!req) {
		pr_info("floppy.c: no request in request_done\n");
		return;
	}

	q = req->q;

	if (uptodate) {
		/* maintain values for invalidation on geometry
		 * change */
		block = current_count_sectors + blk_rq_pos(req);
		INFBOUND(DRS->maxblock, block);
		if (block > _floppy->sect)
			DRS->maxtrack = 1;

		/* unlock chained buffers */
		spin_lock_irqsave(q->queue_lock, flags);
		floppy_end_request(req, 0);
		spin_unlock_irqrestore(q->queue_lock, flags);
	} else {
		if (rq_data_dir(req) == WRITE) {
			/* record write error information */
			DRWE->write_errors++;
			if (DRWE->write_errors == 1) {
				DRWE->first_error_sector = blk_rq_pos(req);
				DRWE->first_error_generation = DRS->generation;
			}
			DRWE->last_error_sector = blk_rq_pos(req);
			DRWE->last_error_generation = DRS->generation;
		}
		spin_lock_irqsave(q->queue_lock, flags);
		floppy_end_request(req, -EIO);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/* Interrupt handler evaluating the result of the r/w operation */
static void rw_interrupt(void)
{
	int eoc;
	int ssize;
	int heads;
	int nr_sectors;

	if (R_HEAD >= 2) {
		/* some Toshiba floppy controllers occasionally seem to
		 * return bogus interrupts after read/write operations, which
		 * can be recognized by a bad head number (>= 2) */
		return;
	}

	if (!DRS->first_read_date)
		DRS->first_read_date = jiffies;

	nr_sectors = 0;
	ssize = DIV_ROUND_UP(1 << SIZECODE, 4);

	if (ST1 & ST1_EOC)
		eoc = 1;
	else
		eoc = 0;

	if (COMMAND & 0x80)
		heads = 2;
	else
		heads = 1;

	/* convert the FDC's reported end position (result registers)
	 * back into a 512-byte sector count relative to the start of
	 * the transfer */
	nr_sectors = (((R_TRACK - TRACK) * heads +
		       R_HEAD - HEAD) * SECT_PER_TRACK +
		      R_SECTOR - SECTOR + eoc) << SIZECODE >> 2;

	if (nr_sectors / ssize >
	    DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
		DPRINT("long rw: %x instead of %lx\n",
		       nr_sectors, current_count_sectors);
		pr_info("rs=%d s=%d\n", R_SECTOR, SECTOR);
		pr_info("rh=%d h=%d\n", R_HEAD, HEAD);
		pr_info("rt=%d t=%d\n", R_TRACK, TRACK);
		pr_info("heads=%d eoc=%d\n", heads, eoc);
		pr_info("spt=%d st=%d ss=%d\n",
			SECT_PER_TRACK, fsector_t, ssize);
		pr_info("in_sector_offset=%d\n", in_sector_offset);
	}

	nr_sectors -= in_sector_offset;
	INFBOUND(nr_sectors, 0);
	SUPBOUND(current_count_sectors, nr_sectors);

	switch (interpret_errors()) {
	case 2:
		cont->redo();
		return;
	case 1:
		if (!current_count_sectors) {
			cont->error();
			cont->redo();
			return;
		}
		break;
	case 0:
		if (!current_count_sectors) {
			cont->redo();
			return;
		}
		current_type[current_drive] = _floppy;
		floppy_sizes[TOMINOR(current_drive)] =
_floppy->size; break; } if (probing) { if (DP->flags & FTD_MSG) DPRINT("Auto-detected floppy type %s in fd%d\n", _floppy->name, current_drive); current_type[current_drive] = _floppy; floppy_sizes[TOMINOR(current_drive)] = _floppy->size; probing = 0; } if (CT(COMMAND) != FD_READ || raw_cmd->kernel_data == current_req->buffer) { /* transfer directly from buffer */ cont->done(1); } else if (CT(COMMAND) == FD_READ) { buffer_track = raw_cmd->track; buffer_drive = current_drive; INFBOUND(buffer_max, nr_sectors + fsector_t); } cont->redo(); } /* Compute maximal contiguous buffer size. */ static int buffer_chain_size(void) { struct bio_vec bv; int size; struct req_iterator iter; char *base; base = bio_data(current_req->bio); size = 0; rq_for_each_segment(bv, current_req, iter) { if (page_address(bv.bv_page) + bv.bv_offset != base + size) break; size += bv.bv_len; } return size >> 9; } /* Compute the maximal transfer size */ static int transfer_size(int ssize, int max_sector, int max_size) { SUPBOUND(max_sector, fsector_t + max_size); /* alignment */ max_sector -= (max_sector % _floppy->sect) % ssize; /* transfer size, beginning not aligned */ current_count_sectors = max_sector - fsector_t; return max_sector; } /* * Move data from/to the track buffer to/from the buffer cache. */ static void copy_buffer(int ssize, int max_sector, int max_sector_2) { int remaining; /* number of transferred 512-byte sectors */ struct bio_vec bv; char *buffer; char *dma_buffer; int size; struct req_iterator iter; max_sector = transfer_size(ssize, min(max_sector, max_sector_2), blk_rq_sectors(current_req)); if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE && buffer_max > fsector_t + blk_rq_sectors(current_req)) current_count_sectors = min_t(int, buffer_max - fsector_t, blk_rq_sectors(current_req)); remaining = current_count_sectors << 9; if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) { DPRINT("in copy buffer\n"); pr_info("current_count_sectors=%ld\n", current_count_sectors); pr_info("remaining=%d\n", remaining >> 9); pr_info("current_req->nr_sectors=%u\n", blk_rq_sectors(current_req)); pr_info("current_req->current_nr_sectors=%u\n", blk_rq_cur_sectors(current_req)); pr_info("max_sector=%d\n", max_sector); pr_info("ssize=%d\n", ssize); } buffer_max = max(max_sector, buffer_max); dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9); size = blk_rq_cur_bytes(current_req); rq_for_each_segment(bv, current_req, iter) { if (!remaining) break; size = bv.bv_len; SUPBOUND(size, remaining); buffer = page_address(bv.bv_page) + bv.bv_offset; if (dma_buffer + size > floppy_track_buffer + (max_buffer_sectors << 10) || dma_buffer < floppy_track_buffer) { DPRINT("buffer overrun in copy buffer %d\n", (int)((floppy_track_buffer - dma_buffer) >> 9)); pr_info("fsector_t=%d buffer_min=%d\n", fsector_t, buffer_min); pr_info("current_count_sectors=%ld\n", current_count_sectors); if (CT(COMMAND) == FD_READ) pr_info("read\n"); if (CT(COMMAND) == FD_WRITE) pr_info("write\n"); break; } if (((unsigned long)buffer) % 512) DPRINT("%p buffer not aligned\n", buffer); if (CT(COMMAND) == FD_READ) memcpy(buffer, dma_buffer, size); else memcpy(dma_buffer, buffer, size); remaining -= size; dma_buffer += size; } if (remaining) { if (remaining > 0) max_sector -= remaining >> 9; DPRINT("weirdness: remaining %d\n", remaining >> 9); } } /* work around a bug in pseudo DMA * (on some FDCs) pseudo DMA does not stop when the CPU stops * sending data. 
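 * (left to itself the controller keeps clocking sectors until it
 * believes the track is over)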
Hence we need a different way to signal the * transfer length: We use SECT_PER_TRACK. Unfortunately, this * does not work with MT, hence we can only transfer one head at * a time */ static void virtualdmabug_workaround(void) { int hard_sectors; int end_sector; if (CT(COMMAND) == FD_WRITE) { COMMAND &= ~0x80; /* switch off multiple track mode */ hard_sectors = raw_cmd->length >> (7 + SIZECODE); end_sector = SECTOR + hard_sectors - 1; if (end_sector > SECT_PER_TRACK) { pr_info("too many sectors %d > %d\n", end_sector, SECT_PER_TRACK); return; } SECT_PER_TRACK = end_sector; /* make sure SECT_PER_TRACK * points to end of transfer */ } } /* * Formulate a read/write request. * this routine decides where to load the data (directly to buffer, or to * tmp floppy area), how much data to load (the size of the buffer, the whole * track, or a single sector) * All floppy_track_buffer handling goes in here. If we ever add track buffer * allocation on the fly, it should be done here. No other part should need * modification. */ static int make_raw_rw_request(void) { int aligned_sector_t; int max_sector; int max_size; int tracksize; int ssize; if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n")) return 0; set_fdc((long)current_req->rq_disk->private_data); raw_cmd = &default_raw_cmd; raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK; raw_cmd->cmd_count = NR_RW; if (rq_data_dir(current_req) == READ) { raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy, FD_READ); } else if (rq_data_dir(current_req) == WRITE) { raw_cmd->flags |= FD_RAW_WRITE; COMMAND = FM_MODE(_floppy, FD_WRITE); } else { DPRINT("%s: unknown command\n", __func__); return 0; } max_sector = _floppy->sect * _floppy->head; TRACK = (int)blk_rq_pos(current_req) / max_sector; fsector_t = (int)blk_rq_pos(current_req) % max_sector; if (_floppy->track && TRACK >= _floppy->track) { if (blk_rq_cur_sectors(current_req) & 1) { current_count_sectors = 1; return 1; } else return 0; } HEAD = fsector_t / _floppy->sect; if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) || test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags)) && fsector_t < _floppy->sect) max_sector = _floppy->sect; /* 2M disks have phantom sectors on the first track */ if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) { max_sector = 2 * _floppy->sect / 3; if (fsector_t >= max_sector) { current_count_sectors = min_t(int, _floppy->sect - fsector_t, blk_rq_sectors(current_req)); return 1; } SIZECODE = 2; } else SIZECODE = FD_SIZECODE(_floppy); raw_cmd->rate = _floppy->rate & 0x43; if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2) raw_cmd->rate = 1; if (SIZECODE) SIZECODE2 = 0xff; else SIZECODE2 = 0x80; raw_cmd->track = TRACK << STRETCH(_floppy); DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD); GAP = _floppy->gap; ssize = DIV_ROUND_UP(1 << SIZECODE, 4); SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE; SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) + FD_SECTBASE(_floppy); /* tracksize describes the size which can be filled up with sectors * of size ssize. 
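 * Illustrative example (made-up geometry): with _floppy->sect == 46
 * 512-byte units and ssize == 4, tracksize starts out as 44, and the
 * two leftover units are re-described with a smaller size code by the
 * loop below.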
*/ tracksize = _floppy->sect - _floppy->sect % ssize; if (tracksize < _floppy->sect) { SECT_PER_TRACK++; if (tracksize <= fsector_t % _floppy->sect) SECTOR--; /* if we are beyond tracksize, fill up using smaller sectors */ while (tracksize <= fsector_t % _floppy->sect) { while (tracksize + ssize > _floppy->sect) { SIZECODE--; ssize >>= 1; } SECTOR++; SECT_PER_TRACK++; tracksize += ssize; } max_sector = HEAD * _floppy->sect + tracksize; } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) { max_sector = _floppy->sect; } else if (!HEAD && CT(COMMAND) == FD_WRITE) { /* for virtual DMA bug workaround */ max_sector = _floppy->sect; } in_sector_offset = (fsector_t % _floppy->sect) % ssize; aligned_sector_t = fsector_t - in_sector_offset; max_size = blk_rq_sectors(current_req); if ((raw_cmd->track == buffer_track) && (current_drive == buffer_drive) && (fsector_t >= buffer_min) && (fsector_t < buffer_max)) { /* data already in track buffer */ if (CT(COMMAND) == FD_READ) { copy_buffer(1, max_sector, buffer_max); return 1; } } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) { if (CT(COMMAND) == FD_WRITE) { unsigned int sectors; sectors = fsector_t + blk_rq_sectors(current_req); if (sectors > ssize && sectors < ssize + ssize) max_size = ssize + ssize; else max_size = ssize; } raw_cmd->flags &= ~FD_RAW_WRITE; raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy, FD_READ); } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) { unsigned long dma_limit; int direct, indirect; indirect = transfer_size(ssize, max_sector, max_buffer_sectors * 2) - fsector_t; /* * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide * on a 64 bit machine! */ max_size = buffer_chain_size(); dma_limit = (MAX_DMA_ADDRESS - ((unsigned long)current_req->buffer)) >> 9; if ((unsigned long)max_size > dma_limit) max_size = dma_limit; /* 64 kb boundaries */ if (CROSS_64KB(current_req->buffer, max_size << 9)) max_size = (K_64 - ((unsigned long)current_req->buffer) % K_64) >> 9; direct = transfer_size(ssize, max_sector, max_size) - fsector_t; /* * We try to read tracks, but if we get too many errors, we * go back to reading just one sector at a time. * * This means we should be able to read a sector even if there * are other bad sectors on this track. */ if (!direct || (indirect * 2 > direct * 3 && *errors < DP->max_errors.read_track && ((!probing || (DP->read_track & (1 << DRS->probed_format)))))) { max_size = blk_rq_sectors(current_req); } else { raw_cmd->kernel_data = current_req->buffer; raw_cmd->length = current_count_sectors << 9; if (raw_cmd->length == 0) { DPRINT("%s: zero dma transfer attempted\n", __func__); DPRINT("indirect=%d direct=%d fsector_t=%d\n", indirect, direct, fsector_t); return 0; } virtualdmabug_workaround(); return 2; } } if (CT(COMMAND) == FD_READ) max_size = max_sector; /* unbounded */ /* claim buffer track if needed */ if (buffer_track != raw_cmd->track || /* bad track */ buffer_drive != current_drive || /* bad drive */ fsector_t > buffer_max || fsector_t < buffer_min || ((CT(COMMAND) == FD_READ || (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) && max_sector > 2 * max_buffer_sectors + buffer_min && max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)) { /* not enough space */ buffer_track = -1; buffer_drive = current_drive; buffer_max = buffer_min = aligned_sector_t; } raw_cmd->kernel_data = floppy_track_buffer + ((aligned_sector_t - buffer_min) << 9); if (CT(COMMAND) == FD_WRITE) { /* copy write buffer to track buffer. 
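 * (i.e. stage the user data into the DMA-able track buffer before the
 * controller is started)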
* if we get here, we know that the write * is either aligned or the data already in the buffer * (buffer will be overwritten) */ if (in_sector_offset && buffer_track == -1) DPRINT("internal error offset !=0 on write\n"); buffer_track = raw_cmd->track; buffer_drive = current_drive; copy_buffer(ssize, max_sector, 2 * max_buffer_sectors + buffer_min); } else transfer_size(ssize, max_sector, 2 * max_buffer_sectors + buffer_min - aligned_sector_t); /* round up current_count_sectors to get dma xfer size */ raw_cmd->length = in_sector_offset + current_count_sectors; raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; raw_cmd->length <<= 9; if ((raw_cmd->length < current_count_sectors << 9) || (raw_cmd->kernel_data != current_req->buffer && CT(COMMAND) == FD_WRITE && (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || aligned_sector_t < buffer_min)) || raw_cmd->length % (128 << SIZECODE) || raw_cmd->length <= 0 || current_count_sectors <= 0) { DPRINT("fractionary current count b=%lx s=%lx\n", raw_cmd->length, current_count_sectors); if (raw_cmd->kernel_data != current_req->buffer) pr_info("addr=%d, length=%ld\n", (int)((raw_cmd->kernel_data - floppy_track_buffer) >> 9), current_count_sectors); pr_info("st=%d ast=%d mse=%d msi=%d\n", fsector_t, aligned_sector_t, max_sector, max_size); pr_info("ssize=%x SIZECODE=%d\n", ssize, SIZECODE); pr_info("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n", COMMAND, SECTOR, HEAD, TRACK); pr_info("buffer drive=%d\n", buffer_drive); pr_info("buffer track=%d\n", buffer_track); pr_info("buffer_min=%d\n", buffer_min); pr_info("buffer_max=%d\n", buffer_max); return 0; } if (raw_cmd->kernel_data != current_req->buffer) { if (raw_cmd->kernel_data < floppy_track_buffer || current_count_sectors < 0 || raw_cmd->length < 0 || raw_cmd->kernel_data + raw_cmd->length > floppy_track_buffer + (max_buffer_sectors << 10)) { DPRINT("buffer overrun in schedule dma\n"); pr_info("fsector_t=%d buffer_min=%d current_count=%ld\n", fsector_t, buffer_min, raw_cmd->length >> 9); pr_info("current_count_sectors=%ld\n", current_count_sectors); if (CT(COMMAND) == FD_READ) pr_info("read\n"); if (CT(COMMAND) == FD_WRITE) pr_info("write\n"); return 0; } } else if (raw_cmd->length > blk_rq_bytes(current_req) || current_count_sectors > blk_rq_sectors(current_req)) { DPRINT("buffer overrun in direct transfer\n"); return 0; } else if (raw_cmd->length < current_count_sectors << 9) { DPRINT("more sectors than bytes\n"); pr_info("bytes=%ld\n", raw_cmd->length >> 9); pr_info("sectors=%ld\n", current_count_sectors); } if (raw_cmd->length == 0) { DPRINT("zero dma transfer attempted from make_raw_request\n"); return 0; } virtualdmabug_workaround(); return 2; } /* * Round-robin between our available drives, doing one request from each */ static int set_next_request(void) { struct request_queue *q; int old_pos = fdc_queue; do { q = disks[fdc_queue]->queue; if (++fdc_queue == N_DRIVE) fdc_queue = 0; if (q) { current_req = blk_fetch_request(q); if (current_req) break; } } while (fdc_queue != old_pos); return current_req != NULL; } static void redo_fd_request(void) { int drive; int tmp; lastredo = jiffies; if (current_drive < N_DRIVE) floppy_off(current_drive); do_request: if (!current_req) { int pending; spin_lock_irq(&floppy_lock); pending = set_next_request(); spin_unlock_irq(&floppy_lock); if (!pending) { do_floppy = NULL; unlock_fdc(); return; } } drive = (long)current_req->rq_disk->private_data; set_fdc(drive); reschedule_timeout(current_reqD, "redo fd request"); set_floppy(drive); raw_cmd = 
&default_raw_cmd; raw_cmd->flags = 0; if (start_motor(redo_fd_request)) return; disk_change(current_drive); if (test_bit(current_drive, &fake_change) || test_bit(FD_DISK_CHANGED_BIT, &DRS->flags)) { DPRINT("disk absent or changed during operation\n"); request_done(0); goto do_request; } if (!_floppy) { /* Autodetection */ if (!probing) { DRS->probed_format = 0; if (next_valid_format()) { DPRINT("no autodetectable formats\n"); _floppy = NULL; request_done(0); goto do_request; } } probing = 1; _floppy = floppy_type + DP->autodetect[DRS->probed_format]; } else probing = 0; errors = &(current_req->errors); tmp = make_raw_rw_request(); if (tmp < 2) { request_done(tmp); goto do_request; } if (test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags)) twaddle(); schedule_bh(floppy_start); debugt(__func__, "queue fd request"); return; } static const struct cont_t rw_cont = { .interrupt = rw_interrupt, .redo = redo_fd_request, .error = bad_flp_intr, .done = request_done }; static void process_fd_request(void) { cont = &rw_cont; schedule_bh(redo_fd_request); } static void do_fd_request(struct request_queue *q) { if (WARN(max_buffer_sectors == 0, "VFS: %s called on non-open device\n", __func__)) return; if (WARN(atomic_read(&usage_count) == 0, "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n", current_req, (long)blk_rq_pos(current_req), current_req->cmd_type, (unsigned long long) current_req->cmd_flags)) return; if (test_and_set_bit(0, &fdc_busy)) { /* fdc busy, this new request will be treated when the current one is done */ is_alive(__func__, "old request running"); return; } command_status = FD_COMMAND_NONE; __reschedule_timeout(MAXTIMEOUT, "fd_request"); set_fdc(0); process_fd_request(); is_alive(__func__, ""); } static const struct cont_t poll_cont = { .interrupt = success_and_wakeup, .redo = floppy_ready, .error = generic_failure, .done = generic_done }; static int poll_drive(bool interruptible, int flag) { /* no auto-sense, just clear dcl */ raw_cmd = &default_raw_cmd; raw_cmd->flags = flag; raw_cmd->track = 0; raw_cmd->cmd_count = 0; cont = &poll_cont; debug_dcl(DP->flags, "setting NEWCHANGE in poll_drive\n"); set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags); return wait_til_done(floppy_ready, interruptible); } /* * User triggered reset * ==================== */ static void reset_intr(void) { pr_info("weird, reset interrupt called\n"); } static const struct cont_t reset_cont = { .interrupt = reset_intr, .redo = success_and_wakeup, .error = generic_failure, .done = generic_done }; static int user_reset_fdc(int drive, int arg, bool interruptible) { int ret; if (lock_fdc(drive, interruptible)) return -EINTR; if (arg == FD_RESET_ALWAYS) FDCS->reset = 1; if (FDCS->reset) { cont = &reset_cont; ret = wait_til_done(reset_fdc, interruptible); if (ret == -EINTR) return -EINTR; } process_fd_request(); return 0; } /* * Misc Ioctl's and support * ======================== */ static inline int fd_copyout(void __user *param, const void *address, unsigned long size) { return copy_to_user(param, address, size) ? -EFAULT : 0; } static inline int fd_copyin(void __user *param, void *address, unsigned long size) { return copy_from_user(address, param, size) ? 
-EFAULT : 0; } static const char *drive_name(int type, int drive) { struct floppy_struct *floppy; if (type) floppy = floppy_type + type; else { if (UDP->native_format) floppy = floppy_type + UDP->native_format; else return "(null)"; } if (floppy->name) return floppy->name; else return "(null)"; } /* raw commands */ static void raw_cmd_done(int flag) { int i; if (!flag) { raw_cmd->flags |= FD_RAW_FAILURE; raw_cmd->flags |= FD_RAW_HARDFAILURE; } else { raw_cmd->reply_count = inr; if (raw_cmd->reply_count > MAX_REPLIES) raw_cmd->reply_count = 0; for (i = 0; i < raw_cmd->reply_count; i++) raw_cmd->reply[i] = reply_buffer[i]; if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) { unsigned long flags; flags = claim_dma_lock(); raw_cmd->length = fd_get_dma_residue(); release_dma_lock(flags); } if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) && (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0))) raw_cmd->flags |= FD_RAW_FAILURE; if (disk_change(current_drive)) raw_cmd->flags |= FD_RAW_DISK_CHANGE; else raw_cmd->flags &= ~FD_RAW_DISK_CHANGE; if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER) motor_off_callback(current_drive); if (raw_cmd->next && (!(raw_cmd->flags & FD_RAW_FAILURE) || !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) && ((raw_cmd->flags & FD_RAW_FAILURE) || !(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) { raw_cmd = raw_cmd->next; return; } } generic_done(flag); } static const struct cont_t raw_cmd_cont = { .interrupt = success_and_wakeup, .redo = floppy_start, .error = generic_failure, .done = raw_cmd_done }; static int raw_cmd_copyout(int cmd, void __user *param, struct floppy_raw_cmd *ptr) { int ret; while (ptr) { ret = copy_to_user(param, ptr, sizeof(*ptr)); if (ret) return -EFAULT; param += sizeof(struct floppy_raw_cmd); if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) { if (ptr->length >= 0 && ptr->length <= ptr->buffer_length) { long length = ptr->buffer_length - ptr->length; ret = fd_copyout(ptr->data, ptr->kernel_data, length); if (ret) return ret; } } ptr = ptr->next; } return 0; } static void raw_cmd_free(struct floppy_raw_cmd **ptr) { struct floppy_raw_cmd *next; struct floppy_raw_cmd *this; this = *ptr; *ptr = NULL; while (this) { if (this->buffer_length) { fd_dma_mem_free((unsigned long)this->kernel_data, this->buffer_length); this->buffer_length = 0; } next = this->next; kfree(this); this = next; } } static int raw_cmd_copyin(int cmd, void __user *param, struct floppy_raw_cmd **rcmd) { struct floppy_raw_cmd *ptr; int ret; int i; *rcmd = NULL; loop: ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER); if (!ptr) return -ENOMEM; *rcmd = ptr; ret = copy_from_user(ptr, param, sizeof(*ptr)); if (ret) return -EFAULT; ptr->next = NULL; ptr->buffer_length = 0; param += sizeof(struct floppy_raw_cmd); if (ptr->cmd_count > 33) /* the command may now also take up the space * initially intended for the reply & the * reply count. Needed for long 82078 commands * such as RESTORE, which takes ... 17 command * bytes. Murphy's law #137: When you reserve * 16 bytes for a structure, you'll one day * discover that you really need 17... 
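 * The cmd, reply_count and reply members are 16 + 1 + 16 = 33
 * contiguous bytes, which is where the limit checked here comes from.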
*/ return -EINVAL; for (i = 0; i < 16; i++) ptr->reply[i] = 0; ptr->resultcode = 0; ptr->kernel_data = NULL; if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { if (ptr->length <= 0) return -EINVAL; ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length); fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length); if (!ptr->kernel_data) return -ENOMEM; ptr->buffer_length = ptr->length; } if (ptr->flags & FD_RAW_WRITE) { ret = fd_copyin(ptr->data, ptr->kernel_data, ptr->length); if (ret) return ret; } if (ptr->flags & FD_RAW_MORE) { rcmd = &(ptr->next); ptr->rate &= 0x43; goto loop; } return 0; } static int raw_cmd_ioctl(int cmd, void __user *param) { struct floppy_raw_cmd *my_raw_cmd; int drive; int ret2; int ret; if (FDCS->rawcmd <= 1) FDCS->rawcmd = 1; for (drive = 0; drive < N_DRIVE; drive++) { if (FDC(drive) != fdc) continue; if (drive == current_drive) { if (UDRS->fd_ref > 1) { FDCS->rawcmd = 2; break; } } else if (UDRS->fd_ref) { FDCS->rawcmd = 2; break; } } if (FDCS->reset) return -EIO; ret = raw_cmd_copyin(cmd, param, &my_raw_cmd); if (ret) { raw_cmd_free(&my_raw_cmd); return ret; } raw_cmd = my_raw_cmd; cont = &raw_cmd_cont; ret = wait_til_done(floppy_start, true); debug_dcl(DP->flags, "calling disk change from raw_cmd ioctl\n"); if (ret != -EINTR && FDCS->reset) ret = -EIO; DRS->track = NO_TRACK; ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd); if (!ret) ret = ret2; raw_cmd_free(&my_raw_cmd); return ret; } static int invalidate_drive(struct block_device *bdev) { /* invalidate the buffer track to force a reread */ set_bit((long)bdev->bd_disk->private_data, &fake_change); process_fd_request(); check_disk_change(bdev); return 0; } static int set_geometry(unsigned int cmd, struct floppy_struct *g, int drive, int type, struct block_device *bdev) { int cnt; /* sanity checking for parameters. */ if (g->sect <= 0 || g->head <= 0 || g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) || /* check if reserved bits are set */ (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0) return -EINVAL; if (type) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&open_lock); if (lock_fdc(drive, true)) { mutex_unlock(&open_lock); return -EINTR; } floppy_type[type] = *g; floppy_type[type].name = "user format"; for (cnt = type << 2; cnt < (type << 2) + 4; cnt++) floppy_sizes[cnt] = floppy_sizes[cnt + 0x80] = floppy_type[type].size + 1; process_fd_request(); for (cnt = 0; cnt < N_DRIVE; cnt++) { struct block_device *bdev = opened_bdev[cnt]; if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) continue; __invalidate_device(bdev, true); } mutex_unlock(&open_lock); } else { int oldStretch; if (lock_fdc(drive, true)) return -EINTR; if (cmd != FDDEFPRM) { /* notice a disk change immediately, else * we lose our settings immediately*/ if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; } oldStretch = g->stretch; user_params[drive] = *g; if (buffer_drive == drive) SUPBOUND(buffer_max, user_params[drive].sect); current_type[drive] = &user_params[drive]; floppy_sizes[drive] = user_params[drive].size; if (cmd == FDDEFPRM) DRS->keep_data = -1; else DRS->keep_data = 1; /* invalidation. Invalidate only when needed, i.e. * when there are already sectors in the buffer cache * whose number will change. 
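 * (the condition below compares DRS->maxblock and DRS->maxtrack
 * against the new user-supplied geometry)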
This is useful, because * mtools often changes the geometry of the disk after * looking at the boot block */ if (DRS->maxblock > user_params[drive].sect || DRS->maxtrack || ((user_params[drive].sect ^ oldStretch) & (FD_SWAPSIDES | FD_SECTBASEMASK))) invalidate_drive(bdev); else process_fd_request(); } return 0; } /* handle obsolete ioctl's */ static unsigned int ioctl_table[] = { FDCLRPRM, FDSETPRM, FDDEFPRM, FDGETPRM, FDMSGON, FDMSGOFF, FDFMTBEG, FDFMTTRK, FDFMTEND, FDSETEMSGTRESH, FDFLUSH, FDSETMAXERRS, FDGETMAXERRS, FDGETDRVTYP, FDSETDRVPRM, FDGETDRVPRM, FDGETDRVSTAT, FDPOLLDRVSTAT, FDRESET, FDGETFDCSTAT, FDWERRORCLR, FDWERRORGET, FDRAWCMD, FDEJECT, FDTWADDLE }; static int normalize_ioctl(unsigned int *cmd, int *size) { int i; for (i = 0; i < ARRAY_SIZE(ioctl_table); i++) { if ((*cmd & 0xffff) == (ioctl_table[i] & 0xffff)) { *size = _IOC_SIZE(*cmd); *cmd = ioctl_table[i]; if (*size > _IOC_SIZE(*cmd)) { pr_info("ioctl not yet supported\n"); return -EFAULT; } return 0; } } return -EINVAL; } static int get_floppy_geometry(int drive, int type, struct floppy_struct **g) { if (type) *g = &floppy_type[type]; else { if (lock_fdc(drive, false)) return -EINTR; if (poll_drive(false, 0) == -EINTR) return -EINTR; process_fd_request(); *g = current_type[drive]; } if (!*g) return -ENODEV; return 0; } static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { int drive = (long)bdev->bd_disk->private_data; int type = ITYPE(drive_state[drive].fd_device); struct floppy_struct *g; int ret; ret = get_floppy_geometry(drive, type, &g); if (ret) return ret; geo->heads = g->head; geo->sectors = g->sect; geo->cylinders = g->track; return 0; } static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param) { int drive = (long)bdev->bd_disk->private_data; int type = ITYPE(UDRS->fd_device); int i; int ret; int size; union inparam { struct floppy_struct g; /* geometry */ struct format_descr f; struct floppy_max_errors max_errors; struct floppy_drive_params dp; } inparam; /* parameters coming from user space */ const void *outparam; /* parameters passed back to user space */ /* convert compatibility eject ioctls into floppy eject ioctl. * We do this in order to provide a means to eject floppy disks before * installing the new fdutils package */ if (cmd == CDROMEJECT || /* CD-ROM eject */ cmd == 0x6470) { /* SunOS floppy eject */ DPRINT("obsolete eject ioctl\n"); DPRINT("please use floppycontrol --eject\n"); cmd = FDEJECT; } if (!((cmd & 0xff00) == 0x0200)) return -EINVAL; /* convert the old style command into a new style command */ ret = normalize_ioctl(&cmd, &size); if (ret) return ret; /* permission checks */ if (((cmd & 0x40) && !(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) || ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))) return -EPERM; if (WARN_ON(size < 0 || size > sizeof(inparam))) return -EINVAL; /* copyin */ memset(&inparam, 0, sizeof(inparam)); if (_IOC_DIR(cmd) & _IOC_WRITE) { ret = fd_copyin((void __user *)param, &inparam, size); if (ret) return ret; } switch (cmd) { case FDEJECT: if (UDRS->fd_ref != 1) /* somebody else has this drive open */ return -EBUSY; if (lock_fdc(drive, true)) return -EINTR; /* do the actual eject. 
Fails on * non-Sparc architectures */ ret = fd_eject(UNIT(drive)); set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); set_bit(FD_VERIFY_BIT, &UDRS->flags); process_fd_request(); return ret; case FDCLRPRM: if (lock_fdc(drive, true)) return -EINTR; current_type[drive] = NULL; floppy_sizes[drive] = MAX_DISK_SIZE << 1; UDRS->keep_data = 0; return invalidate_drive(bdev); case FDSETPRM: case FDDEFPRM: return set_geometry(cmd, &inparam.g, drive, type, bdev); case FDGETPRM: ret = get_floppy_geometry(drive, type, (struct floppy_struct **)&outparam); if (ret) return ret; break; case FDMSGON: UDP->flags |= FTD_MSG; return 0; case FDMSGOFF: UDP->flags &= ~FTD_MSG; return 0; case FDFMTBEG: if (lock_fdc(drive, true)) return -EINTR; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; ret = UDRS->flags; process_fd_request(); if (ret & FD_VERIFY) return -ENODEV; if (!(ret & FD_DISK_WRITABLE)) return -EROFS; return 0; case FDFMTTRK: if (UDRS->fd_ref != 1) return -EBUSY; return do_format(drive, &inparam.f); case FDFMTEND: case FDFLUSH: if (lock_fdc(drive, true)) return -EINTR; return invalidate_drive(bdev); case FDSETEMSGTRESH: UDP->max_errors.reporting = (unsigned short)(param & 0x0f); return 0; case FDGETMAXERRS: outparam = &UDP->max_errors; break; case FDSETMAXERRS: UDP->max_errors = inparam.max_errors; break; case FDGETDRVTYP: outparam = drive_name(type, drive); SUPBOUND(size, strlen((const char *)outparam) + 1); break; case FDSETDRVPRM: *UDP = inparam.dp; break; case FDGETDRVPRM: outparam = UDP; break; case FDPOLLDRVSTAT: if (lock_fdc(drive, true)) return -EINTR; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; process_fd_request(); /* fall through */ case FDGETDRVSTAT: outparam = UDRS; break; case FDRESET: return user_reset_fdc(drive, (int)param, true); case FDGETFDCSTAT: outparam = UFDCS; break; case FDWERRORCLR: memset(UDRWE, 0, sizeof(*UDRWE)); return 0; case FDWERRORGET: outparam = UDRWE; break; case FDRAWCMD: if (type) return -EINVAL; if (lock_fdc(drive, true)) return -EINTR; set_floppy(drive); i = raw_cmd_ioctl(cmd, (void __user *)param); if (i == -EINTR) return -EINTR; process_fd_request(); return i; case FDTWADDLE: if (lock_fdc(drive, true)) return -EINTR; twaddle(); process_fd_request(); return 0; default: return -EINVAL; } if (_IOC_DIR(cmd) & _IOC_READ) return fd_copyout((void __user *)param, outparam, size); return 0; } static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param) { int ret; mutex_lock(&floppy_mutex); ret = fd_locked_ioctl(bdev, mode, cmd, param); mutex_unlock(&floppy_mutex); return ret; } static void __init config_types(void) { bool has_drive = false; int drive; /* read drive info out of physical CMOS */ drive = 0; if (!UDP->cmos) UDP->cmos = FLOPPY0_TYPE; drive = 1; if (!UDP->cmos && FLOPPY1_TYPE) UDP->cmos = FLOPPY1_TYPE; /* FIXME: additional physical CMOS drive detection should go here */ for (drive = 0; drive < N_DRIVE; drive++) { unsigned int type = UDP->cmos; struct floppy_drive_params *params; const char *name = NULL; static char temparea[32]; if (type < ARRAY_SIZE(default_drive_params)) { params = &default_drive_params[type].params; if (type) { name = default_drive_params[type].name; allowed_drive_mask |= 1 << drive; } else allowed_drive_mask &= ~(1 << drive); } else { params = &default_drive_params[0].params; sprintf(temparea, "unknown type %d (usb?)", type); name = temparea; } if (name) { const char *prepend; if (!has_drive) { prepend = ""; has_drive = true; pr_info("Floppy drive(s):"); } else { prepend = 
","; } pr_cont("%s fd%d is %s", prepend, drive, name); } *UDP = *params; } if (has_drive) pr_cont("\n"); } static void floppy_release(struct gendisk *disk, fmode_t mode) { int drive = (long)disk->private_data; mutex_lock(&floppy_mutex); mutex_lock(&open_lock); if (!UDRS->fd_ref--) { DPRINT("floppy_release with fd_ref == 0"); UDRS->fd_ref = 0; } if (!UDRS->fd_ref) opened_bdev[drive] = NULL; mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); } /* * floppy_open check for aliasing (/dev/fd0 can be the same as * /dev/PS0 etc), and disallows simultaneous access to the same * drive with different device numbers. */ static int floppy_open(struct block_device *bdev, fmode_t mode) { int drive = (long)bdev->bd_disk->private_data; int old_dev, new_dev; int try; int res = -EBUSY; char *tmp; mutex_lock(&floppy_mutex); mutex_lock(&open_lock); old_dev = UDRS->fd_device; if (opened_bdev[drive] && opened_bdev[drive] != bdev) goto out2; if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) { set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); set_bit(FD_VERIFY_BIT, &UDRS->flags); } UDRS->fd_ref++; opened_bdev[drive] = bdev; res = -ENXIO; if (!floppy_track_buffer) { /* if opening an ED drive, reserve a big buffer, * else reserve a small one */ if ((UDP->cmos == 6) || (UDP->cmos == 5)) try = 64; /* Only 48 actually useful */ else try = 32; /* Only 24 actually useful */ tmp = (char *)fd_dma_mem_alloc(1024 * try); if (!tmp && !floppy_track_buffer) { try >>= 1; /* buffer only one side */ INFBOUND(try, 16); tmp = (char *)fd_dma_mem_alloc(1024 * try); } if (!tmp && !floppy_track_buffer) fallback_on_nodma_alloc(&tmp, 2048 * try); if (!tmp && !floppy_track_buffer) { DPRINT("Unable to allocate DMA memory\n"); goto out; } if (floppy_track_buffer) { if (tmp) fd_dma_mem_free((unsigned long)tmp, try * 1024); } else { buffer_min = buffer_max = -1; floppy_track_buffer = tmp; max_buffer_sectors = try; } } new_dev = MINOR(bdev->bd_dev); UDRS->fd_device = new_dev; set_capacity(disks[drive], floppy_sizes[new_dev]); if (old_dev != -1 && old_dev != new_dev) { if (buffer_drive == drive) buffer_track = -1; } if (UFDCS->rawcmd == 1) UFDCS->rawcmd = 2; if (!(mode & FMODE_NDELAY)) { if (mode & (FMODE_READ|FMODE_WRITE)) { UDRS->last_checked = 0; clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); check_disk_change(bdev); if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) goto out; if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) goto out; } res = -EROFS; if ((mode & FMODE_WRITE) && !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) goto out; } mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); return 0; out: UDRS->fd_ref--; if (!UDRS->fd_ref) opened_bdev[drive] = NULL; out2: mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); return res; } /* * Check if the disk has been changed or if a change has been faked. */ static unsigned int floppy_check_events(struct gendisk *disk, unsigned int clearing) { int drive = (long)disk->private_data; if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags)) return DISK_EVENT_MEDIA_CHANGE; if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { lock_fdc(drive, false); poll_drive(false, 0); process_fd_request(); } if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags) || test_bit(drive, &fake_change) || drive_no_geom(drive)) return DISK_EVENT_MEDIA_CHANGE; return 0; } /* * This implements "read block 0" for floppy_revalidate(). 
* Needed for format autodetection, checking whether there is * a disk in the drive, and whether that disk is writable. */ struct rb0_cbdata { int drive; struct completion complete; }; static void floppy_rb0_cb(struct bio *bio, int err) { struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; int drive = cbdata->drive; if (err) { pr_info("floppy: error %d while reading block 0", err); set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); } complete(&cbdata->complete); } static int __floppy_read_block_0(struct block_device *bdev, int drive) { struct bio bio; struct bio_vec bio_vec; struct page *page; struct rb0_cbdata cbdata; size_t size; page = alloc_page(GFP_NOIO); if (!page) { process_fd_request(); return -ENOMEM; } size = bdev->bd_block_size; if (!size) size = 1024; cbdata.drive = drive; bio_init(&bio); bio.bi_io_vec = &bio_vec; bio_vec.bv_page = page; bio_vec.bv_len = size; bio_vec.bv_offset = 0; bio.bi_vcnt = 1; bio.bi_iter.bi_size = size; bio.bi_bdev = bdev; bio.bi_iter.bi_sector = 0; bio.bi_flags = (1 << BIO_QUIET); bio.bi_private = &cbdata; bio.bi_end_io = floppy_rb0_cb; submit_bio(READ, &bio); process_fd_request(); init_completion(&cbdata.complete); wait_for_completion(&cbdata.complete); __free_page(page); return 0; } /* revalidate the floppy disk, i.e. trigger format autodetection by reading * the bootblock (block 0). "Autodetection" is also needed to check whether * there is a disk in the drive at all... Thus we also do it for fixed * geometry formats */ static int floppy_revalidate(struct gendisk *disk) { int drive = (long)disk->private_data; int cf; int res = 0; if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags) || test_bit(drive, &fake_change) || drive_no_geom(drive)) { if (WARN(atomic_read(&usage_count) == 0, "VFS: revalidate called on non-open device.\n")) return -EFAULT; lock_fdc(drive, false); cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags)); if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { process_fd_request(); /*already done by another thread */ return 0; } UDRS->maxblock = 0; UDRS->maxtrack = 0; if (buffer_drive == drive) buffer_track = -1; clear_bit(drive, &fake_change); clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); if (cf) UDRS->generation++; if (drive_no_geom(drive)) { /* auto-sensing */ res = __floppy_read_block_0(opened_bdev[drive], drive); } else { if (cf) poll_drive(false, FD_RAW_NEED_DISK); process_fd_request(); } } set_capacity(disk, floppy_sizes[UDRS->fd_device]); return res; } static const struct block_device_operations floppy_fops = { .owner = THIS_MODULE, .open = floppy_open, .release = floppy_release, .ioctl = fd_ioctl, .getgeo = fd_getgeo, .check_events = floppy_check_events, .revalidate_disk = floppy_revalidate, }; /* * Floppy Driver initialization * ============================= */ /* Determine the floppy disk controller type */ /* This routine was written by David C. Niemi */ static char __init get_fdc_version(void) { int r; output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */ if (FDCS->reset) return FDC_NONE; r = result(); if (r <= 0x00) return FDC_NONE; /* No FDC present ??? 
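 * (the DUMPREGS probe returned no result bytes at all)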
*/ if ((r == 1) && (reply_buffer[0] == 0x80)) { pr_info("FDC %d is an 8272A\n", fdc); return FDC_8272A; /* 8272a/765 don't know DUMPREGS */ } if (r != 10) { pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n", fdc, r); return FDC_UNKNOWN; } if (!fdc_configure()) { pr_info("FDC %d is an 82072\n", fdc); return FDC_82072; /* 82072 doesn't know CONFIGURE */ } output_byte(FD_PERPENDICULAR); if (need_more_output() == MORE_OUTPUT) { output_byte(0); } else { pr_info("FDC %d is an 82072A\n", fdc); return FDC_82072A; /* 82072A as found on Sparcs. */ } output_byte(FD_UNLOCK); r = result(); if ((r == 1) && (reply_buffer[0] == 0x80)) { pr_info("FDC %d is a pre-1991 82077\n", fdc); return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know * LOCK/UNLOCK */ } if ((r != 1) || (reply_buffer[0] != 0x00)) { pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n", fdc, r); return FDC_UNKNOWN; } output_byte(FD_PARTID); r = result(); if (r != 1) { pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n", fdc, r); return FDC_UNKNOWN; } if (reply_buffer[0] == 0x80) { pr_info("FDC %d is a post-1991 82077\n", fdc); return FDC_82077; /* Revised 82077AA passes all the tests */ } switch (reply_buffer[0] >> 5) { case 0x0: /* Either a 82078-1 or a 82078SL running at 5Volt */ pr_info("FDC %d is an 82078.\n", fdc); return FDC_82078; case 0x1: pr_info("FDC %d is a 44pin 82078\n", fdc); return FDC_82078; case 0x2: pr_info("FDC %d is a S82078B\n", fdc); return FDC_S82078B; case 0x3: pr_info("FDC %d is a National Semiconductor PC87306\n", fdc); return FDC_87306; default: pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n", fdc, reply_buffer[0] >> 5); return FDC_82078_UNKN; } } /* get_fdc_version */ /* lilo configuration */ static void __init floppy_set_flags(int *ints, int param, int param2) { int i; for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) { if (param) default_drive_params[i].params.flags |= param2; else default_drive_params[i].params.flags &= ~param2; } DPRINT("%s flag 0x%x\n", param2 ? "Setting" : "Clearing", param); } static void __init daring(int *ints, int param, int param2) { int i; for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) { if (param) { default_drive_params[i].params.select_delay = 0; default_drive_params[i].params.flags |= FD_SILENT_DCL_CLEAR; } else { default_drive_params[i].params.select_delay = 2 * HZ / 100; default_drive_params[i].params.flags &= ~FD_SILENT_DCL_CLEAR; } } DPRINT("Assuming %s floppy hardware\n", param ? 
"standard" : "broken"); } static void __init set_cmos(int *ints, int dummy, int dummy2) { int current_drive = 0; if (ints[0] != 2) { DPRINT("wrong number of parameters for CMOS\n"); return; } current_drive = ints[1]; if (current_drive < 0 || current_drive >= 8) { DPRINT("bad drive for set_cmos\n"); return; } #if N_FDC > 1 if (current_drive >= 4 && !FDC2) FDC2 = 0x370; #endif DP->cmos = ints[2]; DPRINT("setting CMOS code to %d\n", ints[2]); } static struct param_table { const char *name; void (*fn) (int *ints, int param, int param2); int *var; int def_param; int param2; } config_params[] __initdata = { {"allowed_drive_mask", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */ {"all_drives", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */ {"asus_pci", NULL, &allowed_drive_mask, 0x33, 0}, {"irq", NULL, &FLOPPY_IRQ, 6, 0}, {"dma", NULL, &FLOPPY_DMA, 2, 0}, {"daring", daring, NULL, 1, 0}, #if N_FDC > 1 {"two_fdc", NULL, &FDC2, 0x370, 0}, {"one_fdc", NULL, &FDC2, 0, 0}, #endif {"thinkpad", floppy_set_flags, NULL, 1, FD_INVERTED_DCL}, {"broken_dcl", floppy_set_flags, NULL, 1, FD_BROKEN_DCL}, {"messages", floppy_set_flags, NULL, 1, FTD_MSG}, {"silent_dcl_clear", floppy_set_flags, NULL, 1, FD_SILENT_DCL_CLEAR}, {"debug", floppy_set_flags, NULL, 1, FD_DEBUG}, {"nodma", NULL, &can_use_virtual_dma, 1, 0}, {"omnibook", NULL, &can_use_virtual_dma, 1, 0}, {"yesdma", NULL, &can_use_virtual_dma, 0, 0}, {"fifo_depth", NULL, &fifo_depth, 0xa, 0}, {"nofifo", NULL, &no_fifo, 0x20, 0}, {"usefifo", NULL, &no_fifo, 0, 0}, {"cmos", set_cmos, NULL, 0, 0}, {"slow", NULL, &slow_floppy, 1, 0}, {"unexpected_interrupts", NULL, &print_unex, 1, 0}, {"no_unexpected_interrupts", NULL, &print_unex, 0, 0}, {"L40SX", NULL, &print_unex, 0, 0} EXTRA_FLOPPY_PARAMS }; static int __init floppy_setup(char *str) { int i; int param; int ints[11]; str = get_options(str, ARRAY_SIZE(ints), ints); if (str) { for (i = 0; i < ARRAY_SIZE(config_params); i++) { if (strcmp(str, config_params[i].name) == 0) { if (ints[0]) param = ints[1]; else param = config_params[i].def_param; if (config_params[i].fn) config_params[i].fn(ints, param, config_params[i]. 
param2); if (config_params[i].var) { DPRINT("%s=%d\n", str, param); *config_params[i].var = param; } return 1; } } } if (str) { DPRINT("unknown floppy option [%s]\n", str); DPRINT("allowed options are:"); for (i = 0; i < ARRAY_SIZE(config_params); i++) pr_cont(" %s", config_params[i].name); pr_cont("\n"); } else DPRINT("botched floppy option\n"); DPRINT("Read Documentation/blockdev/floppy.txt\n"); return 0; } static int have_no_fdc = -ENODEV; static ssize_t floppy_cmos_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *p = to_platform_device(dev); int drive; drive = p->id; return sprintf(buf, "%X\n", UDP->cmos); } static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL); static void floppy_device_release(struct device *dev) { } static int floppy_resume(struct device *dev) { int fdc; for (fdc = 0; fdc < N_FDC; fdc++) if (FDCS->address != -1) user_reset_fdc(-1, FD_RESET_ALWAYS, false); return 0; } static const struct dev_pm_ops floppy_pm_ops = { .resume = floppy_resume, .restore = floppy_resume, }; static struct platform_driver floppy_driver = { .driver = { .name = "floppy", .pm = &floppy_pm_ops, }, }; static struct platform_device floppy_device[N_DRIVE]; static bool floppy_available(int drive) { if (!(allowed_drive_mask & (1 << drive))) return false; if (fdc_state[FDC(drive)].version == FDC_NONE) return false; return true; } static struct kobject *floppy_find(dev_t dev, int *part, void *data) { int drive = (*part & 3) | ((*part & 0x80) >> 5); if (drive >= N_DRIVE || !floppy_available(drive)) return NULL; if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) return NULL; *part = 0; return get_disk(disks[drive]); } static int __init do_floppy_init(void) { int i, unit, drive, err; set_debugt(); interruptjiffies = resultjiffies = jiffies; #if defined(CONFIG_PPC) if (check_legacy_ioport(FDC1)) return -ENODEV; #endif raw_cmd = NULL; floppy_wq = alloc_ordered_workqueue("floppy", 0); if (!floppy_wq) return -ENOMEM; for (drive = 0; drive < N_DRIVE; drive++) { disks[drive] = alloc_disk(1); if (!disks[drive]) { err = -ENOMEM; goto out_put_disk; } disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock); if (!disks[drive]->queue) { err = -ENOMEM; goto out_put_disk; } blk_queue_max_hw_sectors(disks[drive]->queue, 64); disks[drive]->major = FLOPPY_MAJOR; disks[drive]->first_minor = TOMINOR(drive); disks[drive]->fops = &floppy_fops; sprintf(disks[drive]->disk_name, "fd%d", drive); init_timer(&motor_off_timer[drive]); motor_off_timer[drive].data = drive; motor_off_timer[drive].function = motor_off_callback; } err = register_blkdev(FLOPPY_MAJOR, "fd"); if (err) goto out_put_disk; err = platform_driver_register(&floppy_driver); if (err) goto out_unreg_blkdev; blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, floppy_find, NULL, NULL); for (i = 0; i < 256; i++) if (ITYPE(i)) floppy_sizes[i] = floppy_type[ITYPE(i)].size; else floppy_sizes[i] = MAX_DISK_SIZE << 1; reschedule_timeout(MAXTIMEOUT, "floppy init"); config_types(); for (i = 0; i < N_FDC; i++) { fdc = i; memset(FDCS, 0, sizeof(*FDCS)); FDCS->dtr = -1; FDCS->dor = 0x4; #if defined(__sparc__) || defined(__mc68000__) /*sparcs/sun3x don't have a DOR reset which we can fall back on to */ #ifdef __mc68000__ if (MACH_IS_SUN3X) #endif FDCS->version = FDC_82072A; #endif } use_virtual_dma = can_use_virtual_dma & 1; fdc_state[0].address = FDC1; if (fdc_state[0].address == -1) { cancel_delayed_work(&fd_timeout); err = -ENODEV; goto out_unreg_region; } #if N_FDC > 1 fdc_state[1].address = FDC2; #endif 
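	/* Both controller base addresses are known at this point; the
	 * code below resets each FDC, grabs IRQ/DMA and asks
	 * get_fdc_version() what hardware is actually present. */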
fdc = 0; /* reset fdc in case of unexpected interrupt */ err = floppy_grab_irq_and_dma(); if (err) { cancel_delayed_work(&fd_timeout); err = -EBUSY; goto out_unreg_region; } /* initialise drive state */ for (drive = 0; drive < N_DRIVE; drive++) { memset(UDRS, 0, sizeof(*UDRS)); memset(UDRWE, 0, sizeof(*UDRWE)); set_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags); set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags); set_bit(FD_VERIFY_BIT, &UDRS->flags); UDRS->fd_device = -1; floppy_track_buffer = NULL; max_buffer_sectors = 0; } /* * Small 10 msec delay to let through any interrupt that * initialization might have triggered, to not * confuse detection: */ msleep(10); for (i = 0; i < N_FDC; i++) { fdc = i; FDCS->driver_version = FD_DRIVER_VERSION; for (unit = 0; unit < 4; unit++) FDCS->track[unit] = 0; if (FDCS->address == -1) continue; FDCS->rawcmd = 2; if (user_reset_fdc(-1, FD_RESET_ALWAYS, false)) { /* free ioports reserved by floppy_grab_irq_and_dma() */ floppy_release_regions(fdc); FDCS->address = -1; FDCS->version = FDC_NONE; continue; } /* Try to determine the floppy controller type */ FDCS->version = get_fdc_version(); if (FDCS->version == FDC_NONE) { /* free ioports reserved by floppy_grab_irq_and_dma() */ floppy_release_regions(fdc); FDCS->address = -1; continue; } if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A) can_use_virtual_dma = 0; have_no_fdc = 0; /* Not all FDCs seem to be able to handle the version command * properly, so force a reset for the standard FDC clones, * to avoid interrupt garbage. */ user_reset_fdc(-1, FD_RESET_ALWAYS, false); } fdc = 0; cancel_delayed_work(&fd_timeout); current_drive = 0; initialized = true; if (have_no_fdc) { DPRINT("no floppy controllers found\n"); err = have_no_fdc; goto out_release_dma; } for (drive = 0; drive < N_DRIVE; drive++) { if (!floppy_available(drive)) continue; floppy_device[drive].name = floppy_device_name; floppy_device[drive].id = drive; floppy_device[drive].dev.release = floppy_device_release; err = platform_device_register(&floppy_device[drive]); if (err) goto out_remove_drives; err = device_create_file(&floppy_device[drive].dev, &dev_attr_cmos); if (err) goto out_unreg_platform_dev; /* to be cleaned up... 
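 * (the drive number is squeezed through the gendisk private_data
 * pointer as a plain integer cast)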
*/ disks[drive]->private_data = (void *)(long)drive; disks[drive]->flags |= GENHD_FL_REMOVABLE; disks[drive]->driverfs_dev = &floppy_device[drive].dev; add_disk(disks[drive]); } return 0; out_unreg_platform_dev: platform_device_unregister(&floppy_device[drive]); out_remove_drives: while (drive--) { if (floppy_available(drive)) { del_gendisk(disks[drive]); device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); platform_device_unregister(&floppy_device[drive]); } } out_release_dma: if (atomic_read(&usage_count)) floppy_release_irq_and_dma(); out_unreg_region: blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); platform_driver_unregister(&floppy_driver); out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); out_put_disk: destroy_workqueue(floppy_wq); for (drive = 0; drive < N_DRIVE; drive++) { if (!disks[drive]) break; if (disks[drive]->queue) { del_timer_sync(&motor_off_timer[drive]); blk_cleanup_queue(disks[drive]->queue); disks[drive]->queue = NULL; } put_disk(disks[drive]); } return err; } #ifndef MODULE static __init void floppy_async_init(void *data, async_cookie_t cookie) { do_floppy_init(); } #endif static int __init floppy_init(void) { #ifdef MODULE return do_floppy_init(); #else /* Don't hold up the bootup by the floppy initialization */ async_schedule(floppy_async_init, NULL); return 0; #endif } static const struct io_region { int offset; int size; } io_regions[] = { { 2, 1 }, /* address + 3 is sometimes reserved by pnp bios for motherboard */ { 4, 2 }, /* address + 6 is reserved, and may be taken by IDE. * Unfortunately, Adaptec doesn't know this :-(, */ { 7, 1 }, }; static void floppy_release_allocated_regions(int fdc, const struct io_region *p) { while (p != io_regions) { p--; release_region(FDCS->address + p->offset, p->size); } } #define ARRAY_END(X) (&((X)[ARRAY_SIZE(X)])) static int floppy_request_regions(int fdc) { const struct io_region *p; for (p = io_regions; p < ARRAY_END(io_regions); p++) { if (!request_region(FDCS->address + p->offset, p->size, "floppy")) { DPRINT("Floppy io-port 0x%04lx in use\n", FDCS->address + p->offset); floppy_release_allocated_regions(fdc, p); return -EBUSY; } } return 0; } static void floppy_release_regions(int fdc) { floppy_release_allocated_regions(fdc, ARRAY_END(io_regions)); } static int floppy_grab_irq_and_dma(void) { if (atomic_inc_return(&usage_count) > 1) return 0; /* * We might have scheduled a free_irq(), wait it to * drain first: */ flush_workqueue(floppy_wq); if (fd_request_irq()) { DPRINT("Unable to grab IRQ%d for the floppy driver\n", FLOPPY_IRQ); atomic_dec(&usage_count); return -1; } if (fd_request_dma()) { DPRINT("Unable to grab DMA%d for the floppy driver\n", FLOPPY_DMA); if (can_use_virtual_dma & 2) use_virtual_dma = can_use_virtual_dma = 1; if (!(can_use_virtual_dma & 1)) { fd_free_irq(); atomic_dec(&usage_count); return -1; } } for (fdc = 0; fdc < N_FDC; fdc++) { if (FDCS->address != -1) { if (floppy_request_regions(fdc)) goto cleanup; } } for (fdc = 0; fdc < N_FDC; fdc++) { if (FDCS->address != -1) { reset_fdc_info(1); fd_outb(FDCS->dor, FD_DOR); } } fdc = 0; set_dor(0, ~0, 8); /* avoid immediate interrupt */ for (fdc = 0; fdc < N_FDC; fdc++) if (FDCS->address != -1) fd_outb(FDCS->dor, FD_DOR); /* * The driver will try and free resources and relies on us * to know if they were allocated or not. 
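 * irqdma_allocated, set just below, records exactly that.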
	 */
	fdc = 0;
	irqdma_allocated = 1;
	return 0;
cleanup:
	fd_free_irq();
	fd_free_dma();
	while (--fdc >= 0)
		floppy_release_regions(fdc);
	atomic_dec(&usage_count);
	return -1;
}

static void floppy_release_irq_and_dma(void)
{
	int old_fdc;
#ifndef __sparc__
	int drive;
#endif
	long tmpsize;
	unsigned long tmpaddr;

	if (!atomic_dec_and_test(&usage_count))
		return;

	if (irqdma_allocated) {
		fd_disable_dma();
		fd_free_dma();
		fd_free_irq();
		irqdma_allocated = 0;
	}
	set_dor(0, ~0, 8);
#if N_FDC > 1
	set_dor(1, ~8, 0);
#endif

	if (floppy_track_buffer && max_buffer_sectors) {
		tmpsize = max_buffer_sectors * 1024;
		tmpaddr = (unsigned long)floppy_track_buffer;
		floppy_track_buffer = NULL;
		max_buffer_sectors = 0;
		buffer_min = buffer_max = -1;
		fd_dma_mem_free(tmpaddr, tmpsize);
	}
#ifndef __sparc__
	for (drive = 0; drive < N_FDC * 4; drive++)
		if (timer_pending(motor_off_timer + drive))
			pr_info("motor off timer %d still active\n", drive);
#endif

	if (delayed_work_pending(&fd_timeout))
		pr_info("floppy timer still active:%s\n", timeout_message);
	if (delayed_work_pending(&fd_timer))
		pr_info("auxiliary floppy timer still active\n");
	if (work_pending(&floppy_work))
		pr_info("work still pending\n");
	old_fdc = fdc;
	for (fdc = 0; fdc < N_FDC; fdc++)
		if (FDCS->address != -1)
			floppy_release_regions(fdc);
	fdc = old_fdc;
}

#ifdef MODULE

static char *floppy;

static void __init parse_floppy_cfg_string(char *cfg)
{
	char *ptr;

	while (*cfg) {
		ptr = cfg;
		while (*cfg && *cfg != ' ' && *cfg != '\t')
			cfg++;
		if (*cfg) {
			*cfg = '\0';
			cfg++;
		}
		if (*ptr)
			floppy_setup(ptr);
	}
}

static int __init floppy_module_init(void)
{
	if (floppy)
		parse_floppy_cfg_string(floppy);
	return floppy_init();
}
module_init(floppy_module_init);

static void __exit floppy_module_exit(void)
{
	int drive;

	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
	unregister_blkdev(FLOPPY_MAJOR, "fd");
	platform_driver_unregister(&floppy_driver);
	destroy_workqueue(floppy_wq);

	for (drive = 0; drive < N_DRIVE; drive++) {
		del_timer_sync(&motor_off_timer[drive]);

		if (floppy_available(drive)) {
			del_gendisk(disks[drive]);
			device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
			platform_device_unregister(&floppy_device[drive]);
		}
		blk_cleanup_queue(disks[drive]->queue);

		/*
		 * These disks have not called add_disk().  Don't put down
		 * queue reference in put_disk().
		 */
		if (!(allowed_drive_mask & (1 << drive)) ||
		    fdc_state[FDC(drive)].version == FDC_NONE)
			disks[drive]->queue = NULL;

		put_disk(disks[drive]);
	}

	cancel_delayed_work_sync(&fd_timeout);
	cancel_delayed_work_sync(&fd_timer);

	if (atomic_read(&usage_count))
		floppy_release_irq_and_dma();

	/* eject disk, if any */
	fd_eject(0);
}
module_exit(floppy_module_exit);

module_param(floppy, charp, 0);
module_param(FLOPPY_IRQ, int, 0);
module_param(FLOPPY_DMA, int, 0);
MODULE_AUTHOR("Alain L. Knaff");
MODULE_SUPPORTED_DEVICE("fd");
MODULE_LICENSE("GPL");

/* This doesn't actually get used other than for module information */
static const struct pnp_device_id floppy_pnpids[] = {
	{"PNP0700", 0},
	{}
};
MODULE_DEVICE_TABLE(pnp, floppy_pnpids);

#else

__setup("floppy=", floppy_setup);
module_init(floppy_init)
#endif

MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2073_0
crossvul-cpp_data_bad_5861_29
/* GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>

#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>

#define CHKSUM_BLOCK_SIZE	1
#define CHKSUM_DIGEST_SIZE	4

#define PCLMUL_MIN_LEN		64L	/* minimum size of buffer
					 * for crc32_pclmul_le_16 */
#define SCALE_F			16L	/* size of xmm register */
#define SCALE_F_MASK		(SCALE_F - 1)

u32 crc32_pclmul_le_16(unsigned char const *buffer, size_t len, u32 crc32);

static u32 __attribute__((pure))
	crc32_pclmul_le(u32 crc, unsigned char const *p, size_t len)
{
	unsigned int iquotient;
	unsigned int iremainder;
	unsigned int prealign;

	if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable())
		return crc32_le(crc, p, len);

	/*
	 * Split the work three ways: an unaligned head and a sub-16-byte
	 * tail go through the generic table-driven crc32_le(), while the
	 * 16-byte-aligned middle is handed to the PCLMULQDQ routine.  For
	 * example, a buffer starting at an address ending in 0x09 feeds
	 * 7 head bytes to crc32_le() before the SIMD run.
	 */
	if ((long)p & SCALE_F_MASK) {
		/* align p to 16 byte */
		prealign = SCALE_F - ((long)p & SCALE_F_MASK);
		crc = crc32_le(crc, p, prealign);
		len -= prealign;
		p = (unsigned char *)(((unsigned long)p + SCALE_F_MASK) &
				      ~SCALE_F_MASK);
	}
	iquotient = len & (~SCALE_F_MASK);
	iremainder = len & SCALE_F_MASK;

	kernel_fpu_begin();
	crc = crc32_pclmul_le_16(p, iquotient, crc);
	kernel_fpu_end();

	if (iremainder)
		crc = crc32_le(crc, p + iquotient, iremainder);

	return crc;
}

static int crc32_pclmul_cra_init(struct crypto_tfm *tfm)
{
	u32 *key = crypto_tfm_ctx(tfm);

	*key = 0;

	return 0;
}

static int crc32_pclmul_setkey(struct crypto_shash *hash, const u8 *key,
			       unsigned int keylen)
{
	u32 *mctx = crypto_shash_ctx(hash);

	if (keylen != sizeof(u32)) {
		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	*mctx = le32_to_cpup((__le32 *)key);
	return 0;
}

static int crc32_pclmul_init(struct shash_desc *desc)
{
	u32 *mctx = crypto_shash_ctx(desc->tfm);
	u32 *crcp = shash_desc_ctx(desc);

	*crcp = *mctx;

	return 0;
}

static int crc32_pclmul_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	u32 *crcp = shash_desc_ctx(desc);

	*crcp = crc32_pclmul_le(*crcp, data, len);
	return 0;
}

/* No final XOR 0xFFFFFFFF, like crc32_le */
static int __crc32_pclmul_finup(u32 *crcp, const u8 *data, unsigned int len,
				u8 *out)
{
	*(__le32 *)out = cpu_to_le32(crc32_pclmul_le(*crcp, data, len));
	return 0;
}

static int crc32_pclmul_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *out)
{
	return __crc32_pclmul_finup(shash_desc_ctx(desc), data, len, out);
}

static int crc32_pclmul_final(struct shash_desc *desc, u8 *out)
{
	u32 *crcp = shash_desc_ctx(desc);

	*(__le32 *)out = cpu_to_le32p(crcp);
	return 0;
}

static int
crc32_pclmul_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32_pclmul_finup(crypto_shash_ctx(desc->tfm), data, len, out); } static struct shash_alg alg = { .setkey = crc32_pclmul_setkey, .init = crc32_pclmul_init, .update = crc32_pclmul_update, .final = crc32_pclmul_final, .finup = crc32_pclmul_finup, .digest = crc32_pclmul_digest, .descsize = sizeof(u32), .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32", .cra_driver_name = "crc32-pclmul", .cra_priority = 200, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, .cra_init = crc32_pclmul_cra_init, } }; static const struct x86_cpu_id crc32pclmul_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), {} }; MODULE_DEVICE_TABLE(x86cpu, crc32pclmul_cpu_id); static int __init crc32_pclmul_mod_init(void) { if (!x86_match_cpu(crc32pclmul_cpu_id)) { pr_info("PCLMULQDQ-NI instructions are not detected.\n"); return -ENODEV; } return crypto_register_shash(&alg); } static void __exit crc32_pclmul_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(crc32_pclmul_mod_init); module_exit(crc32_pclmul_mod_fini); MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("crc32"); MODULE_ALIAS("crc32-pclmul");
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_29
crossvul-cpp_data_bad_1576_2
/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * @file  request.c
 * @brief functions to get and process requests
 *
 * @author Rob McCool 3/21/93
 *
 * Thoroughly revamped by rst for Apache.  NB this file reads
 * best from the bottom up.
 *
 */

#include "apr_strings.h"
#include "apr_file_io.h"
#include "apr_fnmatch.h"

#define APR_WANT_STRFUNC
#include "apr_want.h"

#include "ap_config.h"
#include "ap_provider.h"
#include "httpd.h"
#include "http_config.h"
#include "http_request.h"
#include "http_core.h"
#include "http_protocol.h"
#include "http_log.h"
#include "http_main.h"
#include "util_filter.h"
#include "util_charset.h"
#include "util_script.h"
#include "ap_expr.h"
#include "mod_request.h"

#include "mod_core.h"
#include "mod_auth.h"

#if APR_HAVE_STDARG_H
#include <stdarg.h>
#endif

/* we know core's module_index is 0 */
#undef APLOG_MODULE_INDEX
#define APLOG_MODULE_INDEX AP_CORE_MODULE_INDEX

APR_HOOK_STRUCT(
    APR_HOOK_LINK(translate_name)
    APR_HOOK_LINK(map_to_storage)
    APR_HOOK_LINK(check_user_id)
    APR_HOOK_LINK(fixups)
    APR_HOOK_LINK(type_checker)
    APR_HOOK_LINK(access_checker)
    APR_HOOK_LINK(access_checker_ex)
    APR_HOOK_LINK(auth_checker)
    APR_HOOK_LINK(insert_filter)
    APR_HOOK_LINK(create_request)
    APR_HOOK_LINK(post_perdir_config)
    APR_HOOK_LINK(dirwalk_stat)
)

AP_IMPLEMENT_HOOK_RUN_FIRST(int,translate_name,
                            (request_rec *r), (r), DECLINED)
AP_IMPLEMENT_HOOK_RUN_FIRST(int,map_to_storage,
                            (request_rec *r), (r), DECLINED)
AP_IMPLEMENT_HOOK_RUN_FIRST(int,check_user_id,
                            (request_rec *r), (r), DECLINED)
AP_IMPLEMENT_HOOK_RUN_ALL(int,fixups,
                          (request_rec *r), (r), OK, DECLINED)
AP_IMPLEMENT_HOOK_RUN_FIRST(int,type_checker,
                            (request_rec *r), (r), DECLINED)
AP_IMPLEMENT_HOOK_RUN_ALL(int,access_checker,
                          (request_rec *r), (r), OK, DECLINED)
AP_IMPLEMENT_HOOK_RUN_FIRST(int,access_checker_ex,
                            (request_rec *r), (r), DECLINED)
AP_IMPLEMENT_HOOK_RUN_FIRST(int,auth_checker,
                            (request_rec *r), (r), DECLINED)
AP_IMPLEMENT_HOOK_VOID(insert_filter, (request_rec *r), (r))
AP_IMPLEMENT_HOOK_RUN_ALL(int, create_request,
                          (request_rec *r), (r), OK, DECLINED)
AP_IMPLEMENT_HOOK_RUN_ALL(int, post_perdir_config,
                          (request_rec *r), (r), OK, DECLINED)
AP_IMPLEMENT_HOOK_RUN_FIRST(apr_status_t,dirwalk_stat,
                            (apr_finfo_t *finfo, request_rec *r,
                             apr_int32_t wanted),
                            (finfo, r, wanted), AP_DECLINED)

static int auth_internal_per_conf = 0;
static int auth_internal_per_conf_hooks = 0;
static int auth_internal_per_conf_providers = 0;


static int decl_die(int status, const char *phase, request_rec *r)
{
    if (status == DECLINED) {
        ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(00025)
                      "configuration error:  couldn't %s: %s", phase, r->uri);
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    else {
        ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
                      "auth phase '%s' gave status %d: %s", phase,
                      status, r->uri);
        return status;
    }
}

/* This is the
master logic for processing requests. Do NOT duplicate * this logic elsewhere, or the security model will be broken by future * API changes. Each phase must be individually optimized to pick up * redundant/duplicate calls by subrequests, and redirects. */ AP_DECLARE(int) ap_process_request_internal(request_rec *r) { int file_req = (r->main && r->filename); int access_status; core_dir_config *d; /* Ignore embedded %2F's in path for proxy requests */ if (!r->proxyreq && r->parsed_uri.path) { d = ap_get_core_module_config(r->per_dir_config); if (d->allow_encoded_slashes) { access_status = ap_unescape_url_keep2f(r->parsed_uri.path, d->decode_encoded_slashes); } else { access_status = ap_unescape_url(r->parsed_uri.path); } if (access_status) { if (access_status == HTTP_NOT_FOUND) { if (! d->allow_encoded_slashes) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00026) "found %%2f (encoded '/') in URI " "(decoded='%s'), returning 404", r->parsed_uri.path); } } return access_status; } } ap_getparents(r->uri); /* OK --- shrinking transformations... */ /* All file subrequests are a huge pain... they cannot bubble through the * next several steps. Only file subrequests are allowed an empty uri, * otherwise let translate_name kill the request. */ if (!file_req) { if ((access_status = ap_location_walk(r))) { return access_status; } if ((access_status = ap_if_walk(r))) { return access_status; } /* Don't set per-dir loglevel if LogLevelOverride is set */ if (!r->connection->log) { d = ap_get_core_module_config(r->per_dir_config); if (d->log) r->log = d->log; } if ((access_status = ap_run_translate_name(r))) { return decl_die(access_status, "translate", r); } } /* Reset to the server default config prior to running map_to_storage */ r->per_dir_config = r->server->lookup_defaults; if ((access_status = ap_run_map_to_storage(r))) { /* This request wasn't in storage (e.g. TRACE) */ return access_status; } /* Rerun the location walk, which overrides any map_to_storage config. */ if ((access_status = ap_location_walk(r))) { return access_status; } if ((access_status = ap_if_walk(r))) { return access_status; } /* Don't set per-dir loglevel if LogLevelOverride is set */ if (!r->connection->log) { d = ap_get_core_module_config(r->per_dir_config); if (d->log) r->log = d->log; } if ((access_status = ap_run_post_perdir_config(r))) { return access_status; } /* Only on the main request! */ if (r->main == NULL) { if ((access_status = ap_run_header_parser(r))) { return access_status; } } /* Skip authn/authz if the parent or prior request passed the authn/authz, * and that configuration didn't change (this requires optimized _walk() * functions in map_to_storage that use the same merge results given * identical input.) If the config changes, we must re-auth. 
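     * The pointer comparisons just below are what make this cheap: since
     * identical walk inputs yield the identical merged conf vector, an
     * unchanged per_dir_config pointer implies an unchanged authn/authz
     * configuration.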
*/ if (r->prev && (r->prev->per_dir_config == r->per_dir_config)) { r->user = r->prev->user; r->ap_auth_type = r->prev->ap_auth_type; } else if (r->main && (r->main->per_dir_config == r->per_dir_config)) { r->user = r->main->user; r->ap_auth_type = r->main->ap_auth_type; } else { switch (ap_satisfies(r)) { case SATISFY_ALL: case SATISFY_NOSPEC: if ((access_status = ap_run_access_checker(r)) != OK) { return decl_die(access_status, "check access (with Satisfy All)", r); } access_status = ap_run_access_checker_ex(r); if (access_status == OK) { ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "request authorized without authentication by " "access_checker_ex hook: %s", r->uri); } else if (access_status != DECLINED) { return decl_die(access_status, "check access", r); } else { if ((access_status = ap_run_check_user_id(r)) != OK) { return decl_die(access_status, "check user", r); } if (r->user == NULL) { /* don't let buggy authn module crash us in authz */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00027) "No authentication done but request not " "allowed without authentication for %s. " "Authentication not configured?", r->uri); access_status = HTTP_INTERNAL_SERVER_ERROR; return decl_die(access_status, "check user", r); } if ((access_status = ap_run_auth_checker(r)) != OK) { return decl_die(access_status, "check authorization", r); } } break; case SATISFY_ANY: if ((access_status = ap_run_access_checker(r)) == OK) { ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "request authorized without authentication by " "access_checker hook and 'Satisfy any': %s", r->uri); break; } access_status = ap_run_access_checker_ex(r); if (access_status == OK) { ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "request authorized without authentication by " "access_checker_ex hook: %s", r->uri); } else if (access_status != DECLINED) { return decl_die(access_status, "check access", r); } else { if ((access_status = ap_run_check_user_id(r)) != OK) { return decl_die(access_status, "check user", r); } if (r->user == NULL) { /* don't let buggy authn module crash us in authz */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00028) "No authentication done but request not " "allowed without authentication for %s. " "Authentication not configured?", r->uri); access_status = HTTP_INTERNAL_SERVER_ERROR; return decl_die(access_status, "check user", r); } if ((access_status = ap_run_auth_checker(r)) != OK) { return decl_die(access_status, "check authorization", r); } } break; } } /* XXX Must make certain the ap_run_type_checker short circuits mime * in mod-proxy for r->proxyreq && r->parsed_uri.scheme * && !strcmp(r->parsed_uri.scheme, "http") */ if ((access_status = ap_run_type_checker(r)) != OK) { return decl_die(access_status, "find types", r); } if ((access_status = ap_run_fixups(r)) != OK) { ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "fixups hook gave %d: %s", access_status, r->uri); return access_status; } return OK; } /* Useful caching structures to repeat _walk/merge sequences as required * when a subrequest or redirect reuses substantially the same config. * * Directive order in the httpd.conf file and its Includes significantly * impact this optimization. Grouping common blocks at the front of the * config that are less likely to change between a request and * its subrequests, or between a request and its redirects reduced * the work of these functions significantly. 
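 * As an illustration: when every subrequest of a request matches the same
 * sections in the same order, the walked list below is simply replayed and
 * no ap_merge_per_dir_configs() calls need to be made at all.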
*/ typedef struct walk_walked_t { ap_conf_vector_t *matched; /* A dir_conf sections we matched */ ap_conf_vector_t *merged; /* The dir_conf merged result */ } walk_walked_t; typedef struct walk_cache_t { const char *cached; /* The identifier we matched */ ap_conf_vector_t **dir_conf_tested; /* The sections we matched against */ ap_conf_vector_t *dir_conf_merged; /* Base per_dir_config */ ap_conf_vector_t *per_dir_result; /* per_dir_config += walked result */ apr_array_header_t *walked; /* The list of walk_walked_t results */ struct walk_cache_t *prev; /* Prev cache of same call in this (sub)req */ int count; /* Number of prev invocations of same call in this (sub)req */ } walk_cache_t; static walk_cache_t *prep_walk_cache(apr_size_t t, request_rec *r) { void **note, **inherit_note; walk_cache_t *cache, *prev_cache, *copy_cache; int count; /* Find the most relevant, recent walk cache to work from and provide * a copy the caller is allowed to munge. In the case of a sub-request * or internal redirect, this is the cache corresponding to the equivalent * invocation of the same function call in the "parent" request, if such * a cache exists. Otherwise it is the walk cache of the previous * invocation of the same function call in the current request, if * that exists; if not, then create a new walk cache. */ note = ap_get_request_note(r, t); AP_DEBUG_ASSERT(note != NULL); copy_cache = prev_cache = *note; count = prev_cache ? (prev_cache->count + 1) : 0; if ((r->prev && (inherit_note = ap_get_request_note(r->prev, t)) && *inherit_note) || (r->main && (inherit_note = ap_get_request_note(r->main, t)) && *inherit_note)) { walk_cache_t *inherit_cache = *inherit_note; while (inherit_cache->count > count) { inherit_cache = inherit_cache->prev; } if (inherit_cache->count == count) { copy_cache = inherit_cache; } } if (copy_cache) { cache = apr_pmemdup(r->pool, copy_cache, sizeof(*cache)); cache->walked = apr_array_copy(r->pool, cache->walked); cache->prev = prev_cache; cache->count = count; } else { cache = apr_pcalloc(r->pool, sizeof(*cache)); cache->walked = apr_array_make(r->pool, 4, sizeof(walk_walked_t)); } *note = cache; return cache; } /***************************************************************** * * Getting and checking directory configuration. Also checks the * FollowSymlinks and FollowSymOwner stuff, since this is really the * only place that can happen (barring a new mid_dir_walk callout). * * We can't do it as an access_checker module function which gets * called with the final per_dir_config, since we could have a directory * with FollowSymLinks disabled, which contains a symlink to another * with a .htaccess file which turns FollowSymLinks back on --- and * access in such a case must be denied. So, whatever it is that * checks FollowSymLinks needs to know the state of the options as * they change, all the way down. */ /* * resolve_symlink must _always_ be called on an APR_LNK file type! * It will resolve the actual target file type, modification date, etc, * and provide any processing required for symlink evaluation. * Path must already be cleaned, no trailing slash, no multi-slashes, * and don't call this on the root! * * Simply, the number of times we deref a symlink are minimal compared * to the number of times we had an extra lstat() since we 'weren't sure'. * * To optimize, we stat() anything when given (opts & OPT_SYM_LINKS), otherwise * we start off with an lstat(). 
Every lstat() must be dereferenced in case * it points at a 'nasty' - we must always rerun check_safe_file (or similar.) */ static int resolve_symlink(char *d, apr_finfo_t *lfi, int opts, apr_pool_t *p) { apr_finfo_t fi; const char *savename; if (!(opts & (OPT_SYM_OWNER | OPT_SYM_LINKS))) { return HTTP_FORBIDDEN; } /* Save the name from the valid bits. */ savename = (lfi->valid & APR_FINFO_NAME) ? lfi->name : NULL; /* if OPT_SYM_OWNER is unset, we only need to check target accessible */ if (!(opts & OPT_SYM_OWNER)) { if (apr_stat(&fi, d, lfi->valid & ~(APR_FINFO_NAME | APR_FINFO_LINK), p) != APR_SUCCESS) { return HTTP_FORBIDDEN; } /* Give back the target */ memcpy(lfi, &fi, sizeof(fi)); if (savename) { lfi->name = savename; lfi->valid |= APR_FINFO_NAME; } return OK; } /* OPT_SYM_OWNER only works if we can get the owner of * both the file and symlink. First fill in a missing * owner of the symlink, then get the info of the target. */ if (!(lfi->valid & APR_FINFO_OWNER)) { if (apr_stat(lfi, d, lfi->valid | APR_FINFO_LINK | APR_FINFO_OWNER, p) != APR_SUCCESS) { return HTTP_FORBIDDEN; } } if (apr_stat(&fi, d, lfi->valid & ~(APR_FINFO_NAME), p) != APR_SUCCESS) { return HTTP_FORBIDDEN; } if (apr_uid_compare(fi.user, lfi->user) != APR_SUCCESS) { return HTTP_FORBIDDEN; } /* Give back the target */ memcpy(lfi, &fi, sizeof(fi)); if (savename) { lfi->name = savename; lfi->valid |= APR_FINFO_NAME; } return OK; } /* * As we walk the directory configuration, the merged config won't * be 'rooted' to a specific vhost until the very end of the merge. * * We need a very fast mini-merge to a real, vhost-rooted merge * of core.opts and core.override, the only options tested within * directory_walk itself. * * See core.c::merge_core_dir_configs() for explanation. */ typedef struct core_opts_t { allow_options_t opts; allow_options_t add; allow_options_t remove; overrides_t override; overrides_t override_opts; apr_table_t *override_list; } core_opts_t; static void core_opts_merge(const ap_conf_vector_t *sec, core_opts_t *opts) { core_dir_config *this_dir = ap_get_core_module_config(sec); if (!this_dir) { return; } if (this_dir->opts & OPT_UNSET) { opts->add = (opts->add & ~this_dir->opts_remove) | this_dir->opts_add; opts->remove = (opts->remove & ~this_dir->opts_add) | this_dir->opts_remove; opts->opts = (opts->opts & ~opts->remove) | opts->add; } else { opts->opts = this_dir->opts; opts->add = this_dir->opts_add; opts->remove = this_dir->opts_remove; } if (!(this_dir->override & OR_UNSET)) { opts->override = this_dir->override; opts->override_opts = this_dir->override_opts; } if (this_dir->override_list != NULL) { opts->override_list = this_dir->override_list; } } /***************************************************************** * * Getting and checking directory configuration. Also checks the * FollowSymlinks and FollowSymOwner stuff, since this is really the * only place that can happen (barring a new mid_dir_walk callout). * * We can't do it as an access_checker module function which gets * called with the final per_dir_config, since we could have a directory * with FollowSymLinks disabled, which contains a symlink to another * with a .htaccess file which turns FollowSymLinks back on --- and * access in such a case must be denied. So, whatever it is that * checks FollowSymLinks needs to know the state of the options as * they change, all the way down. 
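 * Concretely (hypothetical paths): with FollowSymLinks disabled under
 * /docroot, a link /docroot/link -> /elsewhere must be rejected even if
 * /elsewhere/.htaccess sets Options +FollowSymLinks, because the option
 * state at the link's own level is what governs the dereference.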
 */
AP_DECLARE(int) ap_directory_walk(request_rec *r)
{
    ap_conf_vector_t *now_merged = NULL;
    core_server_config *sconf =
        ap_get_core_module_config(r->server->module_config);
    ap_conf_vector_t **sec_ent = (ap_conf_vector_t **)sconf->sec_dir->elts;
    int num_sec = sconf->sec_dir->nelts;
    walk_cache_t *cache;
    char *entry_dir;
    apr_status_t rv;
    int cached;

    /* XXX: Better (faster) tests needed!!!
     *
     * "OK" as a response to a real problem is not _OK_, but to allow broken
     * modules to proceed, we will permit the not-a-path filename to pass the
     * following two tests.  This behavior may be revoked in future versions
     * of Apache.  We still must catch it later if it's heading for the core
     * handler.  Leave INFO notes here for module debugging.
     */
    if (r->filename == NULL) {
        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00029)
                      "Module bug?  Request filename is missing for URI %s",
                      r->uri);
        return OK;
    }

    /* Canonicalize the file path without resolving filename case or aliases
     * so we can begin by checking the cache for a recent directory walk.
     * This call will ensure we have an absolute path in the same pass.
     */
    if ((rv = apr_filepath_merge(&entry_dir, NULL, r->filename,
                                 APR_FILEPATH_NOTRELATIVE,
                                 r->pool)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00030)
                      "Module bug?  Request filename path %s is invalid or "
                      "not absolute for uri %s",
                      r->filename, r->uri);
        return OK;
    }

    /* XXX Notice that this forces path_info to be canonical.  That might
     * not be desired by all apps.  However, some of those same apps likely
     * have significant security holes.
     */
    r->filename = entry_dir;

    cache = prep_walk_cache(AP_NOTE_DIRECTORY_WALK, r);
    cached = (cache->cached != NULL);

    /* If this is not a dirent subrequest with a preconstructed
     * r->finfo value, then we can simply stat the filename to
     * save burning mega-cycles with unneeded stats - if this is
     * an exact file match.  We don't care about failure... we
     * will stat by component failing this meager attempt.
     *
     * It would be nice to distinguish APR_ENOENT from other
     * types of failure, such as APR_ENOTDIR.  We can do something
     * with APR_ENOENT, knowing that the path is good.
     */
    if (r->finfo.filetype == APR_NOFILE || r->finfo.filetype == APR_LNK) {
        rv = ap_run_dirwalk_stat(&r->finfo, r, APR_FINFO_MIN);

        /* some OSs will return APR_SUCCESS/APR_REG if we stat
         * a regular file but we have '/' at the end of the name;
         *
         * other OSs will return APR_ENOTDIR for that situation;
         *
         * handle it the same everywhere by simulating a failure
         * if it looks like a directory but really isn't
         *
         * Also reset if the stat failed, just for safety.
         */
        if ((rv != APR_SUCCESS) ||
            (r->finfo.filetype != APR_NOFILE
             && (r->finfo.filetype != APR_DIR)
             && (r->filename[strlen(r->filename) - 1] == '/'))) {
            r->finfo.filetype = APR_NOFILE; /* forget what we learned */
        }
    }

    if (r->finfo.filetype == APR_REG) {
        entry_dir = ap_make_dirstr_parent(r->pool, entry_dir);
    }
    else if (r->filename[strlen(r->filename) - 1] != '/') {
        entry_dir = apr_pstrcat(r->pool, r->filename, "/", NULL);
    }

    /* If we have a file that already matches the path of r->filename,
     * and the vhost's list of directory sections hasn't changed,
     * we can skip rewalking the directory_walk entries.
     */
    if (cached
        && ((r->finfo.filetype == APR_REG)
            || ((r->finfo.filetype == APR_DIR)
                && (!r->path_info || !*r->path_info)))
        && (cache->dir_conf_tested == sec_ent)
        && (strcmp(entry_dir, cache->cached) == 0)) {
        int familiar = 0;

        /* Well this looks really familiar!
         * If our end-result (per_dir_result) didn't change, we have
         * absolutely nothing to do :)
         * Otherwise (as is the case with most dir_merged/file_merged requests)
         * we must merge our dir_conf_merged onto this new r->per_dir_config.
         */
        if (r->per_dir_config == cache->per_dir_result) {
            familiar = 1;
        }

        if (r->per_dir_config == cache->dir_conf_merged) {
            r->per_dir_config = cache->per_dir_result;
            familiar = 1;
        }

        if (familiar) {
            apr_finfo_t thisinfo;
            int res;
            allow_options_t opts;
            core_dir_config *this_dir;

            this_dir = ap_get_core_module_config(r->per_dir_config);
            opts = this_dir->opts;
            /*
             * If Symlinks are allowed in general we do not need the following
             * check.
             */
            if (!(opts & OPT_SYM_LINKS)) {
                rv = ap_run_dirwalk_stat(&thisinfo, r,
                                         APR_FINFO_MIN | APR_FINFO_NAME
                                         | APR_FINFO_LINK);
                /*
                 * APR_INCOMPLETE is as fine a result as APR_SUCCESS as we
                 * have added APR_FINFO_NAME to the wanted parameter of
                 * apr_stat above. On Unix platforms this means that apr_stat
                 * is always going to return APR_INCOMPLETE in the case that
                 * the call to the native stat / lstat did not fail.
                 */
                if ((rv != APR_INCOMPLETE) && (rv != APR_SUCCESS)) {
                    /*
                     * This should never happen, because we did a stat on the
                     * same file, resolving a possible symlink several lines
                     * above. Therefore do not analyze rv in detail for the
                     * reason of the failure; just bail out with a
                     * HTTP_FORBIDDEN in case we hit a race condition here.
                     */
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00031)
                                  "access to %s failed; stat of '%s' failed.",
                                  r->uri, r->filename);
                    return r->status = HTTP_FORBIDDEN;
                }
                if (thisinfo.filetype == APR_LNK) {
                    /* Is this a possibly acceptable symlink? */
                    if ((res = resolve_symlink(r->filename, &thisinfo,
                                               opts, r->pool)) != OK) {
                        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00032)
                                      "Symbolic link not allowed "
                                      "or link target not accessible: %s",
                                      r->filename);
                        return r->status = res;
                    }
                }
            }
            return OK;
        }

        if (cache->walked->nelts) {
            now_merged = ((walk_walked_t*)cache->walked->elts)
                [cache->walked->nelts - 1].merged;
        }
    }
    else {
        /* We start now_merged from NULL since we want to build
         * a locations list that can be merged to any vhost.
         */
        int sec_idx;
        int matches = cache->walked->nelts;
        int cached_matches = matches;
        walk_walked_t *last_walk = (walk_walked_t*)cache->walked->elts;
        core_dir_config *this_dir;
        core_opts_t opts;
        apr_finfo_t thisinfo;
        char *save_path_info;
        apr_size_t buflen;
        char *buf;
        unsigned int seg, startseg;
        apr_pool_t *rxpool = NULL;

        /* Invariant: from the first time filename_len is set until
         * it goes out of scope, filename_len==strlen(r->filename)
         */
        apr_size_t filename_len;
#ifdef CASE_BLIND_FILESYSTEM
        apr_size_t canonical_len;
#endif
        cached &= auth_internal_per_conf;

        /*
         * We must play our own mini-merge game here, for the few
         * running dir_config values we care about within dir_walk.
         * We didn't start the merge from r->per_dir_config, so we
         * accumulate opts and override as we merge, from the globals.
         */
        this_dir = ap_get_core_module_config(r->per_dir_config);
        opts.opts = this_dir->opts;
        opts.add = this_dir->opts_add;
        opts.remove = this_dir->opts_remove;
        opts.override = this_dir->override;
        opts.override_opts = this_dir->override_opts;
        opts.override_list = this_dir->override_list;

        /* Set aside path_info to merge back onto path_info later.
         * If r->filename is a directory, we must remerge the path_info,
         * before we continue!  [Directories cannot, by definition, have
         * path info.  Either the next segment is not-found, or a file.]
         *
         * r->path_info tracks the unconsumed source path.
         * r->filename tracks the path as we process it
         */
        if ((r->finfo.filetype == APR_DIR) && r->path_info && *r->path_info) {
            if ((rv = apr_filepath_merge(&r->path_info, r->filename,
                                         r->path_info,
                                         APR_FILEPATH_NOTABOVEROOT, r->pool))
                != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00033)
                              "dir_walk error, path_info %s is not relative "
                              "to the filename path %s for uri %s",
                              r->path_info, r->filename, r->uri);
                return HTTP_INTERNAL_SERVER_ERROR;
            }

            save_path_info = NULL;
        }
        else {
            save_path_info = r->path_info;
            r->path_info = r->filename;
        }

#ifdef CASE_BLIND_FILESYSTEM

        canonical_len = 0;
        while (r->canonical_filename && r->canonical_filename[canonical_len]
               && (r->canonical_filename[canonical_len]
                   == r->path_info[canonical_len])) {
            ++canonical_len;
        }

        while (canonical_len
               && ((r->canonical_filename[canonical_len - 1] != '/'
                    && r->canonical_filename[canonical_len - 1])
                   || (r->path_info[canonical_len - 1] != '/'
                       && r->path_info[canonical_len - 1]))) {
            --canonical_len;
        }

        /*
         * Now build r->filename component by component, starting
         * with the root (on Unix, simply "/").  We will make a huge
         * assumption here for efficiency, that any canonical path
         * already given included a canonical root.
         */
        rv = apr_filepath_root((const char **)&r->filename,
                               (const char **)&r->path_info,
                               canonical_len ? 0 : APR_FILEPATH_TRUENAME,
                               r->pool);
        filename_len = strlen(r->filename);

        /*
         * Bad assumption above?  If the root's length is longer
         * than the canonical length, then it cannot be trusted as
         * a truename.  So try again, this time more seriously.
         */
        if ((rv == APR_SUCCESS) && canonical_len
            && (filename_len > canonical_len)) {
            rv = apr_filepath_root((const char **)&r->filename,
                                   (const char **)&r->path_info,
                                   APR_FILEPATH_TRUENAME, r->pool);
            filename_len = strlen(r->filename);
            canonical_len = 0;
        }

#else /* ndef CASE_BLIND_FILESYSTEM, really this simple for Unix today; */

        rv = apr_filepath_root((const char **)&r->filename,
                               (const char **)&r->path_info,
                               0, r->pool);
        filename_len = strlen(r->filename);

#endif

        if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00034)
                          "dir_walk error, could not determine the root "
                          "path of filename %s%s for uri %s",
                          r->filename, r->path_info, r->uri);
            return HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Working space for terminating null and an extra / is required.
         */
        buflen = filename_len + strlen(r->path_info) + 2;
        buf = apr_palloc(r->pool, buflen);
        memcpy(buf, r->filename, filename_len + 1);
        r->filename = buf;
        thisinfo.valid = APR_FINFO_TYPE;
        thisinfo.filetype = APR_DIR; /* It's the root, of course it's a dir */

        /*
         * seg keeps track of which segment we've copied.
         * sec_idx keeps track of which section we're on, since sections are
         *     ordered by number of segments. See core_reorder_directories
         * startseg tells us how many segments describe the root path
         *     e.g. the complete path "//host/foo/" to a UNC share (4)
         */
        startseg = seg = ap_count_dirs(r->filename);
        sec_idx = 0;

        /*
         * Go down the directory hierarchy.  Where we have to check for
         * symlinks, do so.  Where a .htaccess file has permission to
         * override anything, try to find one.
         */
        do {
            int res;
            char *seg_name;
            char *delim;
            int temp_slash=0;

            /* We have no trailing slash, but we sure would appreciate one.
             * However, we don't want to append a / our first time through.
             */
            if ((seg > startseg) && r->filename[filename_len-1] != '/') {
                r->filename[filename_len++] = '/';
                r->filename[filename_len] = 0;
                temp_slash=1;
            }

            /* Begin *this* level by looking for matching <Directory> sections
             * from the server config.
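             * The sections arrive sorted by component count (see
             * core_reorder_directories), so once we reach a regex section or
             * one with more components than the current depth, nothing later
             * in the list can match at this level and the loop breaks out.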
*/ for (; sec_idx < num_sec; ++sec_idx) { ap_conf_vector_t *entry_config = sec_ent[sec_idx]; core_dir_config *entry_core; entry_core = ap_get_core_module_config(entry_config); /* No more possible matches for this many segments? * We are done when we find relative/regex/longer components. */ if (entry_core->r || entry_core->d_components > seg) { break; } /* We will never skip '0' element components, e.g. plain old * <Directory >, and <Directory "/"> are classified as zero * so that Win32/Netware/OS2 etc all pick them up. * Otherwise, skip over the mismatches. */ if (entry_core->d_components && ((entry_core->d_components < seg) || (entry_core->d_is_fnmatch ? (apr_fnmatch(entry_core->d, r->filename, APR_FNM_PATHNAME) != APR_SUCCESS) : (strcmp(r->filename, entry_core->d) != 0)))) { continue; } /* If we haven't continue'd above, we have a match. * * Calculate our full-context core opts & override. */ core_opts_merge(sec_ent[sec_idx], &opts); /* If we merged this same section last time, reuse it */ if (matches) { if (last_walk->matched == sec_ent[sec_idx]) { now_merged = last_walk->merged; ++last_walk; --matches; continue; } /* We fell out of sync. This is our own copy of walked, * so truncate the remaining matches and reset remaining. */ cache->walked->nelts -= matches; matches = 0; cached = 0; } if (now_merged) { now_merged = ap_merge_per_dir_configs(r->pool, now_merged, sec_ent[sec_idx]); } else { now_merged = sec_ent[sec_idx]; } last_walk = (walk_walked_t*)apr_array_push(cache->walked); last_walk->matched = sec_ent[sec_idx]; last_walk->merged = now_merged; } /* If .htaccess files are enabled, check for one, provided we * have reached a real path. */ do { /* Not really a loop, just a break'able code block */ ap_conf_vector_t *htaccess_conf = NULL; /* No htaccess in an incomplete root path, * nor if it's disabled */ if (seg < startseg || (!opts.override && opts.override_list == NULL)) { break; } res = ap_parse_htaccess(&htaccess_conf, r, opts.override, opts.override_opts, opts.override_list, apr_pstrdup(r->pool, r->filename), sconf->access_name); if (res) { return res; } if (!htaccess_conf) { break; } /* If we are still here, we found our htaccess. * * Calculate our full-context core opts & override. */ core_opts_merge(htaccess_conf, &opts); /* If we merged this same htaccess last time, reuse it... * this wouldn't work except that we cache the htaccess * sections for the lifetime of the request, so we match * the same conf. Good planning (no, pure luck ;) */ if (matches) { if (last_walk->matched == htaccess_conf) { now_merged = last_walk->merged; ++last_walk; --matches; break; } /* We fell out of sync. This is our own copy of walked, * so truncate the remaining matches and reset * remaining. */ cache->walked->nelts -= matches; matches = 0; cached = 0; } if (now_merged) { now_merged = ap_merge_per_dir_configs(r->pool, now_merged, htaccess_conf); } else { now_merged = htaccess_conf; } last_walk = (walk_walked_t*)apr_array_push(cache->walked); last_walk->matched = htaccess_conf; last_walk->merged = now_merged; } while (0); /* Only one htaccess, not a real loop */ /* That temporary trailing slash was useful, now drop it. */ if (temp_slash) { r->filename[--filename_len] = '\0'; } /* Time for all good things to come to an end? */ if (!r->path_info || !*r->path_info) { break; } /* Now it's time for the next segment... * We will assume the next element is an end node, and fix it up * below as necessary... */ seg_name = r->filename + filename_len; delim = strchr(r->path_info + (*r->path_info == '/' ? 
                                                       1 : 0), '/');
            if (delim) {
                apr_size_t path_info_len = delim - r->path_info;
                *delim = '\0';
                memcpy(seg_name, r->path_info, path_info_len + 1);
                filename_len += path_info_len;
                r->path_info = delim;
                *delim = '/';
            }
            else {
                apr_size_t path_info_len = strlen(r->path_info);
                memcpy(seg_name, r->path_info, path_info_len + 1);
                filename_len += path_info_len;
                r->path_info += path_info_len;
            }
            if (*seg_name == '/')
                ++seg_name;

            /* If nothing remained but a '/' string, we are finished
             * XXX: NO WE ARE NOT!!!  Now process this puppy!!! */
            if (!*seg_name) {
                break;
            }

            /* First optimization;
             * If...we knew r->filename was a file, and
             * if...we have strict (case-sensitive) filenames, or
             *      we know the canonical_filename matches to _this_ name, and
             * if...we have allowed symlinks
             * skip the lstat and dummy up an APR_DIR value for thisinfo.
             */
            if (r->finfo.filetype != APR_NOFILE
#ifdef CASE_BLIND_FILESYSTEM
                && (filename_len <= canonical_len)
#endif
                && ((opts.opts & (OPT_SYM_OWNER | OPT_SYM_LINKS)) == OPT_SYM_LINKS))
            {
                thisinfo.filetype = APR_DIR;
                ++seg;
                continue;
            }

            /* We choose apr_stat with flag APR_FINFO_LINK here, rather than
             * plain apr_stat, so that we capture this path object rather than
             * its target.  We will replace the info with our target's info
             * below.  We especially want the name of this 'link' object, not
             * the name of its target, if we are fixing the filename
             * case/resolving aliases.
             */
            rv = ap_run_dirwalk_stat(&thisinfo, r,
                                     APR_FINFO_MIN | APR_FINFO_NAME
                                     | APR_FINFO_LINK);

            if (APR_STATUS_IS_ENOENT(rv)) {
                /* Nothing?  That could be nice.  But our directory
                 * walk is done.
                 */
                thisinfo.filetype = APR_NOFILE;
                break;
            }
            else if (APR_STATUS_IS_EACCES(rv)) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00035)
                              "access to %s denied (filesystem path '%s') "
                              "because search permissions are missing on a "
                              "component of the path", r->uri, r->filename);
                return r->status = HTTP_FORBIDDEN;
            }
            else if ((rv != APR_SUCCESS && rv != APR_INCOMPLETE)
                     || !(thisinfo.valid & APR_FINFO_TYPE)) {
                /* If we hit ENOTDIR, we must have over-optimized, deny
                 * rather than assume not found.
                 */
                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00036)
                              "access to %s failed (filesystem path '%s')",
                              r->uri, r->filename);
                return r->status = HTTP_FORBIDDEN;
            }

            /* Fix up the path now if we have a name, and they don't agree
             */
            if ((thisinfo.valid & APR_FINFO_NAME)
                && strcmp(seg_name, thisinfo.name)) {
                /* TODO: provide users an option that an internal/external
                 * redirect is required here?  We need to walk the URI and
                 * filename in tandem to properly correlate these.
                 */
                strcpy(seg_name, thisinfo.name);
                filename_len = strlen(r->filename);
            }

            if (thisinfo.filetype == APR_LNK) {
                /* Is this a possibly acceptable symlink?
                 */
                if ((res = resolve_symlink(r->filename, &thisinfo,
                                           opts.opts, r->pool)) != OK) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00037)
                                  "Symbolic link not allowed "
                                  "or link target not accessible: %s",
                                  r->filename);
                    return r->status = res;
                }
            }

            /* Ok, we are done with the link's info, test the real target
             */
            if (thisinfo.filetype == APR_REG || thisinfo.filetype == APR_NOFILE) {
                /* That was fun, nothing left for us here
                 */
                break;
            }
            else if (thisinfo.filetype != APR_DIR) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00038)
                              "Forbidden: %s doesn't point to "
                              "a file or directory",
                              r->filename);
                return r->status = HTTP_FORBIDDEN;
            }

            ++seg;
        } while (thisinfo.filetype == APR_DIR);

        /* If we have _not_ optimized, this is the time to recover
         * the final stat result.
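         * That is, if the loop above broke out before ever stat'ing an end
         * node (finfo still APR_NOFILE) or stopped on a symlink, adopt the
         * info gathered for the last component examined.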
*/ if (r->finfo.filetype == APR_NOFILE || r->finfo.filetype == APR_LNK) { r->finfo = thisinfo; } /* Now splice the saved path_info back onto any new path_info */ if (save_path_info) { if (r->path_info && *r->path_info) { r->path_info = ap_make_full_path(r->pool, r->path_info, save_path_info); } else { r->path_info = save_path_info; } } /* * Now we'll deal with the regexes, note we pick up sec_idx * where we left off (we gave up after we hit entry_core->r) */ for (; sec_idx < num_sec; ++sec_idx) { int nmatch = 0; int i; ap_regmatch_t *pmatch = NULL; core_dir_config *entry_core; entry_core = ap_get_core_module_config(sec_ent[sec_idx]); if (!entry_core->r) { continue; } if (entry_core->refs && entry_core->refs->nelts) { if (!rxpool) { apr_pool_create(&rxpool, r->pool); } nmatch = entry_core->refs->nelts; pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t)); } if (ap_regexec(entry_core->r, r->filename, nmatch, pmatch, 0)) { continue; } for (i = 0; i < nmatch; i++) { if (pmatch[i].rm_so >= 0 && pmatch[i].rm_eo >= 0 && ((const char **)entry_core->refs->elts)[i]) { apr_table_setn(r->subprocess_env, ((const char **)entry_core->refs->elts)[i], apr_pstrndup(r->pool, r->filename + pmatch[i].rm_so, pmatch[i].rm_eo - pmatch[i].rm_so)); } } /* If we haven't already continue'd above, we have a match. * * Calculate our full-context core opts & override. */ core_opts_merge(sec_ent[sec_idx], &opts); /* If we merged this same section last time, reuse it */ if (matches) { if (last_walk->matched == sec_ent[sec_idx]) { now_merged = last_walk->merged; ++last_walk; --matches; continue; } /* We fell out of sync. This is our own copy of walked, * so truncate the remaining matches and reset remaining. */ cache->walked->nelts -= matches; matches = 0; cached = 0; } if (now_merged) { now_merged = ap_merge_per_dir_configs(r->pool, now_merged, sec_ent[sec_idx]); } else { now_merged = sec_ent[sec_idx]; } last_walk = (walk_walked_t*)apr_array_push(cache->walked); last_walk->matched = sec_ent[sec_idx]; last_walk->merged = now_merged; } if (rxpool) { apr_pool_destroy(rxpool); } /* Whoops - everything matched in sequence, but either the original * walk found some additional matches (which we need to truncate), or * this walk found some additional matches. */ if (matches) { cache->walked->nelts -= matches; cached = 0; } else if (cache->walked->nelts > cached_matches) { cached = 0; } } /* It seems this shouldn't be needed anymore. We translated the x symlink above into a real resource, and should have died up there. x Even if we keep this, it needs more thought (maybe an r->file_is_symlink) x perhaps it should actually happen in file_walk, so we catch more x obscure cases in autoindex subrequests, etc. x x * Symlink permissions are determined by the parent. If the request is x * for a directory then applying the symlink test here would use the x * permissions of the directory as opposed to its parent. Consider a x * symlink pointing to a dir with a .htaccess disallowing symlinks. If x * you access /symlink (or /symlink/) you would get a 403 without this x * APR_DIR test. But if you accessed /symlink/index.html, for example, x * you would *not* get the 403. x x if (r->finfo.filetype != APR_DIR x && (res = resolve_symlink(r->filename, r->info, ap_allow_options(r), x r->pool))) { x ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, x "Symbolic link not allowed: %s", r->filename); x return res; x } */ /* Save future sub-requestors much angst in processing * this subrequest. 
If dir_walk couldn't canonicalize
     * the file path, nothing can.
     */
    r->canonical_filename = r->filename;

    if (r->finfo.filetype == APR_DIR) {
        cache->cached = r->filename;
    }
    else {
        cache->cached = ap_make_dirstr_parent(r->pool, r->filename);
    }

    if (cached
        && r->per_dir_config == cache->dir_conf_merged) {
        r->per_dir_config = cache->per_dir_result;
        return OK;
    }

    cache->dir_conf_tested = sec_ent;
    cache->dir_conf_merged = r->per_dir_config;

    /* Merge our cache->dir_conf_merged construct with the r->per_dir_configs,
     * and note the end result to (potentially) skip this step next time.
     */
    if (now_merged) {
        r->per_dir_config = ap_merge_per_dir_configs(r->pool,
                                                     r->per_dir_config,
                                                     now_merged);
    }
    cache->per_dir_result = r->per_dir_config;

    return OK;
}

AP_DECLARE(int) ap_location_walk(request_rec *r)
{
    ap_conf_vector_t *now_merged = NULL;
    core_server_config *sconf =
        ap_get_core_module_config(r->server->module_config);
    ap_conf_vector_t **sec_ent = (ap_conf_vector_t **)sconf->sec_url->elts;
    int num_sec = sconf->sec_url->nelts;
    walk_cache_t *cache;
    const char *entry_uri;
    int cached;

    /* No tricks here, there are no <Locations > to parse in this vhost.
     * We won't destroy the cache, just in case _this_ redirect is later
     * redirected again to a vhost with <Location > blocks to optimize.
     */
    if (!num_sec) {
        return OK;
    }

    cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r);
    cached = (cache->cached != NULL);

    /* Location and LocationMatch differ on their behaviour w.r.t. multiple
     * slashes.  Location matches multiple slashes with a single slash,
     * LocationMatch doesn't.  An exception, for backwards brokenness is
     * absoluteURIs... in which case neither match multiple slashes.
     */
    if (r->uri[0] != '/') {
        entry_uri = r->uri;
    }
    else {
        char *uri = apr_pstrdup(r->pool, r->uri);
        ap_no2slash(uri);
        entry_uri = uri;
    }

    /* If we have a cache->cached location that matches r->uri,
     * and the vhost's list of locations hasn't changed, we can skip
     * rewalking the location_walk entries.
     */
    if (cached
        && (cache->dir_conf_tested == sec_ent)
        && (strcmp(entry_uri, cache->cached) == 0)) {
        /* Well this looks really familiar!  If our end-result (per_dir_result)
         * didn't change, we have absolutely nothing to do :)
         * Otherwise (as is the case with most dir_merged/file_merged requests)
         * we must merge our dir_conf_merged onto this new r->per_dir_config.
         */
        if (r->per_dir_config == cache->per_dir_result) {
            return OK;
        }

        if (cache->walked->nelts) {
            now_merged = ((walk_walked_t*)cache->walked->elts)
                [cache->walked->nelts - 1].merged;
        }
    }
    else {
        /* We start now_merged from NULL since we want to build
         * a locations list that can be merged to any vhost.
         */
        int len, sec_idx;
        int matches = cache->walked->nelts;
        int cached_matches = matches;
        walk_walked_t *last_walk = (walk_walked_t*)cache->walked->elts;
        apr_pool_t *rxpool = NULL;

        cached &= auth_internal_per_conf;
        cache->cached = entry_uri;

        /* Go through the location entries, and check for matches.
         * We apply the directive sections in given order, we should
         * really try them with the most general first.
         */
        for (sec_idx = 0; sec_idx < num_sec; ++sec_idx) {

            core_dir_config *entry_core;
            entry_core = ap_get_core_module_config(sec_ent[sec_idx]);

            /* ### const strlen can be optimized in location config parsing */
            len = strlen(entry_core->d);

            /* Test the regex, fnmatch or string as appropriate.
             * If it's a strcmp, and the <Location > pattern was
             * not slash terminated, then this uri must be slash
             * terminated (or at the end of the string) to match.
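             * For example, <Location "/foo"> matches "/foo" and "/foo/bar"
             * but not "/foobar": the strncmp below only accepts the prefix
             * when the next character of the URI is '/' or the string ends
             * there.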
*/ if (entry_core->r) { int nmatch = 0; int i; ap_regmatch_t *pmatch = NULL; if (entry_core->refs && entry_core->refs->nelts) { if (!rxpool) { apr_pool_create(&rxpool, r->pool); } nmatch = entry_core->refs->nelts; pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t)); } if (ap_regexec(entry_core->r, r->uri, nmatch, pmatch, 0)) { continue; } for (i = 0; i < nmatch; i++) { if (pmatch[i].rm_so >= 0 && pmatch[i].rm_eo >= 0 && ((const char **)entry_core->refs->elts)[i]) { apr_table_setn(r->subprocess_env, ((const char **)entry_core->refs->elts)[i], apr_pstrndup(r->pool, r->uri + pmatch[i].rm_so, pmatch[i].rm_eo - pmatch[i].rm_so)); } } } else { if ((entry_core->d_is_fnmatch ? apr_fnmatch(entry_core->d, cache->cached, APR_FNM_PATHNAME) : (strncmp(entry_core->d, cache->cached, len) || (len > 0 && entry_core->d[len - 1] != '/' && cache->cached[len] != '/' && cache->cached[len] != '\0')))) { continue; } } /* If we merged this same section last time, reuse it */ if (matches) { if (last_walk->matched == sec_ent[sec_idx]) { now_merged = last_walk->merged; ++last_walk; --matches; continue; } /* We fell out of sync. This is our own copy of walked, * so truncate the remaining matches and reset remaining. */ cache->walked->nelts -= matches; matches = 0; cached = 0; } if (now_merged) { now_merged = ap_merge_per_dir_configs(r->pool, now_merged, sec_ent[sec_idx]); } else { now_merged = sec_ent[sec_idx]; } last_walk = (walk_walked_t*)apr_array_push(cache->walked); last_walk->matched = sec_ent[sec_idx]; last_walk->merged = now_merged; } if (rxpool) { apr_pool_destroy(rxpool); } /* Whoops - everything matched in sequence, but either the original * walk found some additional matches (which we need to truncate), or * this walk found some additional matches. */ if (matches) { cache->walked->nelts -= matches; cached = 0; } else if (cache->walked->nelts > cached_matches) { cached = 0; } } if (cached && r->per_dir_config == cache->dir_conf_merged) { r->per_dir_config = cache->per_dir_result; return OK; } cache->dir_conf_tested = sec_ent; cache->dir_conf_merged = r->per_dir_config; /* Merge our cache->dir_conf_merged construct with the r->per_dir_configs, * and note the end result to (potentially) skip this step next time. */ if (now_merged) { r->per_dir_config = ap_merge_per_dir_configs(r->pool, r->per_dir_config, now_merged); } cache->per_dir_result = r->per_dir_config; return OK; } AP_DECLARE(int) ap_file_walk(request_rec *r) { ap_conf_vector_t *now_merged = NULL; core_dir_config *dconf = ap_get_core_module_config(r->per_dir_config); ap_conf_vector_t **sec_ent = NULL; int num_sec = 0; walk_cache_t *cache; const char *test_file; int cached; if (dconf->sec_file) { sec_ent = (ap_conf_vector_t **)dconf->sec_file->elts; num_sec = dconf->sec_file->nelts; } /* To allow broken modules to proceed, we allow missing filenames to pass. * We will catch it later if it's heading for the core handler. * directory_walk already posted an INFO note for module debugging. */ if (r->filename == NULL) { return OK; } cache = prep_walk_cache(AP_NOTE_FILE_WALK, r); cached = (cache->cached != NULL); /* No tricks here, there are just no <Files > to parse in this context. * We won't destroy the cache, just in case _this_ redirect is later * redirected again to a context containing the same or similar <Files >. */ if (!num_sec) { return OK; } /* Get the basename .. 
and copy for the cache just
     * in case r->filename is munged by another module
     */
    test_file = strrchr(r->filename, '/');
    if (test_file == NULL) {
        test_file = apr_pstrdup(r->pool, r->filename);
    }
    else {
        test_file = apr_pstrdup(r->pool, ++test_file);
    }

    /* If we have a cache->cached file name that matches test_file,
     * and the directory's list of file sections hasn't changed, we
     * can skip rewalking the file_walk entries.
     */
    if (cached
        && (cache->dir_conf_tested == sec_ent)
        && (strcmp(test_file, cache->cached) == 0)) {
        /* Well this looks really familiar!  If our end-result (per_dir_result)
         * didn't change, we have absolutely nothing to do :)
         * Otherwise (as is the case with most dir_merged requests)
         * we must merge our dir_conf_merged onto this new r->per_dir_config.
         */
        if (r->per_dir_config == cache->per_dir_result) {
            return OK;
        }

        if (cache->walked->nelts) {
            now_merged = ((walk_walked_t*)cache->walked->elts)
                [cache->walked->nelts - 1].merged;
        }
    }
    else {
        /* We start now_merged from NULL since we want to build
         * a file section list that can be merged to any dir_walk.
         */
        int sec_idx;
        int matches = cache->walked->nelts;
        int cached_matches = matches;
        walk_walked_t *last_walk = (walk_walked_t*)cache->walked->elts;
        apr_pool_t *rxpool = NULL;

        cached &= auth_internal_per_conf;
        cache->cached = test_file;

        /* Go through the location entries, and check for matches.
         * We apply the directive sections in given order, we should
         * really try them with the most general first.
         */
        for (sec_idx = 0; sec_idx < num_sec; ++sec_idx) {
            core_dir_config *entry_core;
            entry_core = ap_get_core_module_config(sec_ent[sec_idx]);

            if (entry_core->r) {
                int nmatch = 0;
                int i;
                ap_regmatch_t *pmatch = NULL;

                if (entry_core->refs && entry_core->refs->nelts) {
                    if (!rxpool) {
                        apr_pool_create(&rxpool, r->pool);
                    }
                    nmatch = entry_core->refs->nelts;
                    pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t));
                }

                if (ap_regexec(entry_core->r, cache->cached, nmatch, pmatch, 0)) {
                    continue;
                }

                for (i = 0; i < nmatch; i++) {
                    if (pmatch[i].rm_so >= 0 && pmatch[i].rm_eo >= 0 &&
                        ((const char **)entry_core->refs->elts)[i]) {
                        apr_table_setn(r->subprocess_env,
                                       ((const char **)entry_core->refs->elts)[i],
                                       apr_pstrndup(r->pool,
                                                    cache->cached + pmatch[i].rm_so,
                                                    pmatch[i].rm_eo - pmatch[i].rm_so));
                    }
                }
            }
            else {
                if ((entry_core->d_is_fnmatch
                     ? apr_fnmatch(entry_core->d, cache->cached, APR_FNM_PATHNAME)
                     : strcmp(entry_core->d, cache->cached))) {
                    continue;
                }
            }

            /* If we merged this same section last time, reuse it
             */
            if (matches) {
                if (last_walk->matched == sec_ent[sec_idx]) {
                    now_merged = last_walk->merged;
                    ++last_walk;
                    --matches;
                    continue;
                }

                /* We fell out of sync.  This is our own copy of walked,
                 * so truncate the remaining matches and reset remaining.
                 */
                cache->walked->nelts -= matches;
                matches = 0;
                cached = 0;
            }

            if (now_merged) {
                now_merged = ap_merge_per_dir_configs(r->pool,
                                                      now_merged,
                                                      sec_ent[sec_idx]);
            }
            else {
                now_merged = sec_ent[sec_idx];
            }

            last_walk = (walk_walked_t*)apr_array_push(cache->walked);
            last_walk->matched = sec_ent[sec_idx];
            last_walk->merged = now_merged;
        }

        if (rxpool) {
            apr_pool_destroy(rxpool);
        }

        /* Whoops - everything matched in sequence, but either the original
         * walk found some additional matches (which we need to truncate), or
         * this walk found some additional matches.
*/ if (matches) { cache->walked->nelts -= matches; cached = 0; } else if (cache->walked->nelts > cached_matches) { cached = 0; } } if (cached && r->per_dir_config == cache->dir_conf_merged) { r->per_dir_config = cache->per_dir_result; return OK; } cache->dir_conf_tested = sec_ent; cache->dir_conf_merged = r->per_dir_config; /* Merge our cache->dir_conf_merged construct with the r->per_dir_configs, * and note the end result to (potentially) skip this step next time. */ if (now_merged) { r->per_dir_config = ap_merge_per_dir_configs(r->pool, r->per_dir_config, now_merged); } cache->per_dir_result = r->per_dir_config; return OK; } AP_DECLARE(int) ap_if_walk(request_rec *r) { ap_conf_vector_t *now_merged = NULL; core_dir_config *dconf = ap_get_core_module_config(r->per_dir_config); ap_conf_vector_t **sec_ent = NULL; int num_sec = 0; walk_cache_t *cache; int cached; int sec_idx; int matches; int cached_matches; int prev_result = -1; walk_walked_t *last_walk; if (dconf->sec_if) { sec_ent = (ap_conf_vector_t **)dconf->sec_if->elts; num_sec = dconf->sec_if->nelts; } /* No tricks here, there are just no <If > to parse in this context. * We won't destroy the cache, just in case _this_ redirect is later * redirected again to a context containing the same or similar <If >. */ if (!num_sec) { return OK; } cache = prep_walk_cache(AP_NOTE_IF_WALK, r); cached = (cache->cached != NULL); cache->cached = (void *)1; matches = cache->walked->nelts; cached_matches = matches; last_walk = (walk_walked_t*)cache->walked->elts; cached &= auth_internal_per_conf; /* Go through the if entries, and check for matches */ for (sec_idx = 0; sec_idx < num_sec; ++sec_idx) { const char *err = NULL; core_dir_config *entry_core; int rc; entry_core = ap_get_core_module_config(sec_ent[sec_idx]); AP_DEBUG_ASSERT(entry_core->condition_ifelse != 0); if (entry_core->condition_ifelse & AP_CONDITION_ELSE) { AP_DEBUG_ASSERT(prev_result != -1); if (prev_result == 1) continue; } if (entry_core->condition_ifelse & AP_CONDITION_IF) { rc = ap_expr_exec(r, entry_core->condition, &err); if (rc <= 0) { if (rc < 0) ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00039) "Failed to evaluate <If > condition: %s", err); prev_result = 0; continue; } prev_result = 1; } else { prev_result = -1; } /* If we merged this same section last time, reuse it */ if (matches) { if (last_walk->matched == sec_ent[sec_idx]) { now_merged = last_walk->merged; ++last_walk; --matches; continue; } /* We fell out of sync. This is our own copy of walked, * so truncate the remaining matches and reset remaining. */ cache->walked->nelts -= matches; matches = 0; cached = 0; } if (now_merged) { now_merged = ap_merge_per_dir_configs(r->pool, now_merged, sec_ent[sec_idx]); } else { now_merged = sec_ent[sec_idx]; } last_walk = (walk_walked_t*)apr_array_push(cache->walked); last_walk->matched = sec_ent[sec_idx]; last_walk->merged = now_merged; } /* Everything matched in sequence, but it may be that the original * walk found some additional matches (which we need to truncate), or * this walk found some additional matches. */ if (matches) { cache->walked->nelts -= matches; cached = 0; } else if (cache->walked->nelts > cached_matches) { cached = 0; } if (cached && r->per_dir_config == cache->dir_conf_merged) { r->per_dir_config = cache->per_dir_result; return OK; } cache->dir_conf_tested = sec_ent; cache->dir_conf_merged = r->per_dir_config; /* Merge our cache->dir_conf_merged construct with the r->per_dir_configs, * and note the end result to (potentially) skip this step next time. 
*/ if (now_merged) { r->per_dir_config = ap_merge_per_dir_configs(r->pool, r->per_dir_config, now_merged); } cache->per_dir_result = r->per_dir_config; return OK; } /***************************************************************** * * The sub_request mechanism. * * Fns to look up a relative URI from, e.g., a map file or SSI document. * These do all access checks, etc., but don't actually run the transaction * ... use run_sub_req below for that. Also, be sure to use destroy_sub_req * as appropriate if you're likely to be creating more than a few of these. * (An early Apache version didn't destroy the sub_reqs used in directory * indexing. The result, when indexing a directory with 800-odd files in * it, was massively excessive storage allocation). * * Note more manipulation of protocol-specific vars in the request * structure... */ static request_rec *make_sub_request(const request_rec *r, ap_filter_t *next_filter) { apr_pool_t *rrp; request_rec *rnew; apr_pool_create(&rrp, r->pool); apr_pool_tag(rrp, "subrequest"); rnew = apr_pcalloc(rrp, sizeof(request_rec)); rnew->pool = rrp; rnew->hostname = r->hostname; rnew->request_time = r->request_time; rnew->connection = r->connection; rnew->server = r->server; rnew->log = r->log; rnew->request_config = ap_create_request_config(rnew->pool); /* Start a clean config from this subrequest's vhost. Optimization in * Location/File/Dir walks from the parent request assure that if the * config blocks of the subrequest match the parent request, no merges * will actually occur (and generally a minimal number of merges are * required, even if the parent and subrequest aren't quite identical.) */ rnew->per_dir_config = r->server->lookup_defaults; rnew->htaccess = r->htaccess; rnew->allowed_methods = ap_make_method_list(rnew->pool, 2); /* make a copy of the allowed-methods list */ ap_copy_method_list(rnew->allowed_methods, r->allowed_methods); /* start with the same set of output filters */ if (next_filter) { /* while there are no input filters for a subrequest, we will * try to insert some, so if we don't have valid data, the code * will seg fault. */ rnew->input_filters = r->input_filters; rnew->proto_input_filters = r->proto_input_filters; rnew->output_filters = next_filter; rnew->proto_output_filters = r->proto_output_filters; ap_add_output_filter_handle(ap_subreq_core_filter_handle, NULL, rnew, rnew->connection); } else { /* If NULL - we are expecting to be internal_fast_redirect'ed * to this subrequest - or this request will never be invoked. * Ignore the original request filter stack entirely, and * drill the input and output stacks back to the connection. */ rnew->proto_input_filters = r->proto_input_filters; rnew->proto_output_filters = r->proto_output_filters; rnew->input_filters = r->proto_input_filters; rnew->output_filters = r->proto_output_filters; } rnew->useragent_addr = r->useragent_addr; rnew->useragent_ip = r->useragent_ip; /* no input filters for a subrequest */ ap_set_sub_req_protocol(rnew, r); /* We have to run this after we fill in sub req vars, * or the r->main pointer won't be setup */ ap_run_create_request(rnew); /* Begin by presuming any module can make its own path_info assumptions, * until some module interjects and changes the value. */ rnew->used_path_info = AP_REQ_DEFAULT_PATH_INFO; /* Pass on the kept body (if any) into the new request. 
*/ rnew->kept_body = r->kept_body; return rnew; } AP_CORE_DECLARE_NONSTD(apr_status_t) ap_sub_req_output_filter(ap_filter_t *f, apr_bucket_brigade *bb) { apr_bucket *e = APR_BRIGADE_LAST(bb); if (APR_BUCKET_IS_EOS(e)) { apr_bucket_delete(e); } if (!APR_BRIGADE_EMPTY(bb)) { return ap_pass_brigade(f->next, bb); } return APR_SUCCESS; } extern APR_OPTIONAL_FN_TYPE(authz_some_auth_required) *ap__authz_ap_some_auth_required; AP_DECLARE(int) ap_some_auth_required(request_rec *r) { /* Is there a require line configured for the type of *this* req? */ if (ap__authz_ap_some_auth_required) { return ap__authz_ap_some_auth_required(r); } else return 0; } AP_DECLARE(void) ap_clear_auth_internal(void) { auth_internal_per_conf_hooks = 0; auth_internal_per_conf_providers = 0; } AP_DECLARE(void) ap_setup_auth_internal(apr_pool_t *ptemp) { int total_auth_hooks = 0; int total_auth_providers = 0; auth_internal_per_conf = 0; if (_hooks.link_access_checker) { total_auth_hooks += _hooks.link_access_checker->nelts; } if (_hooks.link_access_checker_ex) { total_auth_hooks += _hooks.link_access_checker_ex->nelts; } if (_hooks.link_check_user_id) { total_auth_hooks += _hooks.link_check_user_id->nelts; } if (_hooks.link_auth_checker) { total_auth_hooks += _hooks.link_auth_checker->nelts; } if (total_auth_hooks > auth_internal_per_conf_hooks) { return; } total_auth_providers += ap_list_provider_names(ptemp, AUTHN_PROVIDER_GROUP, AUTHN_PROVIDER_VERSION)->nelts; total_auth_providers += ap_list_provider_names(ptemp, AUTHZ_PROVIDER_GROUP, AUTHZ_PROVIDER_VERSION)->nelts; if (total_auth_providers > auth_internal_per_conf_providers) { return; } auth_internal_per_conf = 1; } AP_DECLARE(apr_status_t) ap_register_auth_provider(apr_pool_t *pool, const char *provider_group, const char *provider_name, const char *provider_version, const void *provider, int type) { if ((type & AP_AUTH_INTERNAL_MASK) == AP_AUTH_INTERNAL_PER_CONF) { ++auth_internal_per_conf_providers; } return ap_register_provider(pool, provider_group, provider_name, provider_version, provider); } AP_DECLARE(void) ap_hook_check_access(ap_HOOK_access_checker_t *pf, const char * const *aszPre, const char * const *aszSucc, int nOrder, int type) { if ((type & AP_AUTH_INTERNAL_MASK) == AP_AUTH_INTERNAL_PER_CONF) { ++auth_internal_per_conf_hooks; } ap_hook_access_checker(pf, aszPre, aszSucc, nOrder); } AP_DECLARE(void) ap_hook_check_access_ex(ap_HOOK_access_checker_ex_t *pf, const char * const *aszPre, const char * const *aszSucc, int nOrder, int type) { if ((type & AP_AUTH_INTERNAL_MASK) == AP_AUTH_INTERNAL_PER_CONF) { ++auth_internal_per_conf_hooks; } ap_hook_access_checker_ex(pf, aszPre, aszSucc, nOrder); } AP_DECLARE(void) ap_hook_check_authn(ap_HOOK_check_user_id_t *pf, const char * const *aszPre, const char * const *aszSucc, int nOrder, int type) { if ((type & AP_AUTH_INTERNAL_MASK) == AP_AUTH_INTERNAL_PER_CONF) { ++auth_internal_per_conf_hooks; } ap_hook_check_user_id(pf, aszPre, aszSucc, nOrder); } AP_DECLARE(void) ap_hook_check_authz(ap_HOOK_auth_checker_t *pf, const char * const *aszPre, const char * const *aszSucc, int nOrder, int type) { if ((type & AP_AUTH_INTERNAL_MASK) == AP_AUTH_INTERNAL_PER_CONF) { ++auth_internal_per_conf_hooks; } ap_hook_auth_checker(pf, aszPre, aszSucc, nOrder); } AP_DECLARE(request_rec *) ap_sub_req_method_uri(const char *method, const char *new_uri, const request_rec *r, ap_filter_t *next_filter) { request_rec *rnew; /* Initialise res, to avoid a gcc warning */ int res = HTTP_INTERNAL_SERVER_ERROR; char *udir; rnew = make_sub_request(r, 
next_filter); /* would be nicer to pass "method" to ap_set_sub_req_protocol */ rnew->method = method; rnew->method_number = ap_method_number_of(method); if (new_uri[0] == '/') { ap_parse_uri(rnew, new_uri); } else { udir = ap_make_dirstr_parent(rnew->pool, r->uri); udir = ap_escape_uri(rnew->pool, udir); /* re-escape it */ ap_parse_uri(rnew, ap_make_full_path(rnew->pool, udir, new_uri)); } /* We cannot return NULL without violating the API. So just turn this * subrequest into a 500 to indicate the failure. */ if (ap_is_recursion_limit_exceeded(r)) { rnew->status = HTTP_INTERNAL_SERVER_ERROR; return rnew; } /* lookup_uri * If the content can be served by the quick_handler, we can * safely bypass request_internal processing. * * If next_filter is NULL we are expecting to be * internal_fast_redirect'ed to the subrequest, or the subrequest will * never be invoked. We need to make sure that the quickhandler is not * invoked by any lookups. Since an internal_fast_redirect will always * occur too late for the quickhandler to handle the request. */ if (next_filter) { res = ap_run_quick_handler(rnew, 1); } if (next_filter == NULL || res != OK) { if ((res = ap_process_request_internal(rnew))) { rnew->status = res; } } return rnew; } AP_DECLARE(request_rec *) ap_sub_req_lookup_uri(const char *new_uri, const request_rec *r, ap_filter_t *next_filter) { return ap_sub_req_method_uri("GET", new_uri, r, next_filter); } AP_DECLARE(request_rec *) ap_sub_req_lookup_dirent(const apr_finfo_t *dirent, const request_rec *r, int subtype, ap_filter_t *next_filter) { request_rec *rnew; int res; char *fdir; char *udir; rnew = make_sub_request(r, next_filter); /* Special case: we are looking at a relative lookup in the same directory. * This is 100% safe, since dirent->name just came from the filesystem. */ if (r->path_info && *r->path_info) { /* strip path_info off the end of the uri to keep it in sync * with r->filename, which has already been stripped by directory_walk, * merge the dirent->name, and then, if the caller wants us to remerge * the original path info, do so. Note we never fix the path_info back * to r->filename, since dir_walk would do so (but we don't expect it * to happen in the usual cases) */ udir = apr_pstrdup(rnew->pool, r->uri); udir[ap_find_path_info(udir, r->path_info)] = '\0'; udir = ap_make_dirstr_parent(rnew->pool, udir); rnew->uri = ap_make_full_path(rnew->pool, udir, dirent->name); if (subtype == AP_SUBREQ_MERGE_ARGS) { rnew->uri = ap_make_full_path(rnew->pool, rnew->uri, r->path_info + 1); rnew->path_info = apr_pstrdup(rnew->pool, r->path_info); } rnew->uri = ap_escape_uri(rnew->pool, rnew->uri); } else { udir = ap_make_dirstr_parent(rnew->pool, r->uri); rnew->uri = ap_escape_uri(rnew->pool, ap_make_full_path(rnew->pool, udir, dirent->name)); } fdir = ap_make_dirstr_parent(rnew->pool, r->filename); rnew->filename = ap_make_full_path(rnew->pool, fdir, dirent->name); if (r->canonical_filename == r->filename) { rnew->canonical_filename = rnew->filename; } /* XXX This is now less relevant; we will do a full location walk * these days for this case. Preserve the apr_stat results, and * perhaps we also tag that symlinks were tested and/or found for * r->filename. */ rnew->per_dir_config = r->server->lookup_defaults; if ((dirent->valid & APR_FINFO_MIN) != APR_FINFO_MIN) { /* * apr_dir_read isn't very complete on this platform, so * we need another apr_stat (with or without APR_FINFO_LINK * depending on whether we allow all symlinks here.) 
If this * is an APR_LNK that resolves to an APR_DIR, then we will rerun * everything anyways... this should be safe. */ apr_status_t rv; if (ap_allow_options(rnew) & OPT_SYM_LINKS) { if (((rv = apr_stat(&rnew->finfo, rnew->filename, APR_FINFO_MIN, rnew->pool)) != APR_SUCCESS) && (rv != APR_INCOMPLETE)) { rnew->finfo.filetype = APR_NOFILE; } } else { if (((rv = apr_stat(&rnew->finfo, rnew->filename, APR_FINFO_LINK | APR_FINFO_MIN, rnew->pool)) != APR_SUCCESS) && (rv != APR_INCOMPLETE)) { rnew->finfo.filetype = APR_NOFILE; } } } else { memcpy(&rnew->finfo, dirent, sizeof(apr_finfo_t)); } if (rnew->finfo.filetype == APR_LNK) { /* * Resolve this symlink. We should tie this back to dir_walk's cache */ if ((res = resolve_symlink(rnew->filename, &rnew->finfo, ap_allow_options(rnew), rnew->pool)) != OK) { rnew->status = res; return rnew; } } if (rnew->finfo.filetype == APR_DIR) { /* ap_make_full_path and ap_escape_uri overallocated the buffers * by one character to help us out here. */ strcat(rnew->filename, "/"); if (!rnew->path_info || !*rnew->path_info) { strcat(rnew->uri, "/"); } } /* fill in parsed_uri values */ if (r->args && *r->args && (subtype == AP_SUBREQ_MERGE_ARGS)) { ap_parse_uri(rnew, apr_pstrcat(r->pool, rnew->uri, "?", r->args, NULL)); } else { ap_parse_uri(rnew, rnew->uri); } /* We cannot return NULL without violating the API. So just turn this * subrequest into a 500. */ if (ap_is_recursion_limit_exceeded(r)) { rnew->status = HTTP_INTERNAL_SERVER_ERROR; return rnew; } if ((res = ap_process_request_internal(rnew))) { rnew->status = res; } return rnew; } AP_DECLARE(request_rec *) ap_sub_req_lookup_file(const char *new_file, const request_rec *r, ap_filter_t *next_filter) { request_rec *rnew; int res; char *fdir; apr_size_t fdirlen; rnew = make_sub_request(r, next_filter); fdir = ap_make_dirstr_parent(rnew->pool, r->filename); fdirlen = strlen(fdir); /* Translate r->filename, if it was canonical, it stays canonical */ if (r->canonical_filename == r->filename) { rnew->canonical_filename = (char*)(1); } if (apr_filepath_merge(&rnew->filename, fdir, new_file, APR_FILEPATH_TRUENAME, rnew->pool) != APR_SUCCESS) { rnew->status = HTTP_FORBIDDEN; return rnew; } if (rnew->canonical_filename) { rnew->canonical_filename = rnew->filename; } /* * Check for a special case... if there are no '/' characters in new_file * at all, and the path was the same, then we are looking at a relative * lookup in the same directory. Fixup the URI to match. */ if (strncmp(rnew->filename, fdir, fdirlen) == 0 && rnew->filename[fdirlen] && ap_strchr_c(rnew->filename + fdirlen, '/') == NULL) { apr_status_t rv; if (ap_allow_options(rnew) & OPT_SYM_LINKS) { if (((rv = apr_stat(&rnew->finfo, rnew->filename, APR_FINFO_MIN, rnew->pool)) != APR_SUCCESS) && (rv != APR_INCOMPLETE)) { rnew->finfo.filetype = APR_NOFILE; } } else { if (((rv = apr_stat(&rnew->finfo, rnew->filename, APR_FINFO_LINK | APR_FINFO_MIN, rnew->pool)) != APR_SUCCESS) && (rv != APR_INCOMPLETE)) { rnew->finfo.filetype = APR_NOFILE; } } if (r->uri && *r->uri) { char *udir = ap_make_dirstr_parent(rnew->pool, r->uri); rnew->uri = ap_make_full_path(rnew->pool, udir, rnew->filename + fdirlen); ap_parse_uri(rnew, rnew->uri); /* fill in parsed_uri values */ } else { ap_parse_uri(rnew, new_file); /* fill in parsed_uri values */ rnew->uri = apr_pstrdup(rnew->pool, ""); } } else { /* XXX: @@@: What should be done with the parsed_uri values? * We would be better off stripping down to the 'common' elements * of the path, then reassembling the URI as best as we can. 
*/ ap_parse_uri(rnew, new_file); /* fill in parsed_uri values */ /* * XXX: this should be set properly like it is in the same-dir case * but it's actually sometimes impossible to do it... because the * file may not have a uri associated with it -djg */ rnew->uri = apr_pstrdup(rnew->pool, ""); } /* We cannot return NULL without violating the API. So just turn this * subrequest into a 500. */ if (ap_is_recursion_limit_exceeded(r)) { rnew->status = HTTP_INTERNAL_SERVER_ERROR; return rnew; } if ((res = ap_process_request_internal(rnew))) { rnew->status = res; } return rnew; } AP_DECLARE(int) ap_run_sub_req(request_rec *r) { int retval = DECLINED; /* Run the quick handler if the subrequest is not a dirent or file * subrequest */ if (!(r->filename && r->finfo.filetype != APR_NOFILE)) { retval = ap_run_quick_handler(r, 0); } if (retval != OK) { retval = ap_invoke_handler(r); if (retval == DONE) { retval = OK; } } ap_finalize_sub_req_protocol(r); return retval; } AP_DECLARE(void) ap_destroy_sub_req(request_rec *r) { /* Reclaim the space */ apr_pool_destroy(r->pool); } /* * Function to set the r->mtime field to the specified value if it's later * than what's already there. */ AP_DECLARE(void) ap_update_mtime(request_rec *r, apr_time_t dependency_mtime) { if (r->mtime < dependency_mtime) { r->mtime = dependency_mtime; } } /* * Is it the initial main request, which we only get *once* per HTTP request? */ AP_DECLARE(int) ap_is_initial_req(request_rec *r) { return (r->main == NULL) /* otherwise, this is a sub-request */ && (r->prev == NULL); /* otherwise, this is an internal redirect */ }
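/*
 * Minimal standalone sketch of the "same directory" test used by
 * ap_sub_req_lookup_file() above: the merged filename names a sibling
 * of r->filename iff it keeps the parent-directory prefix and contains
 * no further '/'. same_dir() is an invented helper for illustration;
 * the real code canonicalizes with apr_filepath_merge() first.
 */
#include <stdio.h>
#include <string.h>

static int same_dir(const char *merged, const char *fdir)
{
    size_t fdirlen = strlen(fdir);

    return strncmp(merged, fdir, fdirlen) == 0
        && merged[fdirlen] != '\0'
        && strchr(merged + fdirlen, '/') == NULL;
}

int main(void)
{
    const char *fdir = "/srv/www/docs/";

    printf("%d\n", same_dir("/srv/www/docs/note.txt", fdir));  /* 1 */
    printf("%d\n", same_dir("/srv/www/docs/sub/x.txt", fdir)); /* 0 */
    printf("%d\n", same_dir("/srv/www/other.txt", fdir));      /* 0 */
    return 0;
}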
./CrossVul/dataset_final_sorted/CWE-264/c/bad_1576_2
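/*
 * Toy model of the <If>/<ElseIf>/<Else> walk in ap_if_walk() above: an
 * ELSE-flagged section is skipped whenever the preceding <If> matched
 * (prev_result == 1), an IF-flagged section evaluates its condition,
 * and a bare <Else> resets the state. toy_section and the eval
 * callback are invented stand-ins for core_dir_config and
 * ap_expr_exec(), kept only to show the control flow.
 */
#include <stdio.h>

#define COND_IF   1
#define COND_ELSE 2

struct toy_section {
    int flags;            /* COND_IF, COND_ELSE, or both for <ElseIf> */
    int (*eval)(void);    /* stand-in for ap_expr_exec() */
    const char *name;
};

static int yes(void) { return 1; }
static int no(void)  { return 0; }

static void walk(struct toy_section *sec, int n)
{
    int prev_result = -1;

    for (int i = 0; i < n; i++) {
        if ((sec[i].flags & COND_ELSE) && prev_result == 1)
            continue;                 /* an earlier branch already won */
        if (sec[i].flags & COND_IF) {
            if (!sec[i].eval()) { prev_result = 0; continue; }
            prev_result = 1;
        } else {
            prev_result = -1;         /* bare <Else> resets the state */
        }
        printf("merging section %s\n", sec[i].name);
    }
}

int main(void)
{
    struct toy_section chain[] = {
        { COND_IF,             no,   "<If>"     },
        { COND_IF | COND_ELSE, yes,  "<ElseIf>" },
        { COND_ELSE,           NULL, "<Else>"   },
    };

    walk(chain, 3);                   /* merges only <ElseIf> */
    return 0;
}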
crossvul-cpp_data_good_2287_3
/* * linux/fs/ext3/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext3 fs regular file handling primitives * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) */ #include <linux/quotaops.h> #include "ext3.h" #include "xattr.h" #include "acl.h" /* * Called when an inode is released. Note that this is different * from ext3_file_open: open gets called at every open, but release * gets called only when /all/ the files are closed. */ static int ext3_release_file (struct inode * inode, struct file * filp) { if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) { filemap_flush(inode->i_mapping); ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE); } /* if we are the last writer on the inode, drop the block reservation */ if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1)) { mutex_lock(&EXT3_I(inode)->truncate_mutex); ext3_discard_reservation(inode); mutex_unlock(&EXT3_I(inode)->truncate_mutex); } if (is_dx(inode) && filp->private_data) ext3_htree_free_dir_info(filp->private_data); return 0; } const struct file_operations ext3_file_operations = { .llseek = generic_file_llseek, .read = new_sync_read, .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl = ext3_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext3_compat_ioctl, #endif .mmap = generic_file_mmap, .open = dquot_file_open, .release = ext3_release_file, .fsync = ext3_sync_file, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, }; const struct inode_operations ext3_file_inode_operations = { .setattr = ext3_setattr, #ifdef CONFIG_EXT3_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext3_listxattr, .removexattr = generic_removexattr, #endif .get_acl = ext3_get_acl, .set_acl = ext3_set_acl, .fiemap = ext3_fiemap, };
./CrossVul/dataset_final_sorted/CWE-264/c/good_2287_3
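/*
 * Userspace model of the last-writer check in ext3_release_file()
 * above: the block reservation is dropped only when the descriptor
 * being closed is the final writer, i.e. i_writecount is still 1 at
 * release time. toy_inode, toy_release and discard_reservation are
 * invented names; the real code also takes truncate_mutex around the
 * discard.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
    atomic_int i_writecount;  /* writers currently holding it open */
};

static void discard_reservation(struct toy_inode *inode)
{
    (void)inode;
    printf("reservation dropped\n");
}

static void toy_release(struct toy_inode *inode, bool opened_for_write)
{
    if (!opened_for_write)
        return;
    /* mirrors: (filp->f_mode & FMODE_WRITE) && i_writecount == 1 */
    if (atomic_load(&inode->i_writecount) == 1)
        discard_reservation(inode);
    atomic_fetch_sub(&inode->i_writecount, 1);
}

int main(void)
{
    struct toy_inode inode = { .i_writecount = 2 };

    toy_release(&inode, true);  /* another writer remains: kept */
    toy_release(&inode, true);  /* last writer: dropped once */
    return 0;
}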
crossvul-cpp_data_good_5861_38
/* * Cryptographic API. * * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using * Supplemental SSE3 instructions. * * This file is based on sha1_generic.c * * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> * Copyright (c) Mathias Krause <minipli@googlemail.com> * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/cryptohash.h> #include <linux/types.h> #include <crypto/sha.h> #include <asm/byteorder.h> #include <asm/i387.h> #include <asm/xcr.h> #include <asm/xsave.h> asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, unsigned int rounds); #ifdef CONFIG_AS_AVX asmlinkage void sha1_transform_avx(u32 *digest, const char *data, unsigned int rounds); #endif #ifdef CONFIG_AS_AVX2 #define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, unsigned int rounds); #endif static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int); static int sha1_ssse3_init(struct shash_desc *desc) { struct sha1_state *sctx = shash_desc_ctx(desc); *sctx = (struct sha1_state){ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, }; return 0; } static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data, unsigned int len, unsigned int partial) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int done = 0; sctx->count += len; if (partial) { done = SHA1_BLOCK_SIZE - partial; memcpy(sctx->buffer + partial, data, done); sha1_transform_asm(sctx->state, sctx->buffer, 1); } if (len - done >= SHA1_BLOCK_SIZE) { const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; sha1_transform_asm(sctx->state, data + done, rounds); done += rounds * SHA1_BLOCK_SIZE; } memcpy(sctx->buffer, data + done, len - done); return 0; } static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; int res; /* Handle the fast case right here */ if (partial + len < SHA1_BLOCK_SIZE) { sctx->count += len; memcpy(sctx->buffer + partial, data, len); return 0; } if (!irq_fpu_usable()) { res = crypto_sha1_update(desc, data, len); } else { kernel_fpu_begin(); res = __sha1_ssse3_update(desc, data, len, partial); kernel_fpu_end(); } return res; } /* Add padding and return the message digest. */ static int sha1_ssse3_final(struct shash_desc *desc, u8 *out) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; __be32 *dst = (__be32 *)out; __be64 bits; static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, }; bits = cpu_to_be64(sctx->count << 3); /* Pad out to 56 mod 64 and append length */ index = sctx->count % SHA1_BLOCK_SIZE; padlen = (index < 56) ? 
(56 - index) : ((SHA1_BLOCK_SIZE+56) - index); if (!irq_fpu_usable()) { crypto_sha1_update(desc, padding, padlen); crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits)); } else { kernel_fpu_begin(); /* We need to fill a whole block for __sha1_ssse3_update() */ if (padlen <= 56) { sctx->count += padlen; memcpy(sctx->buffer + index, padding, padlen); } else { __sha1_ssse3_update(desc, padding, padlen, index); } __sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56); kernel_fpu_end(); } /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_be32(sctx->state[i]); /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int sha1_ssse3_export(struct shash_desc *desc, void *out) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int sha1_ssse3_import(struct shash_desc *desc, const void *in) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } #ifdef CONFIG_AS_AVX2 static void sha1_apply_transform_avx2(u32 *digest, const char *data, unsigned int rounds) { /* Select the optimal transform based on data block size */ if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE) sha1_transform_avx2(digest, data, rounds); else sha1_transform_avx(digest, data, rounds); } #endif static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_ssse3_init, .update = sha1_ssse3_update, .final = sha1_ssse3_final, .export = sha1_ssse3_export, .import = sha1_ssse3_import, .descsize = sizeof(struct sha1_state), .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name= "sha1-ssse3", .cra_priority = 150, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; #ifdef CONFIG_AS_AVX static bool __init avx_usable(void) { u64 xcr0; if (!cpu_has_avx || !cpu_has_osxsave) return false; xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) { pr_info("AVX detected but unusable.\n"); return false; } return true; } #ifdef CONFIG_AS_AVX2 static bool __init avx2_usable(void) { if (avx_usable() && cpu_has_avx2 && boot_cpu_has(X86_FEATURE_BMI1) && boot_cpu_has(X86_FEATURE_BMI2)) return true; return false; } #endif #endif static int __init sha1_ssse3_mod_init(void) { char *algo_name; /* test for SSSE3 first */ if (cpu_has_ssse3) { sha1_transform_asm = sha1_transform_ssse3; algo_name = "SSSE3"; } #ifdef CONFIG_AS_AVX /* allow AVX to override SSSE3, it's a little faster */ if (avx_usable()) { sha1_transform_asm = sha1_transform_avx; algo_name = "AVX"; #ifdef CONFIG_AS_AVX2 /* allow AVX2 to override AVX, it's a little faster */ if (avx2_usable()) { sha1_transform_asm = sha1_apply_transform_avx2; algo_name = "AVX2"; } #endif } #endif if (sha1_transform_asm) { pr_info("Using %s optimized SHA-1 implementation\n", algo_name); return crypto_register_shash(&alg); } pr_info("Neither AVX nor AVX2 nor SSSE3 is available/usable.\n"); return -ENODEV; } static void __exit sha1_ssse3_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(sha1_ssse3_mod_init); module_exit(sha1_ssse3_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated"); MODULE_ALIAS_CRYPTO("sha1");
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_38
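/*
 * Standalone check of the padding arithmetic in sha1_ssse3_final()
 * above: pad out to 56 mod 64 so the 8-byte bit count completes a
 * whole 64-byte block, and padlen always lands in [1, 64] so the 0x80
 * marker byte is never omitted. Plain C, no kernel headers assumed.
 */
#include <assert.h>
#include <stdio.h>

#define SHA1_BLOCK_SIZE 64

static unsigned padlen_for(unsigned long long count)
{
    unsigned index = count % SHA1_BLOCK_SIZE;

    return (index < 56) ? (56 - index)
                        : ((SHA1_BLOCK_SIZE + 56) - index);
}

int main(void)
{
    for (unsigned long long count = 0; count < 256; count++) {
        unsigned padlen = padlen_for(count);

        /* data + padding + 8-byte length must fill whole blocks */
        assert((count + padlen + 8) % SHA1_BLOCK_SIZE == 0);
        assert(padlen >= 1 && padlen <= SHA1_BLOCK_SIZE);
    }
    printf("padding rule holds for every length residue\n");
    return 0;
}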
crossvul-cpp_data_good_2438_1
/* * * Copyright (C) 2011 Novell Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/fs.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/parser.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/statfs.h> #include <linux/seq_file.h> #include "overlayfs.h" MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Overlay filesystem"); MODULE_LICENSE("GPL"); #define OVERLAYFS_SUPER_MAGIC 0x794c764f struct ovl_config { char *lowerdir; char *upperdir; char *workdir; }; /* private information held for overlayfs's superblock */ struct ovl_fs { struct vfsmount *upper_mnt; struct vfsmount *lower_mnt; struct dentry *workdir; long lower_namelen; /* pathnames of lower and upper dirs, for show_options */ struct ovl_config config; }; struct ovl_dir_cache; /* private information held for every overlayfs dentry */ struct ovl_entry { struct dentry *__upperdentry; struct dentry *lowerdentry; struct ovl_dir_cache *cache; union { struct { u64 version; bool opaque; }; struct rcu_head rcu; }; }; const char *ovl_opaque_xattr = "trusted.overlay.opaque"; enum ovl_path_type ovl_path_type(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; if (oe->__upperdentry) { if (oe->lowerdentry) { if (S_ISDIR(dentry->d_inode->i_mode)) return OVL_PATH_MERGE; else return OVL_PATH_UPPER; } else { if (oe->opaque) return OVL_PATH_UPPER; else return OVL_PATH_PURE_UPPER; } } else { return OVL_PATH_LOWER; } } static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) { struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry); /* * Make sure to order reads to upperdentry wrt ovl_dentry_update() */ smp_read_barrier_depends(); return upperdentry; } void ovl_path_upper(struct dentry *dentry, struct path *path) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; struct ovl_entry *oe = dentry->d_fsdata; path->mnt = ofs->upper_mnt; path->dentry = ovl_upperdentry_dereference(oe); } enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) { enum ovl_path_type type = ovl_path_type(dentry); if (type == OVL_PATH_LOWER) ovl_path_lower(dentry, path); else ovl_path_upper(dentry, path); return type; } struct dentry *ovl_dentry_upper(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; return ovl_upperdentry_dereference(oe); } struct dentry *ovl_dentry_lower(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; return oe->lowerdentry; } struct dentry *ovl_dentry_real(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; struct dentry *realdentry; realdentry = ovl_upperdentry_dereference(oe); if (!realdentry) realdentry = oe->lowerdentry; return realdentry; } struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper) { struct dentry *realdentry; realdentry = ovl_upperdentry_dereference(oe); if (realdentry) { *is_upper = true; } else { realdentry = oe->lowerdentry; *is_upper = false; } return realdentry; } struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; return oe->cache; } void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache) { struct ovl_entry *oe = dentry->d_fsdata; oe->cache = cache; } void ovl_path_lower(struct dentry *dentry, struct path *path) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; struct 
ovl_entry *oe = dentry->d_fsdata; path->mnt = ofs->lower_mnt; path->dentry = oe->lowerdentry; } int ovl_want_write(struct dentry *dentry) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; return mnt_want_write(ofs->upper_mnt); } void ovl_drop_write(struct dentry *dentry) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; mnt_drop_write(ofs->upper_mnt); } struct dentry *ovl_workdir(struct dentry *dentry) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; return ofs->workdir; } bool ovl_dentry_is_opaque(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; return oe->opaque; } void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque) { struct ovl_entry *oe = dentry->d_fsdata; oe->opaque = opaque; } void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry) { struct ovl_entry *oe = dentry->d_fsdata; WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex)); WARN_ON(oe->__upperdentry); BUG_ON(!upperdentry->d_inode); /* * Make sure upperdentry is consistent before making it visible to * ovl_upperdentry_dereference(). */ smp_wmb(); oe->__upperdentry = upperdentry; } void ovl_dentry_version_inc(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); oe->version++; } u64 ovl_dentry_version_get(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); return oe->version; } bool ovl_is_whiteout(struct dentry *dentry) { struct inode *inode = dentry->d_inode; return inode && IS_WHITEOUT(inode); } static bool ovl_is_opaquedir(struct dentry *dentry) { int res; char val; struct inode *inode = dentry->d_inode; if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr) return false; res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1); if (res == 1 && val == 'y') return true; return false; } static void ovl_dentry_release(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; if (oe) { dput(oe->__upperdentry); dput(oe->lowerdentry); kfree_rcu(oe, rcu); } } static const struct dentry_operations ovl_dentry_operations = { .d_release = ovl_dentry_release, }; static struct ovl_entry *ovl_alloc_entry(void) { return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL); } static inline struct dentry *ovl_lookup_real(struct dentry *dir, struct qstr *name) { struct dentry *dentry; mutex_lock(&dir->d_inode->i_mutex); dentry = lookup_one_len(name->name, dir, name->len); mutex_unlock(&dir->d_inode->i_mutex); if (IS_ERR(dentry)) { if (PTR_ERR(dentry) == -ENOENT) dentry = NULL; } else if (!dentry->d_inode) { dput(dentry); dentry = NULL; } return dentry; } struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct ovl_entry *oe; struct dentry *upperdir; struct dentry *lowerdir; struct dentry *upperdentry = NULL; struct dentry *lowerdentry = NULL; struct inode *inode = NULL; int err; err = -ENOMEM; oe = ovl_alloc_entry(); if (!oe) goto out; upperdir = ovl_dentry_upper(dentry->d_parent); lowerdir = ovl_dentry_lower(dentry->d_parent); if (upperdir) { upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); err = PTR_ERR(upperdentry); if (IS_ERR(upperdentry)) goto out_put_dir; if (lowerdir && upperdentry) { if (ovl_is_whiteout(upperdentry)) { dput(upperdentry); upperdentry = NULL; oe->opaque = true; } else if (ovl_is_opaquedir(upperdentry)) { oe->opaque = true; } } } if (lowerdir && !oe->opaque) { lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name); err = PTR_ERR(lowerdentry); if (IS_ERR(lowerdentry)) goto 
out_dput_upper; } if (lowerdentry && upperdentry && (!S_ISDIR(upperdentry->d_inode->i_mode) || !S_ISDIR(lowerdentry->d_inode->i_mode))) { dput(lowerdentry); lowerdentry = NULL; oe->opaque = true; } if (lowerdentry || upperdentry) { struct dentry *realdentry; realdentry = upperdentry ? upperdentry : lowerdentry; err = -ENOMEM; inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, oe); if (!inode) goto out_dput; ovl_copyattr(realdentry->d_inode, inode); } oe->__upperdentry = upperdentry; oe->lowerdentry = lowerdentry; dentry->d_fsdata = oe; d_add(dentry, inode); return NULL; out_dput: dput(lowerdentry); out_dput_upper: dput(upperdentry); out_put_dir: kfree(oe); out: return ERR_PTR(err); } struct file *ovl_path_open(struct path *path, int flags) { return dentry_open(path, flags, current_cred()); } static void ovl_put_super(struct super_block *sb) { struct ovl_fs *ufs = sb->s_fs_info; dput(ufs->workdir); mntput(ufs->upper_mnt); mntput(ufs->lower_mnt); kfree(ufs->config.lowerdir); kfree(ufs->config.upperdir); kfree(ufs->config.workdir); kfree(ufs); } /** * ovl_statfs * @sb: The overlayfs super block * @buf: The struct kstatfs to fill in with stats * * Get the filesystem statistics. As writes always target the upper layer * filesystem pass the statfs to the same filesystem. */ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; struct dentry *root_dentry = dentry->d_sb->s_root; struct path path; int err; ovl_path_upper(root_dentry, &path); err = vfs_statfs(&path, buf); if (!err) { buf->f_namelen = max(buf->f_namelen, ofs->lower_namelen); buf->f_type = OVERLAYFS_SUPER_MAGIC; } return err; } /** * ovl_show_options * * Prints the mount options for a given superblock. * Returns zero; does not fail. 
*/ static int ovl_show_options(struct seq_file *m, struct dentry *dentry) { struct super_block *sb = dentry->d_sb; struct ovl_fs *ufs = sb->s_fs_info; seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); seq_printf(m, ",upperdir=%s", ufs->config.upperdir); seq_printf(m, ",workdir=%s", ufs->config.workdir); return 0; } static const struct super_operations ovl_super_operations = { .put_super = ovl_put_super, .statfs = ovl_statfs, .show_options = ovl_show_options, }; enum { OPT_LOWERDIR, OPT_UPPERDIR, OPT_WORKDIR, OPT_ERR, }; static const match_table_t ovl_tokens = { {OPT_LOWERDIR, "lowerdir=%s"}, {OPT_UPPERDIR, "upperdir=%s"}, {OPT_WORKDIR, "workdir=%s"}, {OPT_ERR, NULL} }; static int ovl_parse_opt(char *opt, struct ovl_config *config) { char *p; while ((p = strsep(&opt, ",")) != NULL) { int token; substring_t args[MAX_OPT_ARGS]; if (!*p) continue; token = match_token(p, ovl_tokens, args); switch (token) { case OPT_UPPERDIR: kfree(config->upperdir); config->upperdir = match_strdup(&args[0]); if (!config->upperdir) return -ENOMEM; break; case OPT_LOWERDIR: kfree(config->lowerdir); config->lowerdir = match_strdup(&args[0]); if (!config->lowerdir) return -ENOMEM; break; case OPT_WORKDIR: kfree(config->workdir); config->workdir = match_strdup(&args[0]); if (!config->workdir) return -ENOMEM; break; default: return -EINVAL; } } return 0; } #define OVL_WORKDIR_NAME "work" static struct dentry *ovl_workdir_create(struct vfsmount *mnt, struct dentry *dentry) { struct inode *dir = dentry->d_inode; struct dentry *work; int err; bool retried = false; err = mnt_want_write(mnt); if (err) return ERR_PTR(err); mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); retry: work = lookup_one_len(OVL_WORKDIR_NAME, dentry, strlen(OVL_WORKDIR_NAME)); if (!IS_ERR(work)) { struct kstat stat = { .mode = S_IFDIR | 0, }; if (work->d_inode) { err = -EEXIST; if (retried) goto out_dput; retried = true; ovl_cleanup(dir, work); dput(work); goto retry; } err = ovl_create_real(dir, work, &stat, NULL, NULL, true); if (err) goto out_dput; } out_unlock: mutex_unlock(&dir->i_mutex); mnt_drop_write(mnt); return work; out_dput: dput(work); work = ERR_PTR(err); goto out_unlock; } static int ovl_mount_dir(const char *name, struct path *path) { int err; err = kern_path(name, LOOKUP_FOLLOW, path); if (err) { pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); err = -EINVAL; } return err; } static bool ovl_is_allowed_fs_type(struct dentry *root) { const struct dentry_operations *dop = root->d_op; /* * We don't support: * - automount filesystems * - filesystems with revalidate (FIXME for lower layer) * - filesystems with case insensitive names */ if (dop && (dop->d_manage || dop->d_automount || dop->d_revalidate || dop->d_weak_revalidate || dop->d_compare || dop->d_hash)) { return false; } return true; } /* Workdir should not be subdir of upperdir and vice versa */ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) { bool ok = false; if (workdir != upperdir) { ok = (lock_rename(workdir, upperdir) == NULL); unlock_rename(workdir, upperdir); } return ok; } static int ovl_fill_super(struct super_block *sb, void *data, int silent) { struct path lowerpath; struct path upperpath; struct path workpath; struct inode *root_inode; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; struct kstatfs statfs; int err; err = -ENOMEM; ufs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL); if (!ufs) goto out; err = ovl_parse_opt((char *) data, &ufs->config); if (err) goto out_free_config; /* FIXME: workdir is 
not needed for a R/O mount */ err = -EINVAL; if (!ufs->config.upperdir || !ufs->config.lowerdir || !ufs->config.workdir) { pr_err("overlayfs: missing upperdir or lowerdir or workdir\n"); goto out_free_config; } err = -ENOMEM; oe = ovl_alloc_entry(); if (oe == NULL) goto out_free_config; err = ovl_mount_dir(ufs->config.upperdir, &upperpath); if (err) goto out_free_oe; err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); if (err) goto out_put_upperpath; err = ovl_mount_dir(ufs->config.workdir, &workpath); if (err) goto out_put_lowerpath; err = -EINVAL; if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || !S_ISDIR(workpath.dentry->d_inode->i_mode)) { pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); goto out_put_workpath; } if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); goto out_put_workpath; } if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); goto out_put_workpath; } if (!ovl_is_allowed_fs_type(upperpath.dentry)) { pr_err("overlayfs: filesystem of upperdir is not supported\n"); goto out_put_workpath; } if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { pr_err("overlayfs: filesystem of lowerdir is not supported\n"); goto out_put_workpath; } err = vfs_statfs(&lowerpath, &statfs); if (err) { pr_err("overlayfs: statfs failed on lowerpath\n"); goto out_put_workpath; } ufs->lower_namelen = statfs.f_namelen; sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, lowerpath.mnt->mnt_sb->s_stack_depth) + 1; err = -EINVAL; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("overlayfs: maximum fs stacking depth exceeded\n"); goto out_put_workpath; } ufs->upper_mnt = clone_private_mount(&upperpath); err = PTR_ERR(ufs->upper_mnt); if (IS_ERR(ufs->upper_mnt)) { pr_err("overlayfs: failed to clone upperpath\n"); goto out_put_workpath; } ufs->lower_mnt = clone_private_mount(&lowerpath); err = PTR_ERR(ufs->lower_mnt); if (IS_ERR(ufs->lower_mnt)) { pr_err("overlayfs: failed to clone lowerpath\n"); goto out_put_upper_mnt; } ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); err = PTR_ERR(ufs->workdir); if (IS_ERR(ufs->workdir)) { pr_err("overlayfs: failed to create directory %s/%s\n", ufs->config.workdir, OVL_WORKDIR_NAME); goto out_put_lower_mnt; } /* * Make lower_mnt R/O. That way fchmod/fchown on lower file * will fail instead of modifying lower fs. 
*/ ufs->lower_mnt->mnt_flags |= MNT_READONLY; /* If the upper fs is r/o, we mark overlayfs r/o too */ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) sb->s_flags |= MS_RDONLY; sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; root_inode = ovl_new_inode(sb, S_IFDIR, oe); if (!root_inode) goto out_put_workdir; root_dentry = d_make_root(root_inode); if (!root_dentry) goto out_put_workdir; mntput(upperpath.mnt); mntput(lowerpath.mnt); path_put(&workpath); oe->__upperdentry = upperpath.dentry; oe->lowerdentry = lowerpath.dentry; root_dentry->d_fsdata = oe; sb->s_magic = OVERLAYFS_SUPER_MAGIC; sb->s_op = &ovl_super_operations; sb->s_root = root_dentry; sb->s_fs_info = ufs; return 0; out_put_workdir: dput(ufs->workdir); out_put_lower_mnt: mntput(ufs->lower_mnt); out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_workpath: path_put(&workpath); out_put_lowerpath: path_put(&lowerpath); out_put_upperpath: path_put(&upperpath); out_free_oe: kfree(oe); out_free_config: kfree(ufs->config.lowerdir); kfree(ufs->config.upperdir); kfree(ufs->config.workdir); kfree(ufs); out: return err; } static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { return mount_nodev(fs_type, flags, raw_data, ovl_fill_super); } static struct file_system_type ovl_fs_type = { .owner = THIS_MODULE, .name = "overlayfs", .mount = ovl_mount, .kill_sb = kill_anon_super, }; MODULE_ALIAS_FS("overlayfs"); static int __init ovl_init(void) { return register_filesystem(&ovl_fs_type); } static void __exit ovl_exit(void) { unregister_filesystem(&ovl_fs_type); } module_init(ovl_init); module_exit(ovl_exit);
./CrossVul/dataset_final_sorted/CWE-264/c/good_2438_1
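/*
 * Toy model of the layering decisions in ovl_lookup() above: a
 * whiteout in the upper layer hides the lower entry, an opaque upper
 * directory stops the lower lookup, and a type mismatch discards the
 * lower dentry so that only directories merge across layers.
 * toy_dentry and merge_lookup are invented names for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_dentry {
    bool is_dir;
    bool is_whiteout;  /* upper-layer marker */
    bool is_opaque;    /* upper-layer marker */
};

/* returns a label for the merged result, or NULL for a negative lookup */
static const char *merge_lookup(struct toy_dentry *upper,
                                struct toy_dentry *lower)
{
    bool opaque = false;

    if (upper && upper->is_whiteout) {
        upper = NULL;     /* the whiteout consumes the upper entry... */
        opaque = true;    /* ...and hides anything underneath */
    } else if (upper && upper->is_opaque) {
        opaque = true;    /* opaque dir: never look below */
    }
    if (opaque)
        lower = NULL;
    if (upper && lower && (!upper->is_dir || !lower->is_dir))
        lower = NULL;     /* only dir-over-dir merges */

    if (upper && lower) return "merged dir";
    if (upper)          return "upper only";
    if (lower)          return "lower only";
    return NULL;
}

int main(void)
{
    struct toy_dentry whiteout = { false, true,  false };
    struct toy_dentry lower    = { false, false, false };
    const char *res = merge_lookup(&whiteout, &lower);

    printf("%s\n", res ? res : "hidden");   /* prints "hidden" */
    return 0;
}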
crossvul-cpp_data_bad_5617_0
/* scm.c - Socket level control messages processing. * * Author: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * Alignment and value checking mods by Craig Metz * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/signal.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/stat.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/security.h> #include <linux/pid.h> #include <linux/nsproxy.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/compat.h> #include <net/scm.h> #include <net/cls_cgroup.h> /* * Only allow a user to send credentials, that they could set with * setu(g)id. */ static __inline__ int scm_check_creds(struct ucred *creds) { const struct cred *cred = current_cred(); kuid_t uid = make_kuid(cred->user_ns, creds->uid); kgid_t gid = make_kgid(cred->user_ns, creds->gid); if (!uid_valid(uid) || !gid_valid(gid)) return -EINVAL; if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || gid_eq(gid, cred->sgid)) || nsown_capable(CAP_SETGID))) { return 0; } return -EPERM; } static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) { int *fdp = (int*)CMSG_DATA(cmsg); struct scm_fp_list *fpl = *fplp; struct file **fpp; int i, num; num = (cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)))/sizeof(int); if (num <= 0) return 0; if (num > SCM_MAX_FD) return -EINVAL; if (!fpl) { fpl = kmalloc(sizeof(struct scm_fp_list), GFP_KERNEL); if (!fpl) return -ENOMEM; *fplp = fpl; fpl->count = 0; fpl->max = SCM_MAX_FD; } fpp = &fpl->fp[fpl->count]; if (fpl->count + num > fpl->max) return -EINVAL; /* * Verify the descriptors and increment the usage count. */ for (i=0; i< num; i++) { int fd = fdp[i]; struct file *file; if (fd < 0 || !(file = fget_raw(fd))) return -EBADF; *fpp++ = file; fpl->count++; } return num; } void __scm_destroy(struct scm_cookie *scm) { struct scm_fp_list *fpl = scm->fp; int i; if (fpl) { scm->fp = NULL; for (i=fpl->count-1; i>=0; i--) fput(fpl->fp[i]); kfree(fpl); } } EXPORT_SYMBOL(__scm_destroy); int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) { struct cmsghdr *cmsg; int err; for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { err = -EINVAL; /* Verify that cmsg_len is at least sizeof(struct cmsghdr) */ /* The first check was omitted in <= 2.2.5. The reasoning was that parser checks cmsg_len in any case, so that additional check would be work duplication. But if cmsg_level is not SOL_SOCKET, we do not check for too short ancillary data object at all! Oops. OK, let's add it... 
*/ if (!CMSG_OK(msg, cmsg)) goto error; if (cmsg->cmsg_level != SOL_SOCKET) continue; switch (cmsg->cmsg_type) { case SCM_RIGHTS: if (!sock->ops || sock->ops->family != PF_UNIX) goto error; err=scm_fp_copy(cmsg, &p->fp); if (err<0) goto error; break; case SCM_CREDENTIALS: { struct ucred creds; kuid_t uid; kgid_t gid; if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) goto error; memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred)); err = scm_check_creds(&creds); if (err) goto error; p->creds.pid = creds.pid; if (!p->pid || pid_vnr(p->pid) != creds.pid) { struct pid *pid; err = -ESRCH; pid = find_get_pid(creds.pid); if (!pid) goto error; put_pid(p->pid); p->pid = pid; } err = -EINVAL; uid = make_kuid(current_user_ns(), creds.uid); gid = make_kgid(current_user_ns(), creds.gid); if (!uid_valid(uid) || !gid_valid(gid)) goto error; p->creds.uid = uid; p->creds.gid = gid; if (!p->cred || !uid_eq(p->cred->euid, uid) || !gid_eq(p->cred->egid, gid)) { struct cred *cred; err = -ENOMEM; cred = prepare_creds(); if (!cred) goto error; cred->uid = cred->euid = uid; cred->gid = cred->egid = gid; if (p->cred) put_cred(p->cred); p->cred = cred; } break; } default: goto error; } } if (p->fp && !p->fp->count) { kfree(p->fp); p->fp = NULL; } return 0; error: scm_destroy(p); return err; } EXPORT_SYMBOL(__scm_send); int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) { struct cmsghdr __user *cm = (__force struct cmsghdr __user *)msg->msg_control; struct cmsghdr cmhdr; int cmlen = CMSG_LEN(len); int err; if (MSG_CMSG_COMPAT & msg->msg_flags) return put_cmsg_compat(msg, level, type, len, data); if (cm==NULL || msg->msg_controllen < sizeof(*cm)) { msg->msg_flags |= MSG_CTRUNC; return 0; /* XXX: return error? check spec. */ } if (msg->msg_controllen < cmlen) { msg->msg_flags |= MSG_CTRUNC; cmlen = msg->msg_controllen; } cmhdr.cmsg_level = level; cmhdr.cmsg_type = type; cmhdr.cmsg_len = cmlen; err = -EFAULT; if (copy_to_user(cm, &cmhdr, sizeof cmhdr)) goto out; if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr))) goto out; cmlen = CMSG_SPACE(len); if (msg->msg_controllen < cmlen) cmlen = msg->msg_controllen; msg->msg_control += cmlen; msg->msg_controllen -= cmlen; err = 0; out: return err; } EXPORT_SYMBOL(put_cmsg); void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) { struct cmsghdr __user *cm = (__force struct cmsghdr __user*)msg->msg_control; int fdmax = 0; int fdnum = scm->fp->count; struct file **fp = scm->fp->fp; int __user *cmfptr; int err = 0, i; if (MSG_CMSG_COMPAT & msg->msg_flags) { scm_detach_fds_compat(msg, scm); return; } if (msg->msg_controllen > sizeof(struct cmsghdr)) fdmax = ((msg->msg_controllen - sizeof(struct cmsghdr)) / sizeof(int)); if (fdnum < fdmax) fdmax = fdnum; for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; i++, cmfptr++) { struct socket *sock; int new_fd; err = security_file_receive(fp[i]); if (err) break; err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & msg->msg_flags ? O_CLOEXEC : 0); if (err < 0) break; new_fd = err; err = put_user(new_fd, cmfptr); if (err) { put_unused_fd(new_fd); break; } /* Bump the usage count and install the file. 
*/ sock = sock_from_file(fp[i], &err); if (sock) { sock_update_netprioidx(sock->sk, current); sock_update_classid(sock->sk, current); } fd_install(new_fd, get_file(fp[i])); } if (i > 0) { int cmlen = CMSG_LEN(i*sizeof(int)); err = put_user(SOL_SOCKET, &cm->cmsg_level); if (!err) err = put_user(SCM_RIGHTS, &cm->cmsg_type); if (!err) err = put_user(cmlen, &cm->cmsg_len); if (!err) { cmlen = CMSG_SPACE(i*sizeof(int)); msg->msg_control += cmlen; msg->msg_controllen -= cmlen; } } if (i < fdnum || (fdnum && fdmax <= 0)) msg->msg_flags |= MSG_CTRUNC; /* * All of the files that fit in the message have had their * usage counts incremented, so we just free the list. */ __scm_destroy(scm); } EXPORT_SYMBOL(scm_detach_fds); struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) { struct scm_fp_list *new_fpl; int i; if (!fpl) return NULL; new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]), GFP_KERNEL); if (new_fpl) { for (i = 0; i < fpl->count; i++) get_file(fpl->fp[i]); new_fpl->max = new_fpl->count; } return new_fpl; } EXPORT_SYMBOL(scm_fp_dup);
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5617_0
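/*
 * Userspace counterpart to scm_fp_copy() above: the sender packs file
 * descriptors into an SCM_RIGHTS control message, and the receiving
 * kernel recovers the count from cmsg_len with the same arithmetic
 * that __scm_send() validates. Standard CMSG macros from
 * <sys/socket.h>; "sock" is assumed to be a connected AF_UNIX socket
 * and the buffer caps nfds at 8.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fds(int sock, const int *fds, int nfds)
{
    char dummy = 'x';                 /* must carry at least one byte */
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {                           /* aligned cmsg buffer, see cmsg(3) */
        char buf[CMSG_SPACE(8 * sizeof(int))];
        struct cmsghdr align;
    } u;
    struct msghdr msg = {
        .msg_iov        = &iov,
        .msg_iovlen     = 1,
        .msg_control    = u.buf,
        .msg_controllen = CMSG_SPACE(nfds * sizeof(int)),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type  = SCM_RIGHTS;
    cmsg->cmsg_len   = CMSG_LEN(nfds * sizeof(int));
    memcpy(CMSG_DATA(cmsg), fds, nfds * sizeof(int));

    return sendmsg(sock, &msg, 0);    /* -1 with errno set on failure */
}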
crossvul-cpp_data_bad_3564_0
/* * linux/kernel/exit.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/capability.h> #include <linux/completion.h> #include <linux/personality.h> #include <linux/tty.h> #include <linux/mnt_namespace.h> #include <linux/iocontext.h> #include <linux/key.h> #include <linux/security.h> #include <linux/cpu.h> #include <linux/acct.h> #include <linux/tsacct_kern.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/binfmts.h> #include <linux/nsproxy.h> #include <linux/pid_namespace.h> #include <linux/ptrace.h> #include <linux/profile.h> #include <linux/mount.h> #include <linux/proc_fs.h> #include <linux/kthread.h> #include <linux/mempolicy.h> #include <linux/taskstats_kern.h> #include <linux/delayacct.h> #include <linux/freezer.h> #include <linux/cgroup.h> #include <linux/syscalls.h> #include <linux/signal.h> #include <linux/posix-timers.h> #include <linux/cn_proc.h> #include <linux/mutex.h> #include <linux/futex.h> #include <linux/compat.h> #include <linux/pipe_fs_i.h> #include <linux/audit.h> /* for audit_free() */ #include <linux/resource.h> #include <linux/blkdev.h> #include <linux/task_io_accounting_ops.h> #include <linux/tracehook.h> #include <trace/sched.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> static void exit_mm(struct task_struct * tsk); static inline int task_detached(struct task_struct *p) { return p->exit_signal == -1; } static void __unhash_process(struct task_struct *p) { nr_threads--; detach_pid(p, PIDTYPE_PID); if (thread_group_leader(p)) { detach_pid(p, PIDTYPE_PGID); detach_pid(p, PIDTYPE_SID); list_del_rcu(&p->tasks); __get_cpu_var(process_counts)--; } list_del_rcu(&p->thread_group); list_del_init(&p->sibling); } /* * This function expects the tasklist_lock write-locked. */ static void __exit_signal(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; struct sighand_struct *sighand; BUG_ON(!sig); BUG_ON(!atomic_read(&sig->count)); sighand = rcu_dereference(tsk->sighand); spin_lock(&sighand->siglock); posix_cpu_timers_exit(tsk); if (atomic_dec_and_test(&sig->count)) posix_cpu_timers_exit_group(tsk); else { /* * If there is any task waiting for the group exit * then notify it: */ if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) wake_up_process(sig->group_exit_task); if (tsk == sig->curr_target) sig->curr_target = next_thread(tsk); /* * Accumulate here the counters for all threads but the * group leader as they die, so they can be added into * the process-wide totals when those are taken. * The group leader stays around as a zombie as long * as there are other threads. When it gets reaped, * the exit.c code will add its counts into these totals. * We won't ever get here for the group leader, since it * will have been the last reference on the signal_struct. */ sig->gtime = cputime_add(sig->gtime, task_gtime(tsk)); sig->min_flt += tsk->min_flt; sig->maj_flt += tsk->maj_flt; sig->nvcsw += tsk->nvcsw; sig->nivcsw += tsk->nivcsw; sig->inblock += task_io_get_inblock(tsk); sig->oublock += task_io_get_oublock(tsk); task_io_accounting_add(&sig->ioac, &tsk->ioac); sig = NULL; /* Marker for below. */ } __unhash_process(tsk); /* * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
*/ flush_sigqueue(&tsk->pending); tsk->signal = NULL; tsk->sighand = NULL; spin_unlock(&sighand->siglock); __cleanup_sighand(sighand); clear_tsk_thread_flag(tsk,TIF_SIGPENDING); if (sig) { flush_sigqueue(&sig->shared_pending); taskstats_tgid_free(sig); /* * Make sure ->signal can't go away under rq->lock, * see account_group_exec_runtime(). */ task_rq_unlock_wait(tsk); __cleanup_signal(sig); } } static void delayed_put_task_struct(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); trace_sched_process_free(tsk); put_task_struct(tsk); } void release_task(struct task_struct * p) { struct task_struct *leader; int zap_leader; repeat: tracehook_prepare_release_task(p); atomic_dec(&p->user->processes); proc_flush_task(p); write_lock_irq(&tasklist_lock); tracehook_finish_release_task(p); __exit_signal(p); /* * If we are the last non-leader member of the thread * group, and the leader is zombie, then notify the * group leader's parent process. (if it wants notification.) */ zap_leader = 0; leader = p->group_leader; if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) { BUG_ON(task_detached(leader)); do_notify_parent(leader, leader->exit_signal); /* * If we were the last child thread and the leader has * exited already, and the leader's parent ignores SIGCHLD, * then we are the one who should release the leader. * * do_notify_parent() will have marked it self-reaping in * that case. */ zap_leader = task_detached(leader); /* * This maintains the invariant that release_task() * only runs on a task in EXIT_DEAD, just for sanity. */ if (zap_leader) leader->exit_state = EXIT_DEAD; } write_unlock_irq(&tasklist_lock); release_thread(p); call_rcu(&p->rcu, delayed_put_task_struct); p = leader; if (unlikely(zap_leader)) goto repeat; } /* * This checks not only the pgrp, but falls back on the pid if no * satisfactory pgrp is found. I dunno - gdb doesn't work correctly * without this... * * The caller must hold rcu lock or the tasklist lock. */ struct pid *session_of_pgrp(struct pid *pgrp) { struct task_struct *p; struct pid *sid = NULL; p = pid_task(pgrp, PIDTYPE_PGID); if (p == NULL) p = pid_task(pgrp, PIDTYPE_PID); if (p != NULL) sid = task_session(p); return sid; } /* * Determine if a process group is "orphaned", according to the POSIX * definition in 2.2.2.52. Orphaned process groups are not to be affected * by terminal-generated stop signals. Newly orphaned process groups are * to receive a SIGHUP and a SIGCONT. * * "I ask you, have you ever known what it is to be an orphan?" 
*/ static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) { struct task_struct *p; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { if ((p == ignored_task) || (p->exit_state && thread_group_empty(p)) || is_global_init(p->real_parent)) continue; if (task_pgrp(p->real_parent) != pgrp && task_session(p->real_parent) == task_session(p)) return 0; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); return 1; } int is_current_pgrp_orphaned(void) { int retval; read_lock(&tasklist_lock); retval = will_become_orphaned_pgrp(task_pgrp(current), NULL); read_unlock(&tasklist_lock); return retval; } static int has_stopped_jobs(struct pid *pgrp) { int retval = 0; struct task_struct *p; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { if (!task_is_stopped(p)) continue; retval = 1; break; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); return retval; } /* * Check to see if any process groups have become orphaned as * a result of our exiting, and if they have any stopped jobs, * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) */ static void kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) { struct pid *pgrp = task_pgrp(tsk); struct task_struct *ignored_task = tsk; if (!parent) /* exit: our father is in a different pgrp than * we are and we were the only connection outside. */ parent = tsk->real_parent; else /* reparent: our child is in a different pgrp than * we are, and it was the only connection outside. */ ignored_task = NULL; if (task_pgrp(parent) != pgrp && task_session(parent) == task_session(tsk) && will_become_orphaned_pgrp(pgrp, ignored_task) && has_stopped_jobs(pgrp)) { __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); } } /** * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd * * If a kernel thread is launched as a result of a system call, or if * it ever exits, it should generally reparent itself to kthreadd so it * isn't in the way of other processes and is correctly cleaned up on exit. * * The various task state such as scheduling policy and priority may have * been inherited from a user process, so we reset them to sane values here. * * NOTE that reparent_to_kthreadd() gives the caller full capabilities. */ static void reparent_to_kthreadd(void) { write_lock_irq(&tasklist_lock); ptrace_unlink(current); /* Reparent to init */ current->real_parent = current->parent = kthreadd_task; list_move_tail(&current->sibling, &current->real_parent->children); /* Set the exit signal to SIGCHLD so we signal init on exit */ current->exit_signal = SIGCHLD; if (task_nice(current) < 0) set_user_nice(current, 0); /* cpus_allowed? */ /* rt_priority? */ /* signals? */ security_task_reparent_to_init(current); memcpy(current->signal->rlim, init_task.signal->rlim, sizeof(current->signal->rlim)); atomic_inc(&(INIT_USER->__count)); write_unlock_irq(&tasklist_lock); switch_uid(INIT_USER); } void __set_special_pids(struct pid *pid) { struct task_struct *curr = current->group_leader; pid_t nr = pid_nr(pid); if (task_session(curr) != pid) { change_pid(curr, PIDTYPE_SID, pid); set_task_session(curr, nr); } if (task_pgrp(curr) != pid) { change_pid(curr, PIDTYPE_PGID, pid); set_task_pgrp(curr, nr); } } static void set_special_pids(struct pid *pid) { write_lock_irq(&tasklist_lock); __set_special_pids(pid); write_unlock_irq(&tasklist_lock); } /* * Let kernel threads use this to say that they * allow a certain signal (since daemonize() will * have disabled all of them by default). 
*/ int allow_signal(int sig) { if (!valid_signal(sig) || sig < 1) return -EINVAL; spin_lock_irq(&current->sighand->siglock); sigdelset(&current->blocked, sig); if (!current->mm) { /* Kernel threads handle their own signals. Let the signal code know it'll be handled, so that they don't get converted to SIGKILL or just silently dropped */ current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; } recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); return 0; } EXPORT_SYMBOL(allow_signal); int disallow_signal(int sig) { if (!valid_signal(sig) || sig < 1) return -EINVAL; spin_lock_irq(&current->sighand->siglock); current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); return 0; } EXPORT_SYMBOL(disallow_signal); /* * Put all the gunge required to become a kernel thread without * attached user resources in one place where it belongs. */ void daemonize(const char *name, ...) { va_list args; struct fs_struct *fs; sigset_t blocked; va_start(args, name); vsnprintf(current->comm, sizeof(current->comm), name, args); va_end(args); /* * If we were started as result of loading a module, close all of the * user space pages. We don't need them, and if we didn't close them * they would be locked into memory. */ exit_mm(current); /* * We don't want to have TIF_FREEZE set if the system-wide hibernation * or suspend transition begins right now. */ current->flags |= (PF_NOFREEZE | PF_KTHREAD); if (current->nsproxy != &init_nsproxy) { get_nsproxy(&init_nsproxy); switch_task_namespaces(current, &init_nsproxy); } set_special_pids(&init_struct_pid); proc_clear_tty(current); /* Block and flush all signals */ sigfillset(&blocked); sigprocmask(SIG_BLOCK, &blocked, NULL); flush_signals(current); /* Become as one with the init task */ exit_fs(current); /* current->fs->count--; */ fs = init_task.fs; current->fs = fs; atomic_inc(&fs->count); exit_files(current); current->files = init_task.files; atomic_inc(&current->files->count); reparent_to_kthreadd(); } EXPORT_SYMBOL(daemonize); static void close_files(struct files_struct * files) { int i, j; struct fdtable *fdt; j = 0; /* * It is safe to dereference the fd table without RCU or * ->file_lock because this is the last reference to the * files structure. */ fdt = files_fdtable(files); for (;;) { unsigned long set; i = j * __NFDBITS; if (i >= fdt->max_fds) break; set = fdt->open_fds->fds_bits[j++]; while (set) { if (set & 1) { struct file * file = xchg(&fdt->fd[i], NULL); if (file) { filp_close(file, files); cond_resched(); } } i++; set >>= 1; } } } struct files_struct *get_files_struct(struct task_struct *task) { struct files_struct *files; task_lock(task); files = task->files; if (files) atomic_inc(&files->count); task_unlock(task); return files; } void put_files_struct(struct files_struct *files) { struct fdtable *fdt; if (atomic_dec_and_test(&files->count)) { close_files(files); /* * Free the fd and fdset arrays if we expanded them. * If the fdtable was embedded, pass files for freeing * at the end of the RCU grace period. Otherwise, * you can free files immediately. 
*/ fdt = files_fdtable(files); if (fdt != &files->fdtab) kmem_cache_free(files_cachep, files); free_fdtable(fdt); } } void reset_files_struct(struct files_struct *files) { struct task_struct *tsk = current; struct files_struct *old; old = tsk->files; task_lock(tsk); tsk->files = files; task_unlock(tsk); put_files_struct(old); } void exit_files(struct task_struct *tsk) { struct files_struct * files = tsk->files; if (files) { task_lock(tsk); tsk->files = NULL; task_unlock(tsk); put_files_struct(files); } } void put_fs_struct(struct fs_struct *fs) { /* No need to hold fs->lock if we are killing it */ if (atomic_dec_and_test(&fs->count)) { path_put(&fs->root); path_put(&fs->pwd); kmem_cache_free(fs_cachep, fs); } } void exit_fs(struct task_struct *tsk) { struct fs_struct * fs = tsk->fs; if (fs) { task_lock(tsk); tsk->fs = NULL; task_unlock(tsk); put_fs_struct(fs); } } EXPORT_SYMBOL_GPL(exit_fs); #ifdef CONFIG_MM_OWNER /* * Task p is exiting and it owned mm, lets find a new owner for it */ static inline int mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) { /* * If there are other users of the mm and the owner (us) is exiting * we need to find a new owner to take on the responsibility. */ if (atomic_read(&mm->mm_users) <= 1) return 0; if (mm->owner != p) return 0; return 1; } void mm_update_next_owner(struct mm_struct *mm) { struct task_struct *c, *g, *p = current; retry: if (!mm_need_new_owner(mm, p)) return; read_lock(&tasklist_lock); /* * Search in the children */ list_for_each_entry(c, &p->children, sibling) { if (c->mm == mm) goto assign_new_owner; } /* * Search in the siblings */ list_for_each_entry(c, &p->parent->children, sibling) { if (c->mm == mm) goto assign_new_owner; } /* * Search through everything else. We should not get * here often */ do_each_thread(g, c) { if (c->mm == mm) goto assign_new_owner; } while_each_thread(g, c); read_unlock(&tasklist_lock); /* * We found no owner yet mm_users > 1: this implies that we are * most likely racing with swapoff (try_to_unuse()) or /proc or * ptrace or page migration (get_task_mm()). Mark owner as NULL, * so that subsystems can understand the callback and take action. */ down_write(&mm->mmap_sem); cgroup_mm_owner_callbacks(mm->owner, NULL); mm->owner = NULL; up_write(&mm->mmap_sem); return; assign_new_owner: BUG_ON(c == p); get_task_struct(c); read_unlock(&tasklist_lock); down_write(&mm->mmap_sem); /* * The task_lock protects c->mm from changing. * We always want mm->owner->mm == mm */ task_lock(c); if (c->mm != mm) { task_unlock(c); up_write(&mm->mmap_sem); put_task_struct(c); goto retry; } cgroup_mm_owner_callbacks(mm->owner, c); mm->owner = c; task_unlock(c); up_write(&mm->mmap_sem); put_task_struct(c); } #endif /* CONFIG_MM_OWNER */ /* * Turn us into a lazy TLB process if we * aren't already.. */ static void exit_mm(struct task_struct * tsk) { struct mm_struct *mm = tsk->mm; struct core_state *core_state; mm_release(tsk, mm); if (!mm) return; /* * Serialize with any possible pending coredump. * We must hold mmap_sem around checking core_state * and clearing tsk->mm. The core-inducing thread * will increment ->nr_threads for each thread in the * group with ->mm != NULL. */ down_read(&mm->mmap_sem); core_state = mm->core_state; if (core_state) { struct core_thread self; up_read(&mm->mmap_sem); self.task = tsk; self.next = xchg(&core_state->dumper.next, &self); /* * Implies mb(), the result of xchg() must be visible * to core_state->dumper. 
*/ if (atomic_dec_and_test(&core_state->nr_threads)) complete(&core_state->startup); for (;;) { set_task_state(tsk, TASK_UNINTERRUPTIBLE); if (!self.task) /* see coredump_finish() */ break; schedule(); } __set_task_state(tsk, TASK_RUNNING); down_read(&mm->mmap_sem); } atomic_inc(&mm->mm_count); BUG_ON(mm != tsk->active_mm); /* more a memory barrier than a real lock */ task_lock(tsk); tsk->mm = NULL; up_read(&mm->mmap_sem); enter_lazy_tlb(mm, current); /* We don't want this task to be frozen prematurely */ clear_freeze_flag(tsk); task_unlock(tsk); mm_update_next_owner(mm); mmput(mm); } /* * Return nonzero if @parent's children should reap themselves. * * Called with write_lock_irq(&tasklist_lock) held. */ static int ignoring_children(struct task_struct *parent) { int ret; struct sighand_struct *psig = parent->sighand; unsigned long flags; spin_lock_irqsave(&psig->siglock, flags); ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT)); spin_unlock_irqrestore(&psig->siglock, flags); return ret; } /* * Detach all tasks we were using ptrace on. * Any that need to be release_task'd are put on the @dead list. * * Called with write_lock(&tasklist_lock) held. */ static void ptrace_exit(struct task_struct *parent, struct list_head *dead) { struct task_struct *p, *n; int ign = -1; list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) { __ptrace_unlink(p); if (p->exit_state != EXIT_ZOMBIE) continue; /* * If it's a zombie, our attachedness prevented normal * parent notification or self-reaping. Do notification * now if it would have happened earlier. If it should * reap itself, add it to the @dead list. We can't call * release_task() here because we already hold tasklist_lock. * * If it's our own child, there is no notification to do. * But if our normal children self-reap, then this child * was prevented by ptrace and we must reap it now. */ if (!task_detached(p) && thread_group_empty(p)) { if (!same_thread_group(p->real_parent, parent)) do_notify_parent(p, p->exit_signal); else { if (ign < 0) ign = ignoring_children(parent); if (ign) p->exit_signal = -1; } } if (task_detached(p)) { /* * Mark it as in the process of being reaped. */ p->exit_state = EXIT_DEAD; list_add(&p->ptrace_entry, dead); } } } /* * Finish up exit-time ptrace cleanup. * * Called without locks. */ static void ptrace_exit_finish(struct task_struct *parent, struct list_head *dead) { struct task_struct *p, *n; BUG_ON(!list_empty(&parent->ptraced)); list_for_each_entry_safe(p, n, dead, ptrace_entry) { list_del_init(&p->ptrace_entry); release_task(p); } } static void reparent_thread(struct task_struct *p, struct task_struct *father) { if (p->pdeath_signal) /* We already hold the tasklist_lock here. */ group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p); list_move_tail(&p->sibling, &p->real_parent->children); /* If this is a threaded reparent there is no need to * notify anyone anything has happened. */ if (same_thread_group(p->real_parent, father)) return; /* We don't want people slaying init. */ if (!task_detached(p)) p->exit_signal = SIGCHLD; /* If we'd notified the old parent about this child's death, * also notify the new parent. */ if (!ptrace_reparented(p) && p->exit_state == EXIT_ZOMBIE && !task_detached(p) && thread_group_empty(p)) do_notify_parent(p, p->exit_signal); kill_orphaned_pgrp(p, father); } /* * When we die, we re-parent all our children. 
* Try to give them to another thread in our thread * group, and if no such member exists, give it to * the child reaper process (ie "init") in our pid * space. */ static struct task_struct *find_new_reaper(struct task_struct *father) { struct pid_namespace *pid_ns = task_active_pid_ns(father); struct task_struct *thread; thread = father; while_each_thread(father, thread) { if (thread->flags & PF_EXITING) continue; if (unlikely(pid_ns->child_reaper == father)) pid_ns->child_reaper = thread; return thread; } if (unlikely(pid_ns->child_reaper == father)) { write_unlock_irq(&tasklist_lock); if (unlikely(pid_ns == &init_pid_ns)) panic("Attempted to kill init!"); zap_pid_ns_processes(pid_ns); write_lock_irq(&tasklist_lock); /* * We cannot clear ->child_reaper or leave it alone. * There may be stealth EXIT_DEAD tasks on ->children, * forget_original_parent() must move them somewhere. */ pid_ns->child_reaper = init_pid_ns.child_reaper; } return pid_ns->child_reaper; }
static void forget_original_parent(struct task_struct *father) { struct task_struct *p, *n, *reaper; LIST_HEAD(ptrace_dead); write_lock_irq(&tasklist_lock); reaper = find_new_reaper(father); /* * First clean up ptrace if we were using it. */ ptrace_exit(father, &ptrace_dead); list_for_each_entry_safe(p, n, &father->children, sibling) { p->real_parent = reaper; if (p->parent == father) { BUG_ON(p->ptrace); p->parent = p->real_parent; } reparent_thread(p, father); } write_unlock_irq(&tasklist_lock); BUG_ON(!list_empty(&father->children)); ptrace_exit_finish(father, &ptrace_dead); }
/* * Send signals to all our closest relatives so that they know * to properly mourn us. */ static void exit_notify(struct task_struct *tsk, int group_dead) { int signal; void *cookie; /* * This does two things: * * A. Make init inherit all the child processes * B. Check to see if any process groups have become orphaned * as a result of our exiting, and if they have any stopped * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) */ forget_original_parent(tsk); exit_task_namespaces(tsk); write_lock_irq(&tasklist_lock); if (group_dead) kill_orphaned_pgrp(tsk->group_leader, NULL); /* Let father know we died * * Thread signals are configurable, but you aren't going to use * that to send signals to arbitrary processes. * That stops right now. * * If the parent exec id doesn't match the exec id we saved * when we started then we know the parent has changed security * domain. * * If our self_exec id doesn't match our parent_exec_id then * we have changed execution domain as these two values started * the same after a fork. */ if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) && (tsk->parent_exec_id != tsk->real_parent->self_exec_id || tsk->self_exec_id != tsk->parent_exec_id) && !capable(CAP_KILL)) tsk->exit_signal = SIGCHLD; signal = tracehook_notify_death(tsk, &cookie, group_dead); if (signal >= 0) signal = do_notify_parent(tsk, signal); tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE; /* mt-exec, de_thread() is waiting for us */ if (thread_group_leader(tsk) && tsk->signal->group_exit_task && tsk->signal->notify_count < 0) wake_up_process(tsk->signal->group_exit_task); write_unlock_irq(&tasklist_lock); tracehook_report_death(tsk, signal, cookie, group_dead); /* If the process is dead, release it - nobody will wait for it */ if (signal == DEATH_REAP) release_task(tsk); }
#ifdef CONFIG_DEBUG_STACK_USAGE static void check_stack_usage(void) { static DEFINE_SPINLOCK(low_water_lock); static int lowest_to_date = THREAD_SIZE; unsigned long *n = end_of_stack(current); unsigned long free; while (*n == 0) n++; free = (unsigned long)n - (unsigned long)end_of_stack(current); if (free >= lowest_to_date) return; spin_lock(&low_water_lock); if (free < lowest_to_date) { printk(KERN_WARNING "%s used greatest stack depth: %lu bytes " "left\n", current->comm, free); lowest_to_date = free; } spin_unlock(&low_water_lock); } #else static inline void check_stack_usage(void) {} #endif
NORET_TYPE void do_exit(long code) { struct task_struct *tsk = current; int group_dead; profile_task_exit(tsk); WARN_ON(atomic_read(&tsk->fs_excl)); if (unlikely(in_interrupt())) panic("Aiee, killing interrupt handler!"); if (unlikely(!tsk->pid)) panic("Attempted to kill the idle task!"); tracehook_report_exit(&code); /* * We're taking recursive faults here in do_exit. Safest is to just * leave this task alone and wait for reboot. */ if (unlikely(tsk->flags & PF_EXITING)) { printk(KERN_ALERT "Fixing recursive fault but reboot is needed!\n"); /* * We can do this unlocked here. The futex code uses * this flag just to verify whether the pi state * cleanup has been done or not. In the worst case it * loops once more. We pretend that the cleanup was * done as there is no way to return. Either the * OWNER_DIED bit is set by now or we push the blocked * task into the wait-forever nirvana as well. */ tsk->flags |= PF_EXITPIDONE; if (tsk->io_context) exit_io_context(); set_current_state(TASK_UNINTERRUPTIBLE); schedule(); } exit_signals(tsk); /* sets PF_EXITING */ /* * tsk->flags are checked in the futex code to protect against * an exiting task cleaning up the robust pi futexes.
*/ smp_mb(); spin_unlock_wait(&tsk->pi_lock); if (unlikely(in_atomic())) printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", current->comm, task_pid_nr(current), preempt_count()); acct_update_integrals(tsk); if (tsk->mm) { update_hiwater_rss(tsk->mm); update_hiwater_vm(tsk->mm); } group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { hrtimer_cancel(&tsk->signal->real_timer); exit_itimers(tsk->signal); } acct_collect(code, group_dead); #ifdef CONFIG_FUTEX if (unlikely(tsk->robust_list)) exit_robust_list(tsk); #ifdef CONFIG_COMPAT if (unlikely(tsk->compat_robust_list)) compat_exit_robust_list(tsk); #endif #endif if (group_dead) tty_audit_exit(); if (unlikely(tsk->audit_context)) audit_free(tsk); tsk->exit_code = code; taskstats_exit(tsk, group_dead); exit_mm(tsk); if (group_dead) acct_process(); trace_sched_process_exit(tsk); exit_sem(tsk); exit_files(tsk); exit_fs(tsk); check_stack_usage(); exit_thread(); cgroup_exit(tsk, 1); exit_keys(tsk); if (group_dead && tsk->signal->leader) disassociate_ctty(1); module_put(task_thread_info(tsk)->exec_domain->module); if (tsk->binfmt) module_put(tsk->binfmt->module); proc_exit_connector(tsk); exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA mpol_put(tsk->mempolicy); tsk->mempolicy = NULL; #endif #ifdef CONFIG_FUTEX /* * This must happen late, after the PID is not * hashed anymore: */ if (unlikely(!list_empty(&tsk->pi_state_list))) exit_pi_state_list(tsk); if (unlikely(current->pi_state_cache)) kfree(current->pi_state_cache); #endif /* * Make sure we are holding no locks: */ debug_check_no_locks_held(tsk); /* * We can do this unlocked here. The futex code uses this flag * just to verify whether the pi state cleanup has been done * or not. In the worst case it loops once more. */ tsk->flags |= PF_EXITPIDONE; if (tsk->io_context) exit_io_context(); if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); preempt_disable(); /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; schedule(); BUG(); /* Avoid "noreturn function does return". */ for (;;) cpu_relax(); /* For when BUG is null */ } EXPORT_SYMBOL_GPL(do_exit); NORET_TYPE void complete_and_exit(struct completion *comp, long code) { if (comp) complete(comp); do_exit(code); } EXPORT_SYMBOL(complete_and_exit); asmlinkage long sys_exit(int error_code) { do_exit((error_code&0xff)<<8); } /* * Take down every thread in the group. This is called by fatal signals * as well as by sys_exit_group (below). */ NORET_TYPE void do_group_exit(int exit_code) { struct signal_struct *sig = current->signal; BUG_ON(exit_code & 0x80); /* core dumps don't get here */ if (signal_group_exit(sig)) exit_code = sig->group_exit_code; else if (!thread_group_empty(current)) { struct sighand_struct *const sighand = current->sighand; spin_lock_irq(&sighand->siglock); if (signal_group_exit(sig)) /* Another thread got here before we took the lock. */ exit_code = sig->group_exit_code; else { sig->group_exit_code = exit_code; sig->flags = SIGNAL_GROUP_EXIT; zap_other_threads(current); } spin_unlock_irq(&sighand->siglock); } do_exit(exit_code); /* NOTREACHED */ } /* * this kills every thread in the thread group. Note that any externally * wait4()-ing process will get the correct exit code - even if this * thread is not the thread group leader. 
*/ asmlinkage void sys_exit_group(int error_code) { do_group_exit((error_code & 0xff) << 8); } static struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { struct pid *pid = NULL; if (type == PIDTYPE_PID) pid = task->pids[type].pid; else if (type < PIDTYPE_MAX) pid = task->group_leader->pids[type].pid; return pid; } static int eligible_child(enum pid_type type, struct pid *pid, int options, struct task_struct *p) { int err; if (type < PIDTYPE_MAX) { if (task_pid_type(p, type) != pid) return 0; } /* Wait for all children (clone and not) if __WALL is set; * otherwise, wait for clone children *only* if __WCLONE is * set; otherwise, wait for non-clone children *only*. (Note: * A "clone" child here is one that reports to its parent * using a signal other than SIGCHLD.) */ if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0)) && !(options & __WALL)) return 0; err = security_task_wait(p); if (err) return err; return 1; } static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, int why, int status, struct siginfo __user *infop, struct rusage __user *rusagep) { int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0; put_task_struct(p); if (!retval) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval) retval = put_user(0, &infop->si_errno); if (!retval) retval = put_user((short)why, &infop->si_code); if (!retval) retval = put_user(pid, &infop->si_pid); if (!retval) retval = put_user(uid, &infop->si_uid); if (!retval) retval = put_user(status, &infop->si_status); if (!retval) retval = pid; return retval; } /* * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold * read_lock(&tasklist_lock) on entry. If we return zero, we still hold * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ static int wait_task_zombie(struct task_struct *p, int options, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { unsigned long state; int retval, status, traced; pid_t pid = task_pid_vnr(p); if (!likely(options & WEXITED)) return 0; if (unlikely(options & WNOWAIT)) { uid_t uid = p->uid; int exit_code = p->exit_code; int why, status; get_task_struct(p); read_unlock(&tasklist_lock); if ((exit_code & 0x7f) == 0) { why = CLD_EXITED; status = exit_code >> 8; } else { why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED; status = exit_code & 0x7f; } return wait_noreap_copyout(p, pid, uid, why, status, infop, ru); } /* * Try to move the task's state to DEAD * only one thread is allowed to do this: */ state = xchg(&p->exit_state, EXIT_DEAD); if (state != EXIT_ZOMBIE) { BUG_ON(state != EXIT_DEAD); return 0; } traced = ptrace_reparented(p); if (likely(!traced)) { struct signal_struct *psig; struct signal_struct *sig; struct task_cputime cputime; /* * The resource counters for the group leader are in its * own task_struct. Those for dead threads in the group * are in its signal_struct, as are those for the child * processes it has previously reaped. All these * accumulate in the parent's signal_struct c* fields. * * We don't bother to take a lock here to protect these * p->signal fields, because they are only touched by * __exit_signal, which runs with tasklist_lock * write-locked anyway, and so is excluded here. We do * need to protect the access to p->parent->signal fields, * as other threads in the parent group can be right * here reaping other children at the same time. 
* * We use thread_group_cputime() to get times for the thread * group, which consolidates times for all threads in the * group including the group leader. */ spin_lock_irq(&p->parent->sighand->siglock); psig = p->parent->signal; sig = p->signal; thread_group_cputime(p, &cputime); psig->cutime = cputime_add(psig->cutime, cputime_add(cputime.utime, sig->cutime)); psig->cstime = cputime_add(psig->cstime, cputime_add(cputime.stime, sig->cstime)); psig->cgtime = cputime_add(psig->cgtime, cputime_add(p->gtime, cputime_add(sig->gtime, sig->cgtime))); psig->cmin_flt += p->min_flt + sig->min_flt + sig->cmin_flt; psig->cmaj_flt += p->maj_flt + sig->maj_flt + sig->cmaj_flt; psig->cnvcsw += p->nvcsw + sig->nvcsw + sig->cnvcsw; psig->cnivcsw += p->nivcsw + sig->nivcsw + sig->cnivcsw; psig->cinblock += task_io_get_inblock(p) + sig->inblock + sig->cinblock; psig->coublock += task_io_get_oublock(p) + sig->oublock + sig->coublock; task_io_accounting_add(&psig->ioac, &p->ioac); task_io_accounting_add(&psig->ioac, &sig->ioac); spin_unlock_irq(&p->parent->sighand->siglock); } /* * Now we are sure this task is interesting, and no other * thread can reap it because we set its state to EXIT_DEAD. */ read_unlock(&tasklist_lock); retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; status = (p->signal->flags & SIGNAL_GROUP_EXIT) ? p->signal->group_exit_code : p->exit_code; if (!retval && stat_addr) retval = put_user(status, stat_addr); if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) retval = put_user(0, &infop->si_errno); if (!retval && infop) { int why; if ((status & 0x7f) == 0) { why = CLD_EXITED; status >>= 8; } else { why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; status &= 0x7f; } retval = put_user((short)why, &infop->si_code); if (!retval) retval = put_user(status, &infop->si_status); } if (!retval && infop) retval = put_user(pid, &infop->si_pid); if (!retval && infop) retval = put_user(p->uid, &infop->si_uid); if (!retval) retval = pid; if (traced) { write_lock_irq(&tasklist_lock); /* We dropped tasklist, ptracer could die and untrace */ ptrace_unlink(p); /* * If this is not a detached task, notify the parent. * If it's still not detached after that, don't release * it now. */ if (!task_detached(p)) { do_notify_parent(p, p->exit_signal); if (!task_detached(p)) { p->exit_state = EXIT_ZOMBIE; p = NULL; } } write_unlock_irq(&tasklist_lock); } if (p != NULL) release_task(p); return retval; } /* * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold * read_lock(&tasklist_lock) on entry. If we return zero, we still hold * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ static int wait_task_stopped(int ptrace, struct task_struct *p, int options, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { int retval, exit_code, why; uid_t uid = 0; /* unneeded, required by compiler */ pid_t pid; if (!(options & WUNTRACED)) return 0; exit_code = 0; spin_lock_irq(&p->sighand->siglock); if (unlikely(!task_is_stopped_or_traced(p))) goto unlock_sig; if (!ptrace && p->signal->group_stop_count > 0) /* * A group stop is in progress and this is the group leader. * We won't report until all threads have stopped. 
*/ goto unlock_sig; exit_code = p->exit_code; if (!exit_code) goto unlock_sig; if (!unlikely(options & WNOWAIT)) p->exit_code = 0; uid = p->uid; unlock_sig: spin_unlock_irq(&p->sighand->siglock); if (!exit_code) return 0; /* * Now we are pretty sure this task is interesting. * Make sure it doesn't get reaped out from under us while we * give up the lock and then examine it below. We don't want to * keep holding onto the tasklist_lock while we call getrusage and * possibly take page faults for user memory. */ get_task_struct(p); pid = task_pid_vnr(p); why = ptrace ? CLD_TRAPPED : CLD_STOPPED; read_unlock(&tasklist_lock); if (unlikely(options & WNOWAIT)) return wait_noreap_copyout(p, pid, uid, why, exit_code, infop, ru); retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; if (!retval && stat_addr) retval = put_user((exit_code << 8) | 0x7f, stat_addr); if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) retval = put_user(0, &infop->si_errno); if (!retval && infop) retval = put_user((short)why, &infop->si_code); if (!retval && infop) retval = put_user(exit_code, &infop->si_status); if (!retval && infop) retval = put_user(pid, &infop->si_pid); if (!retval && infop) retval = put_user(uid, &infop->si_uid); if (!retval) retval = pid; put_task_struct(p); BUG_ON(!retval); return retval; } /* * Handle do_wait work for one task in a live, non-stopped state. * read_lock(&tasklist_lock) on entry. If we return zero, we still hold * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ static int wait_task_continued(struct task_struct *p, int options, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { int retval; pid_t pid; uid_t uid; if (!unlikely(options & WCONTINUED)) return 0; if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) return 0; spin_lock_irq(&p->sighand->siglock); /* Re-check with the lock held. */ if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { spin_unlock_irq(&p->sighand->siglock); return 0; } if (!unlikely(options & WNOWAIT)) p->signal->flags &= ~SIGNAL_STOP_CONTINUED; spin_unlock_irq(&p->sighand->siglock); pid = task_pid_vnr(p); uid = p->uid; get_task_struct(p); read_unlock(&tasklist_lock); if (!infop) { retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; put_task_struct(p); if (!retval && stat_addr) retval = put_user(0xffff, stat_addr); if (!retval) retval = pid; } else { retval = wait_noreap_copyout(p, pid, uid, CLD_CONTINUED, SIGCONT, infop, ru); BUG_ON(retval == 0); } return retval; } /* * Consider @p for a wait by @parent. * * -ECHILD should be in *@notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; * then *@notask_error is 0 if @p is an eligible child, * or another error from security_task_wait(), or still -ECHILD. */ static int wait_consider_task(struct task_struct *parent, int ptrace, struct task_struct *p, int *notask_error, enum pid_type type, struct pid *pid, int options, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { int ret = eligible_child(type, pid, options, p); if (!ret) return ret; if (unlikely(ret < 0)) { /* * If we have not yet seen any eligible child, * then let this error code replace -ECHILD. * A permission error will give the user a clue * to look for security policy problems, rather * than for mysterious wait bugs. 
*/ if (*notask_error) *notask_error = ret; } if (likely(!ptrace) && unlikely(p->ptrace)) { /* * This child is hidden by ptrace. * We aren't allowed to see it now, but eventually we will. */ *notask_error = 0; return 0; } if (p->exit_state == EXIT_DEAD) return 0; /* * We don't reap group leaders with subthreads. */ if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p)) return wait_task_zombie(p, options, infop, stat_addr, ru); /* * It's stopped or running now, so it might * later continue, exit, or stop again. */ *notask_error = 0; if (task_is_stopped_or_traced(p)) return wait_task_stopped(ptrace, p, options, infop, stat_addr, ru); return wait_task_continued(p, options, infop, stat_addr, ru); }
/* * Do the work of do_wait() for one thread in the group, @tsk. * * -ECHILD should be in *@notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; then * *@notask_error is 0 if there were any eligible children, * or another error from security_task_wait(), or still -ECHILD. */ static int do_wait_thread(struct task_struct *tsk, int *notask_error, enum pid_type type, struct pid *pid, int options, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { struct task_struct *p; list_for_each_entry(p, &tsk->children, sibling) { /* * Do not consider detached threads. */ if (!task_detached(p)) { int ret = wait_consider_task(tsk, 0, p, notask_error, type, pid, options, infop, stat_addr, ru); if (ret) return ret; } } return 0; }
static int ptrace_do_wait(struct task_struct *tsk, int *notask_error, enum pid_type type, struct pid *pid, int options, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { struct task_struct *p; /* * Traditionally we see ptrace'd stopped tasks regardless of options. */ options |= WUNTRACED; list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { int ret = wait_consider_task(tsk, 1, p, notask_error, type, pid, options, infop, stat_addr, ru); if (ret) return ret; } return 0; }
static long do_wait(enum pid_type type, struct pid *pid, int options, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { DECLARE_WAITQUEUE(wait, current); struct task_struct *tsk; int retval; trace_sched_process_wait(pid); add_wait_queue(&current->signal->wait_chldexit,&wait); repeat: /* * If there is nothing that can match our criteria just get out. * We will clear @retval to zero if we see any child that might later * match our criteria, even if we are not able to reap it yet. */ retval = -ECHILD; if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type]))) goto end; current->state = TASK_INTERRUPTIBLE; read_lock(&tasklist_lock); tsk = current; do { int tsk_result = do_wait_thread(tsk, &retval, type, pid, options, infop, stat_addr, ru); if (!tsk_result) tsk_result = ptrace_do_wait(tsk, &retval, type, pid, options, infop, stat_addr, ru); if (tsk_result) { /* * tasklist_lock is unlocked and we have a final result.
*/ retval = tsk_result; goto end; } if (options & __WNOTHREAD) break; tsk = next_thread(tsk); BUG_ON(tsk->signal != current->signal); } while (tsk != current); read_unlock(&tasklist_lock); if (!retval && !(options & WNOHANG)) { retval = -ERESTARTSYS; if (!signal_pending(current)) { schedule(); goto repeat; } } end: current->state = TASK_RUNNING; remove_wait_queue(&current->signal->wait_chldexit,&wait); if (infop) { if (retval > 0) retval = 0; else { /* * For a WNOHANG return, clear out all the fields * we would set so the user can easily tell the * difference. */ if (!retval) retval = put_user(0, &infop->si_signo); if (!retval) retval = put_user(0, &infop->si_errno); if (!retval) retval = put_user(0, &infop->si_code); if (!retval) retval = put_user(0, &infop->si_pid); if (!retval) retval = put_user(0, &infop->si_uid); if (!retval) retval = put_user(0, &infop->si_status); } } return retval; } asmlinkage long sys_waitid(int which, pid_t upid, struct siginfo __user *infop, int options, struct rusage __user *ru) { struct pid *pid = NULL; enum pid_type type; long ret; if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) return -EINVAL; if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) return -EINVAL; switch (which) { case P_ALL: type = PIDTYPE_MAX; break; case P_PID: type = PIDTYPE_PID; if (upid <= 0) return -EINVAL; break; case P_PGID: type = PIDTYPE_PGID; if (upid <= 0) return -EINVAL; break; default: return -EINVAL; } if (type < PIDTYPE_MAX) pid = find_get_pid(upid); ret = do_wait(type, pid, options, infop, NULL, ru); put_pid(pid); /* avoid REGPARM breakage on x86: */ asmlinkage_protect(5, ret, which, upid, infop, options, ru); return ret; } asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr, int options, struct rusage __user *ru) { struct pid *pid = NULL; enum pid_type type; long ret; if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| __WNOTHREAD|__WCLONE|__WALL)) return -EINVAL; if (upid == -1) type = PIDTYPE_MAX; else if (upid < 0) { type = PIDTYPE_PGID; pid = find_get_pid(-upid); } else if (upid == 0) { type = PIDTYPE_PGID; pid = get_pid(task_pgrp(current)); } else /* upid > 0 */ { type = PIDTYPE_PID; pid = find_get_pid(upid); } ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru); put_pid(pid); /* avoid REGPARM breakage on x86: */ asmlinkage_protect(4, ret, upid, stat_addr, options, ru); return ret; } #ifdef __ARCH_WANT_SYS_WAITPID /* * sys_waitpid() remains for compatibility. waitpid() should be * implemented by calling sys_wait4() from libc.a. */ asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options) { return sys_wait4(pid, stat_addr, options, NULL); } #endif
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3564_0
crossvul-cpp_data_bad_2287_2
/* * linux/fs/ext2/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext2 fs regular file handling primitives * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) */ #include <linux/time.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include "ext2.h" #include "xattr.h" #include "acl.h" /* * Called when filp is released. This happens when all file descriptors * for a single struct file are closed. Note that different open() calls * for the same file yield different struct file structures. */ static int ext2_release_file (struct inode * inode, struct file * filp) { if (filp->f_mode & FMODE_WRITE) { mutex_lock(&EXT2_I(inode)->truncate_mutex); ext2_discard_reservation(inode); mutex_unlock(&EXT2_I(inode)->truncate_mutex); } return 0; } int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync) { int ret; struct super_block *sb = file->f_mapping->host->i_sb; struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; ret = generic_file_fsync(file, start, end, datasync); if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { /* We don't really know where the IO error happened... */ ext2_error(sb, __func__, "detected IO error when writing metadata buffers"); ret = -EIO; } return ret; } /* * We have mostly NULL's here: the current defaults are ok for * the ext2 filesystem. */ const struct file_operations ext2_file_operations = { .llseek = generic_file_llseek, .read = new_sync_read, .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif .mmap = generic_file_mmap, .open = dquot_file_open, .release = ext2_release_file, .fsync = ext2_fsync, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, }; #ifdef CONFIG_EXT2_FS_XIP const struct file_operations ext2_xip_file_operations = { .llseek = generic_file_llseek, .read = xip_file_read, .write = xip_file_write, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif .mmap = xip_file_mmap, .open = dquot_file_open, .release = ext2_release_file, .fsync = ext2_fsync, }; #endif const struct inode_operations ext2_file_inode_operations = { #ifdef CONFIG_EXT2_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext2_listxattr, .removexattr = generic_removexattr, #endif .setattr = ext2_setattr, .get_acl = ext2_get_acl, .set_acl = ext2_set_acl, .fiemap = ext2_fiemap, };
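/*
 * Editor's note: a minimal user-space sketch, not part of the original
 * file. ext2_fsync() above augments generic_file_fsync() so that an
 * I/O error on metadata buffers (latched as AS_EIO on the block
 * device's mapping, and cleared once reported) surfaces to user space
 * as -EIO from fsync(2). The application-side contract it preserves:
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	int fd = open(path, O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "data\n", 5) != 5)
		perror("write");
	/*
	 * A failed metadata writeback makes this call fail with EIO;
	 * handle it here, because the error flag is consumed once the
	 * failure has been reported.
	 */
	if (fsync(fd) < 0)
		fprintf(stderr, "fsync %s: %s\n", path, strerror(errno));
	close(fd);
	return 0;
}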
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2287_2
crossvul-cpp_data_bad_3526_2
/* * Copyright (C) 2003 Sistina Software Limited. * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include <linux/device-mapper.h> #include "dm-path-selector.h" #include "dm-uevent.h" #include <linux/ctype.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/workqueue.h> #include <scsi/scsi_dh.h> #include <linux/atomic.h> #define DM_MSG_PREFIX "multipath" #define DM_PG_INIT_DELAY_MSECS 2000 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) /* Path properties */ struct pgpath { struct list_head list; struct priority_group *pg; /* Owning PG */ unsigned is_active; /* Path status */ unsigned fail_count; /* Cumulative failure count */ struct dm_path path; struct delayed_work activate_path; }; #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) /* * Paths are grouped into Priority Groups and numbered from 1 upwards. * Each has a path selector which controls which path gets used. */ struct priority_group { struct list_head list; struct multipath *m; /* Owning multipath instance */ struct path_selector ps; unsigned pg_num; /* Reference number */ unsigned bypassed; /* Temporarily bypass this PG? */ unsigned nr_pgpaths; /* Number of paths in PG */ struct list_head pgpaths; }; /* Multipath context */ struct multipath { struct list_head list; struct dm_target *ti; spinlock_t lock; const char *hw_handler_name; char *hw_handler_params; unsigned nr_priority_groups; struct list_head priority_groups; wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */ unsigned pg_init_required; /* pg_init needs calling? */ unsigned pg_init_in_progress; /* Only one pg_init allowed at once */ unsigned pg_init_delay_retry; /* Delay pg_init retry? */ unsigned nr_valid_paths; /* Total number of usable paths */ struct pgpath *current_pgpath; struct priority_group *current_pg; struct priority_group *next_pg; /* Switch to this PG if set */ unsigned repeat_count; /* I/Os left before calling PS again */ unsigned queue_io; /* Must we queue all I/O? */ unsigned queue_if_no_path; /* Queue I/O if last path fails? */ unsigned saved_queue_if_no_path;/* Saved state during suspension */ unsigned pg_init_retries; /* Number of times to retry pg_init */ unsigned pg_init_count; /* Number of times pg_init called */ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ struct work_struct process_queued_ios; struct list_head queued_ios; unsigned queue_size; struct work_struct trigger_event; /* * We must use a mempool of dm_mpath_io structs so that we * can resubmit bios on error. */ mempool_t *mpio_pool; struct mutex work_mutex; }; /* * Context information attached to each bio we process. 
*/ struct dm_mpath_io { struct pgpath *pgpath; size_t nr_bytes; }; typedef int (*action_fn) (struct pgpath *pgpath); #define MIN_IOS 256 /* Mempool size */ static struct kmem_cache *_mpio_cache; static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); /*----------------------------------------------- * Allocation routines *-----------------------------------------------*/ static struct pgpath *alloc_pgpath(void) { struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); if (pgpath) { pgpath->is_active = 1; INIT_DELAYED_WORK(&pgpath->activate_path, activate_path); } return pgpath; } static void free_pgpath(struct pgpath *pgpath) { kfree(pgpath); } static struct priority_group *alloc_priority_group(void) { struct priority_group *pg; pg = kzalloc(sizeof(*pg), GFP_KERNEL); if (pg) INIT_LIST_HEAD(&pg->pgpaths); return pg; } static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) { struct pgpath *pgpath, *tmp; struct multipath *m = ti->private; list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { list_del(&pgpath->list); if (m->hw_handler_name) scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); dm_put_device(ti, pgpath->path.dev); free_pgpath(pgpath); } } static void free_priority_group(struct priority_group *pg, struct dm_target *ti) { struct path_selector *ps = &pg->ps; if (ps->type) { ps->type->destroy(ps); dm_put_path_selector(ps->type); } free_pgpaths(&pg->pgpaths, ti); kfree(pg); } static struct multipath *alloc_multipath(struct dm_target *ti) { struct multipath *m; m = kzalloc(sizeof(*m), GFP_KERNEL); if (m) { INIT_LIST_HEAD(&m->priority_groups); INIT_LIST_HEAD(&m->queued_ios); spin_lock_init(&m->lock); m->queue_io = 1; m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; INIT_WORK(&m->process_queued_ios, process_queued_ios); INIT_WORK(&m->trigger_event, trigger_event); init_waitqueue_head(&m->pg_init_wait); mutex_init(&m->work_mutex); m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); if (!m->mpio_pool) { kfree(m); return NULL; } m->ti = ti; ti->private = m; } return m; } static void free_multipath(struct multipath *m) { struct priority_group *pg, *tmp; list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) { list_del(&pg->list); free_priority_group(pg, m->ti); } kfree(m->hw_handler_name); kfree(m->hw_handler_params); mempool_destroy(m->mpio_pool); kfree(m); } /*----------------------------------------------- * Path selection *-----------------------------------------------*/ static void __pg_init_all_paths(struct multipath *m) { struct pgpath *pgpath; unsigned long pg_init_delay = 0; m->pg_init_count++; m->pg_init_required = 0; if (m->pg_init_delay_retry) pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { /* Skip failed paths */ if (!pgpath->is_active) continue; if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, pg_init_delay)) m->pg_init_in_progress++; } } static void __switch_pg(struct multipath *m, struct pgpath *pgpath) { m->current_pg = pgpath->pg; /* Must we initialise the PG first, and queue I/O till it's ready? 
*/ if (m->hw_handler_name) { m->pg_init_required = 1; m->queue_io = 1; } else { m->pg_init_required = 0; m->queue_io = 0; } m->pg_init_count = 0; } static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg, size_t nr_bytes) { struct dm_path *path; path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes); if (!path) return -ENXIO; m->current_pgpath = path_to_pgpath(path); if (m->current_pg != pg) __switch_pg(m, m->current_pgpath); return 0; } static void __choose_pgpath(struct multipath *m, size_t nr_bytes) { struct priority_group *pg; unsigned bypassed = 1; if (!m->nr_valid_paths) goto failed; /* Were we instructed to switch PG? */ if (m->next_pg) { pg = m->next_pg; m->next_pg = NULL; if (!__choose_path_in_pg(m, pg, nr_bytes)) return; } /* Don't change PG until it has no remaining paths */ if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes)) return; /* * Loop through priority groups until we find a valid path. * First time we skip PGs marked 'bypassed'. * Second time we only try the ones we skipped. */ do { list_for_each_entry(pg, &m->priority_groups, list) { if (pg->bypassed == bypassed) continue; if (!__choose_path_in_pg(m, pg, nr_bytes)) return; } } while (bypassed--); failed: m->current_pgpath = NULL; m->current_pg = NULL; } /* * Check whether bios must be queued in the device-mapper core rather * than here in the target. * * m->lock must be held on entry. * * If m->queue_if_no_path and m->saved_queue_if_no_path hold the * same value then we are not between multipath_presuspend() * and multipath_resume() calls and we have no need to check * for the DMF_NOFLUSH_SUSPENDING flag. */ static int __must_push_back(struct multipath *m) { return (m->queue_if_no_path != m->saved_queue_if_no_path && dm_noflush_suspending(m->ti)); } static int map_io(struct multipath *m, struct request *clone, struct dm_mpath_io *mpio, unsigned was_queued) { int r = DM_MAPIO_REMAPPED; size_t nr_bytes = blk_rq_bytes(clone); unsigned long flags; struct pgpath *pgpath; struct block_device *bdev; spin_lock_irqsave(&m->lock, flags); /* Do we need to select a new pgpath? */ if (!m->current_pgpath || (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) __choose_pgpath(m, nr_bytes); pgpath = m->current_pgpath; if (was_queued) m->queue_size--; if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) { /* Queue for the daemon to resubmit */ list_add_tail(&clone->queuelist, &m->queued_ios); m->queue_size++; if ((m->pg_init_required && !m->pg_init_in_progress) || !m->queue_io) queue_work(kmultipathd, &m->process_queued_ios); pgpath = NULL; r = DM_MAPIO_SUBMITTED; } else if (pgpath) { bdev = pgpath->path.dev->bdev; clone->q = bdev_get_queue(bdev); clone->rq_disk = bdev->bd_disk; } else if (__must_push_back(m)) r = DM_MAPIO_REQUEUE; else r = -EIO; /* Failed */ mpio->pgpath = pgpath; mpio->nr_bytes = nr_bytes; if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io) pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, nr_bytes); spin_unlock_irqrestore(&m->lock, flags); return r; } /* * If we run out of usable paths, should we queue I/O or error it? 
*/ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, unsigned save_old_value) { unsigned long flags; spin_lock_irqsave(&m->lock, flags); if (save_old_value) m->saved_queue_if_no_path = m->queue_if_no_path; else m->saved_queue_if_no_path = queue_if_no_path; m->queue_if_no_path = queue_if_no_path; if (!m->queue_if_no_path && m->queue_size) queue_work(kmultipathd, &m->process_queued_ios); spin_unlock_irqrestore(&m->lock, flags); return 0; } /*----------------------------------------------------------------- * The multipath daemon is responsible for resubmitting queued ios. *---------------------------------------------------------------*/ static void dispatch_queued_ios(struct multipath *m) { int r; unsigned long flags; struct dm_mpath_io *mpio; union map_info *info; struct request *clone, *n; LIST_HEAD(cl); spin_lock_irqsave(&m->lock, flags); list_splice_init(&m->queued_ios, &cl); spin_unlock_irqrestore(&m->lock, flags); list_for_each_entry_safe(clone, n, &cl, queuelist) { list_del_init(&clone->queuelist); info = dm_get_rq_mapinfo(clone); mpio = info->ptr; r = map_io(m, clone, mpio, 1); if (r < 0) { mempool_free(mpio, m->mpio_pool); dm_kill_unmapped_request(clone, r); } else if (r == DM_MAPIO_REMAPPED) dm_dispatch_request(clone); else if (r == DM_MAPIO_REQUEUE) { mempool_free(mpio, m->mpio_pool); dm_requeue_unmapped_request(clone); } } } static void process_queued_ios(struct work_struct *work) { struct multipath *m = container_of(work, struct multipath, process_queued_ios); struct pgpath *pgpath = NULL; unsigned must_queue = 1; unsigned long flags; spin_lock_irqsave(&m->lock, flags); if (!m->queue_size) goto out; if (!m->current_pgpath) __choose_pgpath(m, 0); pgpath = m->current_pgpath; if ((pgpath && !m->queue_io) || (!pgpath && !m->queue_if_no_path)) must_queue = 0; if (m->pg_init_required && !m->pg_init_in_progress && pgpath) __pg_init_all_paths(m); out: spin_unlock_irqrestore(&m->lock, flags); if (!must_queue) dispatch_queued_ios(m); } /* * An event is triggered whenever a path is taken out of use. * Includes path failure and PG bypass. 
*/ static void trigger_event(struct work_struct *work) { struct multipath *m = container_of(work, struct multipath, trigger_event); dm_table_event(m->ti->table); } /*----------------------------------------------------------------- * Constructor/argument parsing: * <#multipath feature args> [<arg>]* * <#hw_handler args> [hw_handler [<arg>]*] * <#priority groups> * <initial priority group> * [<selector> <#selector args> [<arg>]* * <#paths> <#per-path selector args> * [<path> [<arg>]* ]+ ]+ *---------------------------------------------------------------*/ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, struct dm_target *ti) { int r; struct path_selector_type *pst; unsigned ps_argc; static struct dm_arg _args[] = { {0, 1024, "invalid number of path selector args"}, }; pst = dm_get_path_selector(dm_shift_arg(as)); if (!pst) { ti->error = "unknown path selector type"; return -EINVAL; } r = dm_read_arg_group(_args, as, &ps_argc, &ti->error); if (r) { dm_put_path_selector(pst); return -EINVAL; } r = pst->create(&pg->ps, ps_argc, as->argv); if (r) { dm_put_path_selector(pst); ti->error = "path selector constructor failed"; return r; } pg->ps.type = pst; dm_consume_args(as, ps_argc); return 0; } static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps, struct dm_target *ti) { int r; struct pgpath *p; struct multipath *m = ti->private; /* we need at least a path arg */ if (as->argc < 1) { ti->error = "no device given"; return ERR_PTR(-EINVAL); } p = alloc_pgpath(); if (!p) return ERR_PTR(-ENOMEM); r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), &p->path.dev); if (r) { ti->error = "error getting device"; goto bad; } if (m->hw_handler_name) { struct request_queue *q = bdev_get_queue(p->path.dev->bdev); r = scsi_dh_attach(q, m->hw_handler_name); if (r == -EBUSY) { /* * Already attached to different hw_handler, * try to reattach with correct one. 
*/ scsi_dh_detach(q); r = scsi_dh_attach(q, m->hw_handler_name); } if (r < 0) { ti->error = "error attaching hardware handler"; dm_put_device(ti, p->path.dev); goto bad; } if (m->hw_handler_params) { r = scsi_dh_set_params(q, m->hw_handler_params); if (r < 0) { ti->error = "unable to set hardware " "handler parameters"; scsi_dh_detach(q); dm_put_device(ti, p->path.dev); goto bad; } } } r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); if (r) { dm_put_device(ti, p->path.dev); goto bad; } return p; bad: free_pgpath(p); return ERR_PTR(r); } static struct priority_group *parse_priority_group(struct dm_arg_set *as, struct multipath *m) { static struct dm_arg _args[] = { {1, 1024, "invalid number of paths"}, {0, 1024, "invalid number of selector args"} }; int r; unsigned i, nr_selector_args, nr_args; struct priority_group *pg; struct dm_target *ti = m->ti; if (as->argc < 2) { as->argc = 0; ti->error = "not enough priority group arguments"; return ERR_PTR(-EINVAL); } pg = alloc_priority_group(); if (!pg) { ti->error = "couldn't allocate priority group"; return ERR_PTR(-ENOMEM); } pg->m = m; r = parse_path_selector(as, pg, ti); if (r) goto bad; /* * read the paths */ r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error); if (r) goto bad; r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error); if (r) goto bad; nr_args = 1 + nr_selector_args; for (i = 0; i < pg->nr_pgpaths; i++) { struct pgpath *pgpath; struct dm_arg_set path_args; if (as->argc < nr_args) { ti->error = "not enough path parameters"; r = -EINVAL; goto bad; } path_args.argc = nr_args; path_args.argv = as->argv; pgpath = parse_path(&path_args, &pg->ps, ti); if (IS_ERR(pgpath)) { r = PTR_ERR(pgpath); goto bad; } pgpath->pg = pg; list_add_tail(&pgpath->list, &pg->pgpaths); dm_consume_args(as, nr_args); } return pg; bad: free_priority_group(pg, ti); return ERR_PTR(r); } static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) { unsigned hw_argc; int ret; struct dm_target *ti = m->ti; static struct dm_arg _args[] = { {0, 1024, "invalid number of hardware handler args"}, }; if (dm_read_arg_group(_args, as, &hw_argc, &ti->error)) return -EINVAL; if (!hw_argc) return 0; m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); request_module("scsi_dh_%s", m->hw_handler_name); if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { ti->error = "unknown hardware handler type"; ret = -EINVAL; goto fail; } if (hw_argc > 1) { char *p; int i, j, len = 4; for (i = 0; i <= hw_argc - 2; i++) len += strlen(as->argv[i]) + 1; p = m->hw_handler_params = kzalloc(len, GFP_KERNEL); if (!p) { ti->error = "memory allocation failed"; ret = -ENOMEM; goto fail; } j = sprintf(p, "%d", hw_argc - 1); for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) j = sprintf(p, "%s", as->argv[i]); } dm_consume_args(as, hw_argc - 1); return 0; fail: kfree(m->hw_handler_name); m->hw_handler_name = NULL; return ret; } static int parse_features(struct dm_arg_set *as, struct multipath *m) { int r; unsigned argc; struct dm_target *ti = m->ti; const char *arg_name; static struct dm_arg _args[] = { {0, 5, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, }; r = dm_read_arg_group(_args, as, &argc, &ti->error); if (r) return -EINVAL; if (!argc) return 0; do { arg_name = dm_shift_arg(as); argc--; if (!strcasecmp(arg_name, "queue_if_no_path")) { r = queue_if_no_path(m, 1, 0); continue; } if (!strcasecmp(arg_name, "pg_init_retries") && (argc >= 1)) { 
r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error); argc--; continue; } if (!strcasecmp(arg_name, "pg_init_delay_msecs") && (argc >= 1)) { r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error); argc--; continue; } ti->error = "Unrecognised multipath feature request"; r = -EINVAL; } while (argc && !r); return r; } static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv) { /* target arguments */ static struct dm_arg _args[] = { {0, 1024, "invalid number of priority groups"}, {0, 1024, "invalid initial priority group number"}, }; int r; struct multipath *m; struct dm_arg_set as; unsigned pg_count = 0; unsigned next_pg_num; as.argc = argc; as.argv = argv; m = alloc_multipath(ti); if (!m) { ti->error = "can't allocate multipath"; return -EINVAL; } r = parse_features(&as, m); if (r) goto bad; r = parse_hw_handler(&as, m); if (r) goto bad; r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error); if (r) goto bad; r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error); if (r) goto bad; if ((!m->nr_priority_groups && next_pg_num) || (m->nr_priority_groups && !next_pg_num)) { ti->error = "invalid initial priority group"; r = -EINVAL; goto bad; } /* parse the priority groups */ while (as.argc) { struct priority_group *pg; pg = parse_priority_group(&as, m); if (IS_ERR(pg)) { r = PTR_ERR(pg); goto bad; } m->nr_valid_paths += pg->nr_pgpaths; list_add_tail(&pg->list, &m->priority_groups); pg_count++; pg->pg_num = pg_count; if (!--next_pg_num) m->next_pg = pg; } if (pg_count != m->nr_priority_groups) { ti->error = "priority group count mismatch"; r = -EINVAL; goto bad; } ti->num_flush_requests = 1; ti->num_discard_requests = 1; return 0; bad: free_multipath(m); return r; } static void multipath_wait_for_pg_init_completion(struct multipath *m) { DECLARE_WAITQUEUE(wait, current); unsigned long flags; add_wait_queue(&m->pg_init_wait, &wait); while (1) { set_current_state(TASK_UNINTERRUPTIBLE); spin_lock_irqsave(&m->lock, flags); if (!m->pg_init_in_progress) { spin_unlock_irqrestore(&m->lock, flags); break; } spin_unlock_irqrestore(&m->lock, flags); io_schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&m->pg_init_wait, &wait); } static void flush_multipath_work(struct multipath *m) { flush_workqueue(kmpath_handlerd); multipath_wait_for_pg_init_completion(m); flush_workqueue(kmultipathd); flush_work_sync(&m->trigger_event); } static void multipath_dtr(struct dm_target *ti) { struct multipath *m = ti->private; flush_multipath_work(m); free_multipath(m); } /* * Map cloned requests */ static int multipath_map(struct dm_target *ti, struct request *clone, union map_info *map_context) { int r; struct dm_mpath_io *mpio; struct multipath *m = (struct multipath *) ti->private; mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC); if (!mpio) /* ENOMEM, requeue */ return DM_MAPIO_REQUEUE; memset(mpio, 0, sizeof(*mpio)); map_context->ptr = mpio; clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; r = map_io(m, clone, mpio, 0); if (r < 0 || r == DM_MAPIO_REQUEUE) mempool_free(mpio, m->mpio_pool); return r; } /* * Take a path out of use. 
*/ static int fail_path(struct pgpath *pgpath) { unsigned long flags; struct multipath *m = pgpath->pg->m; spin_lock_irqsave(&m->lock, flags); if (!pgpath->is_active) goto out; DMWARN("Failing path %s.", pgpath->path.dev->name); pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); pgpath->is_active = 0; pgpath->fail_count++; m->nr_valid_paths--; if (pgpath == m->current_pgpath) m->current_pgpath = NULL; dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, pgpath->path.dev->name, m->nr_valid_paths); schedule_work(&m->trigger_event); out: spin_unlock_irqrestore(&m->lock, flags); return 0; } /* * Reinstate a previously-failed path */ static int reinstate_path(struct pgpath *pgpath) { int r = 0; unsigned long flags; struct multipath *m = pgpath->pg->m; spin_lock_irqsave(&m->lock, flags); if (pgpath->is_active) goto out; if (!pgpath->pg->ps.type->reinstate_path) { DMWARN("Reinstate path not supported by path selector %s", pgpath->pg->ps.type->name); r = -EINVAL; goto out; } r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); if (r) goto out; pgpath->is_active = 1; if (!m->nr_valid_paths++ && m->queue_size) { m->current_pgpath = NULL; queue_work(kmultipathd, &m->process_queued_ios); } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) m->pg_init_in_progress++; } dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, pgpath->path.dev->name, m->nr_valid_paths); schedule_work(&m->trigger_event); out: spin_unlock_irqrestore(&m->lock, flags); return r; } /* * Fail or reinstate all paths that match the provided struct dm_dev. */ static int action_dev(struct multipath *m, struct dm_dev *dev, action_fn action) { int r = -EINVAL; struct pgpath *pgpath; struct priority_group *pg; list_for_each_entry(pg, &m->priority_groups, list) { list_for_each_entry(pgpath, &pg->pgpaths, list) { if (pgpath->path.dev == dev) r = action(pgpath); } } return r; } /* * Temporarily try to avoid having to use the specified PG */ static void bypass_pg(struct multipath *m, struct priority_group *pg, int bypassed) { unsigned long flags; spin_lock_irqsave(&m->lock, flags); pg->bypassed = bypassed; m->current_pgpath = NULL; m->current_pg = NULL; spin_unlock_irqrestore(&m->lock, flags); schedule_work(&m->trigger_event); } /* * Switch to using the specified PG from the next I/O that gets mapped */ static int switch_pg_num(struct multipath *m, const char *pgstr) { struct priority_group *pg; unsigned pgnum; unsigned long flags; if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum || (pgnum > m->nr_priority_groups)) { DMWARN("invalid PG number supplied to switch_pg_num"); return -EINVAL; } spin_lock_irqsave(&m->lock, flags); list_for_each_entry(pg, &m->priority_groups, list) { pg->bypassed = 0; if (--pgnum) continue; m->current_pgpath = NULL; m->current_pg = NULL; m->next_pg = pg; } spin_unlock_irqrestore(&m->lock, flags); schedule_work(&m->trigger_event); return 0; } /* * Set/clear bypassed status of a PG. * PGs are numbered upwards from 1 in the order they were declared. */ static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed) { struct priority_group *pg; unsigned pgnum; if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum || (pgnum > m->nr_priority_groups)) { DMWARN("invalid PG number supplied to bypass_pg"); return -EINVAL; } list_for_each_entry(pg, &m->priority_groups, list) { if (!--pgnum) break; } bypass_pg(m, pg, bypassed); return 0; } /* * Should we retry pg_init immediately? 
*/ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) { unsigned long flags; int limit_reached = 0; spin_lock_irqsave(&m->lock, flags); if (m->pg_init_count <= m->pg_init_retries) m->pg_init_required = 1; else limit_reached = 1; spin_unlock_irqrestore(&m->lock, flags); return limit_reached; } static void pg_init_done(void *data, int errors) { struct pgpath *pgpath = data; struct priority_group *pg = pgpath->pg; struct multipath *m = pg->m; unsigned long flags; unsigned delay_retry = 0; /* device or driver problems */ switch (errors) { case SCSI_DH_OK: break; case SCSI_DH_NOSYS: if (!m->hw_handler_name) { errors = 0; break; } DMERR("Could not failover the device: Handler scsi_dh_%s " "Error %d.", m->hw_handler_name, errors); /* * Fail path for now, so we do not ping pong */ fail_path(pgpath); break; case SCSI_DH_DEV_TEMP_BUSY: /* * Probably doing something like FW upgrade on the * controller so try the other pg. */ bypass_pg(m, pg, 1); break; case SCSI_DH_RETRY: /* Wait before retrying. */ delay_retry = 1; /* fall through */ case SCSI_DH_IMM_RETRY: case SCSI_DH_RES_TEMP_UNAVAIL: if (pg_init_limit_reached(m, pgpath)) fail_path(pgpath); errors = 0; break; default: /* * We probably do not want to fail the path for a device * error, but this is what the old dm did. In future * patches we can do more advanced handling. */ fail_path(pgpath); } spin_lock_irqsave(&m->lock, flags); if (errors) { if (pgpath == m->current_pgpath) { DMERR("Could not failover device. Error %d.", errors); m->current_pgpath = NULL; m->current_pg = NULL; } } else if (!m->pg_init_required) pg->bypassed = 0; if (--m->pg_init_in_progress) /* Activations of other paths are still ongoing */ goto out; if (!m->pg_init_required) m->queue_io = 0; m->pg_init_delay_retry = delay_retry; queue_work(kmultipathd, &m->process_queued_ios); /* * Wake up any thread waiting to suspend. */ wake_up(&m->pg_init_wait); out: spin_unlock_irqrestore(&m->lock, flags); } static void activate_path(struct work_struct *work) { struct pgpath *pgpath = container_of(work, struct pgpath, activate_path.work); scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), pg_init_done, pgpath); } /* * end_io handling */ static int do_end_io(struct multipath *m, struct request *clone, int error, struct dm_mpath_io *mpio) { /* * We don't queue any clone request inside the multipath target * during end I/O handling, since those clone requests don't have * bio clones. If we queue them inside the multipath target, * we need to make bio clones, which requires memory allocation. * (See drivers/md/dm.c:end_clone_bio() about why the clone requests * don't have bio clones.) * Instead of queueing the clone request here, we queue the original * request into dm core, which will remake a clone request and * clone bios for it and resubmit it later.
*/ int r = DM_ENDIO_REQUEUE; unsigned long flags; if (!error && !clone->errors) return 0; /* I/O complete */ if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ) return error; if (mpio->pgpath) fail_path(mpio->pgpath); spin_lock_irqsave(&m->lock, flags); if (!m->nr_valid_paths) { if (!m->queue_if_no_path) { if (!__must_push_back(m)) r = -EIO; } else { if (error == -EBADE) r = error; } } spin_unlock_irqrestore(&m->lock, flags); return r; } static int multipath_end_io(struct dm_target *ti, struct request *clone, int error, union map_info *map_context) { struct multipath *m = ti->private; struct dm_mpath_io *mpio = map_context->ptr; struct pgpath *pgpath = mpio->pgpath; struct path_selector *ps; int r; r = do_end_io(m, clone, error, mpio); if (pgpath) { ps = &pgpath->pg->ps; if (ps->type->end_io) ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); } mempool_free(mpio, m->mpio_pool); return r; } /* * Suspend can't complete until all the I/O is processed so if * the last path fails we must error any remaining I/O. * Note that if the freeze_bdev fails while suspending, the * queue_if_no_path state is lost - userspace should reset it. */ static void multipath_presuspend(struct dm_target *ti) { struct multipath *m = (struct multipath *) ti->private; queue_if_no_path(m, 0, 1); } static void multipath_postsuspend(struct dm_target *ti) { struct multipath *m = ti->private; mutex_lock(&m->work_mutex); flush_multipath_work(m); mutex_unlock(&m->work_mutex); } /* * Restore the queue_if_no_path setting. */ static void multipath_resume(struct dm_target *ti) { struct multipath *m = (struct multipath *) ti->private; unsigned long flags; spin_lock_irqsave(&m->lock, flags); m->queue_if_no_path = m->saved_queue_if_no_path; spin_unlock_irqrestore(&m->lock, flags); } /* * Info output has the following format: * num_multipath_feature_args [multipath_feature_args]* * num_handler_status_args [handler_status_args]* * num_groups init_group_number * [A|D|E num_ps_status_args [ps_status_args]* * num_paths num_selector_args * [path_dev A|F fail_count [selector_args]* ]+ ]+ * * Table output has the following format (identical to the constructor string): * num_feature_args [features_args]* * num_handler_args hw_handler [hw_handler_args]* * num_groups init_group_number * [priority selector-name num_ps_args [ps_args]* * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+ */ static int multipath_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { int sz = 0; unsigned long flags; struct multipath *m = (struct multipath *) ti->private; struct priority_group *pg; struct pgpath *p; unsigned pg_num; char state; spin_lock_irqsave(&m->lock, flags); /* Features */ if (type == STATUSTYPE_INFO) DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); else { DMEMIT("%u ", m->queue_if_no_path + (m->pg_init_retries > 0) * 2 + (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2); if (m->queue_if_no_path) DMEMIT("queue_if_no_path "); if (m->pg_init_retries) DMEMIT("pg_init_retries %u ", m->pg_init_retries); if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); } if (!m->hw_handler_name || type == STATUSTYPE_INFO) DMEMIT("0 "); else DMEMIT("1 %s ", m->hw_handler_name); DMEMIT("%u ", m->nr_priority_groups); if (m->next_pg) pg_num = m->next_pg->pg_num; else if (m->current_pg) pg_num = m->current_pg->pg_num; else pg_num = (m->nr_priority_groups ? 
1 : 0); DMEMIT("%u ", pg_num); switch (type) { case STATUSTYPE_INFO: list_for_each_entry(pg, &m->priority_groups, list) { if (pg->bypassed) state = 'D'; /* Disabled */ else if (pg == m->current_pg) state = 'A'; /* Currently Active */ else state = 'E'; /* Enabled */ DMEMIT("%c ", state); if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, NULL, type, result + sz, maxlen - sz); else DMEMIT("0 "); DMEMIT("%u %u ", pg->nr_pgpaths, pg->ps.type->info_args); list_for_each_entry(p, &pg->pgpaths, list) { DMEMIT("%s %s %u ", p->path.dev->name, p->is_active ? "A" : "F", p->fail_count); if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, &p->path, type, result + sz, maxlen - sz); } } break; case STATUSTYPE_TABLE: list_for_each_entry(pg, &m->priority_groups, list) { DMEMIT("%s ", pg->ps.type->name); if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, NULL, type, result + sz, maxlen - sz); else DMEMIT("0 "); DMEMIT("%u %u ", pg->nr_pgpaths, pg->ps.type->table_args); list_for_each_entry(p, &pg->pgpaths, list) { DMEMIT("%s ", p->path.dev->name); if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, &p->path, type, result + sz, maxlen - sz); } } break; } spin_unlock_irqrestore(&m->lock, flags); return 0; } static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) { int r = -EINVAL; struct dm_dev *dev; struct multipath *m = (struct multipath *) ti->private; action_fn action; mutex_lock(&m->work_mutex); if (dm_suspended(ti)) { r = -EBUSY; goto out; } if (argc == 1) { if (!strcasecmp(argv[0], "queue_if_no_path")) { r = queue_if_no_path(m, 1, 0); goto out; } else if (!strcasecmp(argv[0], "fail_if_no_path")) { r = queue_if_no_path(m, 0, 0); goto out; } } if (argc != 2) { DMWARN("Unrecognised multipath message received."); goto out; } if (!strcasecmp(argv[0], "disable_group")) { r = bypass_pg_num(m, argv[1], 1); goto out; } else if (!strcasecmp(argv[0], "enable_group")) { r = bypass_pg_num(m, argv[1], 0); goto out; } else if (!strcasecmp(argv[0], "switch_group")) { r = switch_pg_num(m, argv[1]); goto out; } else if (!strcasecmp(argv[0], "reinstate_path")) action = reinstate_path; else if (!strcasecmp(argv[0], "fail_path")) action = fail_path; else { DMWARN("Unrecognised multipath message received."); goto out; } r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev); if (r) { DMWARN("message: error getting device %s", argv[1]); goto out; } r = action_dev(m, dev, action); dm_put_device(ti, dev); out: mutex_unlock(&m->work_mutex); return r; } static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct multipath *m = (struct multipath *) ti->private; struct block_device *bdev = NULL; fmode_t mode = 0; unsigned long flags; int r = 0; spin_lock_irqsave(&m->lock, flags); if (!m->current_pgpath) __choose_pgpath(m, 0); if (m->current_pgpath) { bdev = m->current_pgpath->path.dev->bdev; mode = m->current_pgpath->path.dev->mode; } if (m->queue_io) r = -EAGAIN; else if (!bdev) r = -EIO; spin_unlock_irqrestore(&m->lock, flags); return r ? 
: __blkdev_driver_ioctl(bdev, mode, cmd, arg); } static int multipath_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct multipath *m = ti->private; struct priority_group *pg; struct pgpath *p; int ret = 0; list_for_each_entry(pg, &m->priority_groups, list) { list_for_each_entry(p, &pg->pgpaths, list) { ret = fn(ti, p->path.dev, ti->begin, ti->len, data); if (ret) goto out; } } out: return ret; } static int __pgpath_busy(struct pgpath *pgpath) { struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); return dm_underlying_device_busy(q); } /* * We return "busy", only when we can map I/Os but underlying devices * are busy (so even if we map I/Os now, the I/Os will wait on * the underlying queue). * In other words, if we want to kill I/Os or queue them inside us * due to map unavailability, we don't return "busy". Otherwise, * dm core won't give us the I/Os and we can't do what we want. */ static int multipath_busy(struct dm_target *ti) { int busy = 0, has_active = 0; struct multipath *m = ti->private; struct priority_group *pg; struct pgpath *pgpath; unsigned long flags; spin_lock_irqsave(&m->lock, flags); /* Guess which priority_group will be used at next mapping time */ if (unlikely(!m->current_pgpath && m->next_pg)) pg = m->next_pg; else if (likely(m->current_pg)) pg = m->current_pg; else /* * We don't know which pg will be used at next mapping time. * We don't call __choose_pgpath() here to avoid to trigger * pg_init just by busy checking. * So we don't know whether underlying devices we will be using * at next mapping time are busy or not. Just try mapping. */ goto out; /* * If there is one non-busy active path at least, the path selector * will be able to select it. So we consider such a pg as not busy. */ busy = 1; list_for_each_entry(pgpath, &pg->pgpaths, list) if (pgpath->is_active) { has_active = 1; if (!__pgpath_busy(pgpath)) { busy = 0; break; } } if (!has_active) /* * No active path in this pg, so this pg won't be used and * the current_pg will be changed at next mapping time. * We need to try mapping to determine it. */ busy = 0; out: spin_unlock_irqrestore(&m->lock, flags); return busy; } /*----------------------------------------------------------------- * Module setup *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, .map_rq = multipath_map, .rq_end_io = multipath_end_io, .presuspend = multipath_presuspend, .postsuspend = multipath_postsuspend, .resume = multipath_resume, .status = multipath_status, .message = multipath_message, .ioctl = multipath_ioctl, .iterate_devices = multipath_iterate_devices, .busy = multipath_busy, }; static int __init dm_multipath_init(void) { int r; /* allocate a slab for the dm_ios */ _mpio_cache = KMEM_CACHE(dm_mpath_io, 0); if (!_mpio_cache) return -ENOMEM; r = dm_register_target(&multipath_target); if (r < 0) { DMERR("register failed %d", r); kmem_cache_destroy(_mpio_cache); return -EINVAL; } kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); if (!kmultipathd) { DMERR("failed to create workqueue kmpathd"); dm_unregister_target(&multipath_target); kmem_cache_destroy(_mpio_cache); return -ENOMEM; } /* * A separate workqueue is used to handle the device handlers * to avoid overloading existing workqueue. 
Overloading the * old workqueue would also create a bottleneck in the * path of the storage hardware device activation. */ kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd", WQ_MEM_RECLAIM); if (!kmpath_handlerd) { DMERR("failed to create workqueue kmpath_handlerd"); destroy_workqueue(kmultipathd); dm_unregister_target(&multipath_target); kmem_cache_destroy(_mpio_cache); return -ENOMEM; } DMINFO("version %u.%u.%u loaded", multipath_target.version[0], multipath_target.version[1], multipath_target.version[2]); return r; } static void __exit dm_multipath_exit(void) { destroy_workqueue(kmpath_handlerd); destroy_workqueue(kmultipathd); dm_unregister_target(&multipath_target); kmem_cache_destroy(_mpio_cache); } module_init(dm_multipath_init); module_exit(dm_multipath_exit); MODULE_DESCRIPTION(DM_NAME " multipath target"); MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>"); MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3526_2
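The multipath_ioctl() in the record above forwards any ioctl to the currently selected underlying path via __blkdev_driver_ioctl(), even when the multipath target maps only a slice of that device; that unchecked passthrough matches the kind of permission issue this CWE-264 "bad" record is filed under. Below is a minimal sketch of the size-check hardening that a later mainline fix applied, to the best of my recollection of it: multipath_ioctl_checked is a made-up name for illustration, scsi_verify_blk_ioctl() is the block-layer helper that rejects commands unsafe against a partial mapping, and the locking and path selection simply mirror the original function. Treat it as a sketch of the approach, not a drop-in patch.

/*
 * Sketch of the hardened ioctl passthrough: only forward ioctls
 * unrestricted when the target covers the whole underlying device.
 */
static int multipath_ioctl_checked(struct dm_target *ti, unsigned int cmd,
				   unsigned long arg)
{
	struct multipath *m = ti->private;
	struct block_device *bdev = NULL;
	fmode_t mode = 0;
	unsigned long flags;
	int r = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	if (m->current_pgpath) {
		bdev = m->current_pgpath->path.dev->bdev;
		mode = m->current_pgpath->path.dev->mode;
	}

	if (m->queue_io)
		r = -EAGAIN;
	else if (!bdev)
		r = -EIO;

	spin_unlock_irqrestore(&m->lock, flags);

	/* Only pass ioctls through unchecked if the sizes match exactly. */
	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);

	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}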
crossvul-cpp_data_bad_2399_21
/* * Copyright (C)2006 USAGI/WIDE Project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: * Kazunori Miyazawa <miyazawa@linux-ipv6.org> */ #include <crypto/internal/hash.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202, 0x02020202, 0x02020202, 0x02020202, 0x03030303, 0x03030303, 0x03030303, 0x03030303}; /* * +------------------------ * | <parent tfm> * +------------------------ * | xcbc_tfm_ctx * +------------------------ * | consts (block size * 2) * +------------------------ */ struct xcbc_tfm_ctx { struct crypto_cipher *child; u8 ctx[]; }; /* * +------------------------ * | <shash desc> * +------------------------ * | xcbc_desc_ctx * +------------------------ * | odds (block size) * +------------------------ * | prev (block size) * +------------------------ */ struct xcbc_desc_ctx { unsigned int len; u8 ctx[]; }; static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); int bs = crypto_shash_blocksize(parent); u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); int err = 0; u8 key1[bs]; if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) return err; crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs); crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks); return crypto_cipher_setkey(ctx->child, key1, bs); } static int crypto_xcbc_digest_init(struct shash_desc *pdesc) { unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; ctx->len = 0; memset(prev, 0, bs); return 0; } static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); u8 *prev = odds + bs; /* checking the data can fill the block */ if ((ctx->len + len) <= bs) { memcpy(odds + ctx->len, p, len); ctx->len += len; return 0; } /* filling odds with new data and encrypting it */ memcpy(odds + ctx->len, p, bs - ctx->len); len -= bs - ctx->len; p += bs - ctx->len; crypto_xor(prev, odds, bs); crypto_cipher_encrypt_one(tfm, prev, prev); /* clearing the length */ ctx->len = 0; /* encrypting the rest of data */ while (len > bs) { crypto_xor(prev, p, bs); crypto_cipher_encrypt_one(tfm, prev, prev); p += bs; len 
-= bs; } /* keeping the surplus of blocksize */ if (len) { memcpy(odds, p, len); ctx->len = len; } return 0; } static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); u8 *prev = odds + bs; unsigned int offset = 0; if (ctx->len != bs) { unsigned int rlen; u8 *p = odds + ctx->len; *p = 0x80; p++; rlen = bs - ctx->len -1; if (rlen) memset(p, 0, rlen); offset += bs; } crypto_xor(prev, odds, bs); crypto_xor(prev, consts + offset, bs); crypto_cipher_encrypt_one(tfm, out, prev); return 0; } static int xcbc_init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; return 0; }; static void xcbc_exit_tfm(struct crypto_tfm *tfm) { struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_cipher(ctx->child); } static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; struct crypto_alg *alg; unsigned long alignmask; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); if (err) return err; alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return PTR_ERR(alg); switch(alg->cra_blocksize) { case 16: break; default: goto out_put_alg; } inst = shash_alloc_instance("xcbc", alg); err = PTR_ERR(inst); if (IS_ERR(inst)) goto out_put_alg; err = crypto_init_spawn(shash_instance_ctx(inst), alg, shash_crypto_instance(inst), CRYPTO_ALG_TYPE_MASK); if (err) goto out_free_inst; alignmask = alg->cra_alignmask | 3; inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.digestsize = alg->cra_blocksize; inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), crypto_tfm_ctx_alignment()) + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)) + alg->cra_blocksize * 2; inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), alignmask + 1) + alg->cra_blocksize * 2; inst->alg.base.cra_init = xcbc_init_tfm; inst->alg.base.cra_exit = xcbc_exit_tfm; inst->alg.init = crypto_xcbc_digest_init; inst->alg.update = crypto_xcbc_digest_update; inst->alg.final = crypto_xcbc_digest_final; inst->alg.setkey = crypto_xcbc_digest_setkey; err = shash_register_instance(tmpl, inst); if (err) { out_free_inst: shash_free_instance(shash_crypto_instance(inst)); } out_put_alg: crypto_mod_put(alg); return err; } static struct crypto_template crypto_xcbc_tmpl = { .name = "xcbc", .create = xcbc_create, .free = shash_free_instance, .module = THIS_MODULE, }; static int __init crypto_xcbc_module_init(void) { return crypto_register_template(&crypto_xcbc_tmpl); } static void __exit crypto_xcbc_module_exit(void) { crypto_unregister_template(&crypto_xcbc_tmpl); } module_init(crypto_xcbc_module_init); module_exit(crypto_xcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XCBC keyed hash algorithm");
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2399_21
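The xcbc template registered in the record above is reachable from userspace through the kernel crypto API when CONFIG_CRYPTO_USER_API_HASH is enabled. The following sketch binds an AF_ALG hash socket to "xcbc(aes)"; the key and message are arbitrary test values and all error handling is omitted. The kernel side derives K1/K2/K3 in crypto_xcbc_digest_setkey() and runs the update/final pair shown in the file.

/* Userspace sketch: compute an AES-XCBC MAC via AF_ALG. Assumes the
 * kernel exposes "xcbc(aes)" (CONFIG_CRYPTO_USER_API_HASH); error
 * handling is omitted for brevity. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "xcbc(aes)",
	};
	unsigned char key[16] = { 0x01 };	/* K; K1..K3 derived in-kernel */
	unsigned char mac[16];			/* digestsize == AES block size */
	const char *msg = "some message to authenticate";
	int tfmfd, opfd;
	ssize_t n, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));

	opfd = accept(tfmfd, NULL, 0);
	send(opfd, msg, strlen(msg), 0);	/* -> crypto_xcbc_digest_update() */
	n = read(opfd, mac, sizeof(mac));	/* -> crypto_xcbc_digest_final() */

	for (i = 0; i < n; i++)
		printf("%02x", mac[i]);
	putchar('\n');

	close(opfd);
	close(tfmfd);
	return 0;
}

The same socket pattern works for any shash the kernel registers, including the hmac template in the next record.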
crossvul-cpp_data_good_2399_14
/* * Cryptographic API. * * HMAC: Keyed-Hashing for Message Authentication (RFC2104). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * * The HMAC implementation is derived from USAGI. * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/string.h> struct hmac_ctx { struct crypto_shash *hash; }; static inline void *align_ptr(void *p, unsigned int align) { return (void *)ALIGN((unsigned long)p, align); } static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) { return align_ptr(crypto_shash_ctx_aligned(tfm) + crypto_shash_statesize(tfm) * 2, crypto_tfm_ctx_alignment()); } static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); char *ipad = crypto_shash_ctx_aligned(parent); char *opad = ipad + ss; struct hmac_ctx *ctx = align_ptr(opad + ss, crypto_tfm_ctx_alignment()); struct crypto_shash *hash = ctx->hash; SHASH_DESC_ON_STACK(shash, hash); unsigned int i; shash->tfm = hash; shash->flags = crypto_shash_get_flags(parent) & CRYPTO_TFM_REQ_MAY_SLEEP; if (keylen > bs) { int err; err = crypto_shash_digest(shash, inkey, keylen, ipad); if (err) return err; keylen = ds; } else memcpy(ipad, inkey, keylen); memset(ipad + keylen, 0, bs - keylen); memcpy(opad, ipad, bs); for (i = 0; i < bs; i++) { ipad[i] ^= 0x36; opad[i] ^= 0x5c; } return crypto_shash_init(shash) ?: crypto_shash_update(shash, ipad, bs) ?: crypto_shash_export(shash, ipad) ?: crypto_shash_init(shash) ?: crypto_shash_update(shash, opad, bs) ?: crypto_shash_export(shash, opad); } static int hmac_export(struct shash_desc *pdesc, void *out) { struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_export(desc, out); } static int hmac_import(struct shash_desc *pdesc, const void *in) { struct shash_desc *desc = shash_desc_ctx(pdesc); struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); desc->tfm = ctx->hash; desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_import(desc, in); } static int hmac_init(struct shash_desc *pdesc) { return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); } static int hmac_update(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes) { struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_update(desc, data, nbytes); } static int hmac_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); char *opad = crypto_shash_ctx_aligned(parent) + ss; struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_final(desc, out) ?: crypto_shash_import(desc, opad) ?: crypto_shash_finup(desc, out, ds, out); } static int hmac_finup(struct shash_desc *pdesc, const u8 *data, 
unsigned int nbytes, u8 *out) { struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); char *opad = crypto_shash_ctx_aligned(parent) + ss; struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_finup(desc, data, nbytes, out) ?: crypto_shash_import(desc, opad) ?: crypto_shash_finup(desc, out, ds, out); } static int hmac_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *parent = __crypto_shash_cast(tfm); struct crypto_shash *hash; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst); struct hmac_ctx *ctx = hmac_ctx(parent); hash = crypto_spawn_shash(spawn); if (IS_ERR(hash)) return PTR_ERR(hash); parent->descsize = sizeof(struct shash_desc) + crypto_shash_descsize(hash); ctx->hash = hash; return 0; } static void hmac_exit_tfm(struct crypto_tfm *tfm) { struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); crypto_free_shash(ctx->hash); } static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; struct crypto_alg *alg; struct shash_alg *salg; int err; int ds; int ss; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); if (err) return err; salg = shash_attr_alg(tb[1], 0, 0); if (IS_ERR(salg)) return PTR_ERR(salg); err = -EINVAL; ds = salg->digestsize; ss = salg->statesize; alg = &salg->base; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) goto out_put_alg; inst = shash_alloc_instance("hmac", alg); err = PTR_ERR(inst); if (IS_ERR(inst)) goto out_put_alg; err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, shash_crypto_instance(inst)); if (err) goto out_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_alignmask = alg->cra_alignmask; ss = ALIGN(ss, alg->cra_alignmask + 1); inst->alg.digestsize = ds; inst->alg.statesize = ss; inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + ALIGN(ss * 2, crypto_tfm_ctx_alignment()); inst->alg.base.cra_init = hmac_init_tfm; inst->alg.base.cra_exit = hmac_exit_tfm; inst->alg.init = hmac_init; inst->alg.update = hmac_update; inst->alg.final = hmac_final; inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; inst->alg.import = hmac_import; inst->alg.setkey = hmac_setkey; err = shash_register_instance(tmpl, inst); if (err) { out_free_inst: shash_free_instance(shash_crypto_instance(inst)); } out_put_alg: crypto_mod_put(alg); return err; } static struct crypto_template hmac_tmpl = { .name = "hmac", .create = hmac_create, .free = shash_free_instance, .module = THIS_MODULE, }; static int __init hmac_module_init(void) { return crypto_register_template(&hmac_tmpl); } static void __exit hmac_module_exit(void) { crypto_unregister_template(&hmac_tmpl); } module_init(hmac_module_init); module_exit(hmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("HMAC hash algorithm"); MODULE_ALIAS_CRYPTO("hmac");
./CrossVul/dataset_final_sorted/CWE-264/c/good_2399_14
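The hmac_setkey() in the record above hashes over-long keys down to the digest size, XORs the zero-padded key with the 0x36/0x5c pads, and exports the two partially-hashed pad states so that hmac_init() and hmac_final() can import a saved state instead of re-hashing a whole block each time. The standalone sketch below restates the underlying RFC 2104 construction, HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)). toy_hash() is a deliberately non-cryptographic stand-in for H() so the sketch compiles on its own, and the fixed buffers assume messages of at most 256 bytes; both are assumptions for illustration only.

/* Standalone sketch of the RFC 2104 construction precomputed by
 * hmac_setkey(). toy_hash() is a NON-cryptographic stand-in for a
 * real digest; buffers assume mlen <= 256. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 64	/* hash block size (as for sha256) */
#define DS 8	/* digest size of the toy hash */

/* Stand-in for H(): 64-bit FNV-1a emitted as 8 raw bytes. */
static void toy_hash(const uint8_t *p, size_t len, uint8_t out[DS])
{
	uint64_t h = 0xcbf29ce484222325ULL;

	while (len--)
		h = (h ^ *p++) * 0x100000001b3ULL;
	memcpy(out, &h, DS);
}

static void toy_hmac(const uint8_t *key, size_t klen,
		     const uint8_t *msg, size_t mlen, uint8_t mac[DS])
{
	uint8_t kblock[BS] = { 0 };	/* zero-padded key, as in hmac_setkey() */
	uint8_t ipad[BS + 256], opad[BS + DS], inner[DS];
	size_t i;

	if (klen > BS)			/* over-long keys are hashed first */
		toy_hash(key, klen, kblock);
	else
		memcpy(kblock, key, klen);

	for (i = 0; i < BS; i++) {	/* same 0x36/0x5c constants */
		ipad[i] = kblock[i] ^ 0x36;
		opad[i] = kblock[i] ^ 0x5c;
	}

	memcpy(ipad + BS, msg, mlen);	/* inner = H((K' ^ ipad) || m) */
	toy_hash(ipad, BS + mlen, inner);

	memcpy(opad + BS, inner, DS);	/* mac = H((K' ^ opad) || inner) */
	toy_hash(opad, BS + DS, mac);
}

int main(void)
{
	uint8_t mac[DS];
	size_t i;

	toy_hmac((const uint8_t *)"key", 3, (const uint8_t *)"message", 7, mac);
	for (i = 0; i < DS; i++)
		printf("%02x", mac[i]);
	putchar('\n');
	return 0;
}

Exporting the two pad states once per setkey is the design choice that makes each later init cost one state import rather than one extra block compression.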
crossvul-cpp_data_bad_5040_0
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include "irq.h" #include "mmu.h" #include "cpuid.h" #include "lapic.h" #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include <linux/mod_devicetable.h> #include <linux/trace_events.h> #include <linux/slab.h> #include <linux/tboot.h> #include <linux/hrtimer.h> #include "kvm_cache_regs.h" #include "x86.h" #include <asm/cpu.h> #include <asm/io.h> #include <asm/desc.h> #include <asm/vmx.h> #include <asm/virtext.h> #include <asm/mce.h> #include <asm/fpu/internal.h> #include <asm/perf_event.h> #include <asm/debugreg.h> #include <asm/kexec.h> #include <asm/apic.h> #include <asm/irq_remapping.h> #include "trace.h" #include "pmu.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) #define __ex_clear(x, reg) \ ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static const struct x86_cpu_id vmx_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_VMX), {} }; MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); static bool __read_mostly enable_vpid = 1; module_param_named(vpid, enable_vpid, bool, 0444); static bool __read_mostly flexpriority_enabled = 1; module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); static bool __read_mostly enable_ept = 1; module_param_named(ept, enable_ept, bool, S_IRUGO); static bool __read_mostly enable_unrestricted_guest = 1; module_param_named(unrestricted_guest, enable_unrestricted_guest, bool, S_IRUGO); static bool __read_mostly enable_ept_ad_bits = 1; module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); static bool __read_mostly emulate_invalid_guest_state = true; module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly vmm_exclusive = 1; module_param(vmm_exclusive, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); static bool __read_mostly enable_apicv = 1; module_param(enable_apicv, bool, S_IRUGO); static bool __read_mostly enable_shadow_vmcs = 1; module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); /* * If nested=1, nested virtualization is supported, i.e., guests may use * VMX and be a hypervisor for its own guests. If nested=0, guests may not * use VMX instructions. 
*/ static bool __read_mostly nested = 0; module_param(nested, bool, S_IRUGO); static u64 __read_mostly host_xss; static bool __read_mostly enable_pml = 1; module_param_named(pml, enable_pml, bool, S_IRUGO); #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT | X86_CR4_TSD) #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 /* * These 2 parameters are used to config the controls for Pause-Loop Exiting: * ple_gap: upper bound on the amount of time between two successive * executions of PAUSE in a loop. Also indicate if ple enabled. * According to test, this time is usually smaller than 128 cycles. * ple_window: upper bound on the amount of time a guest is allowed to execute * in a PAUSE loop. Tests indicate that most spinlocks are held for * less than 2^12 cycles * Time is measured based on a counter that runs at the same rate as the TSC, * refer SDM volume 3b section 21.6.13 & 22.1.3. */ #define KVM_VMX_DEFAULT_PLE_GAP 128 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \ INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP; module_param(ple_gap, int, S_IRUGO); static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; module_param(ple_window, int, S_IRUGO); /* Default doubles per-vcpu window every exit. */ static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW; module_param(ple_window_grow, int, S_IRUGO); /* Default resets per-vcpu window every exit to ple_window. */ static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK; module_param(ple_window_shrink, int, S_IRUGO); /* Default is to compute the maximum so we can never overflow. */ static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; #define NR_AUTOLOAD_MSRS 8 #define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; u32 abort; char data[0]; }; /* * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs * loaded on this CPU (so we can clear them if the CPU goes down). */ struct loaded_vmcs { struct vmcs *vmcs; int cpu; int launched; struct list_head loaded_vmcss_on_cpu_link; }; struct shared_msr_entry { unsigned index; u64 data; u64 mask; }; /* * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a * single nested guest (L2), hence the name vmcs12. Any VMX implementation has * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is * stored in guest memory specified by VMPTRLD, but is opaque to the guest, * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. * More than one of these structures may exist, if L1 runs multiple L2 guests. 
* nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the * underlying hardware which will be used to run L2. * This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). * If there are changes in this struct, VMCS12_REVISION must be changed. */ typedef u64 natural_width; struct __packed vmcs12 { /* According to the Intel spec, a VMCS region must start with the * following two fields. Then follow implementation-specific data. */ u32 revision_id; u32 abort; u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ u32 padding[7]; /* room for future expansion */ u64 io_bitmap_a; u64 io_bitmap_b; u64 msr_bitmap; u64 vm_exit_msr_store_addr; u64 vm_exit_msr_load_addr; u64 vm_entry_msr_load_addr; u64 tsc_offset; u64 virtual_apic_page_addr; u64 apic_access_addr; u64 posted_intr_desc_addr; u64 ept_pointer; u64 eoi_exit_bitmap0; u64 eoi_exit_bitmap1; u64 eoi_exit_bitmap2; u64 eoi_exit_bitmap3; u64 xss_exit_bitmap; u64 guest_physical_address; u64 vmcs_link_pointer; u64 guest_ia32_debugctl; u64 guest_ia32_pat; u64 guest_ia32_efer; u64 guest_ia32_perf_global_ctrl; u64 guest_pdptr0; u64 guest_pdptr1; u64 guest_pdptr2; u64 guest_pdptr3; u64 guest_bndcfgs; u64 host_ia32_pat; u64 host_ia32_efer; u64 host_ia32_perf_global_ctrl; u64 padding64[8]; /* room for future expansion */ /* * To allow migration of L1 (complete with its L2 guests) between * machines of different natural widths (32 or 64 bit), we cannot have * unsigned long fields with no explicit size. We use u64 (aliased * natural_width) instead. Luckily, x86 is little-endian. */ natural_width cr0_guest_host_mask; natural_width cr4_guest_host_mask; natural_width cr0_read_shadow; natural_width cr4_read_shadow; natural_width cr3_target_value0; natural_width cr3_target_value1; natural_width cr3_target_value2; natural_width cr3_target_value3; natural_width exit_qualification; natural_width guest_linear_address; natural_width guest_cr0; natural_width guest_cr3; natural_width guest_cr4; natural_width guest_es_base; natural_width guest_cs_base; natural_width guest_ss_base; natural_width guest_ds_base; natural_width guest_fs_base; natural_width guest_gs_base; natural_width guest_ldtr_base; natural_width guest_tr_base; natural_width guest_gdtr_base; natural_width guest_idtr_base; natural_width guest_dr7; natural_width guest_rsp; natural_width guest_rip; natural_width guest_rflags; natural_width guest_pending_dbg_exceptions; natural_width guest_sysenter_esp; natural_width guest_sysenter_eip; natural_width host_cr0; natural_width host_cr3; natural_width host_cr4; natural_width host_fs_base; natural_width host_gs_base; natural_width host_tr_base; natural_width host_gdtr_base; natural_width host_idtr_base; natural_width host_ia32_sysenter_esp; natural_width host_ia32_sysenter_eip; natural_width host_rsp; natural_width host_rip; natural_width paddingl[8]; /* room for future expansion */ u32 pin_based_vm_exec_control; u32 cpu_based_vm_exec_control; u32 exception_bitmap; u32 page_fault_error_code_mask; u32 page_fault_error_code_match; u32 cr3_target_count; u32 vm_exit_controls; u32 vm_exit_msr_store_count; u32 vm_exit_msr_load_count; u32 vm_entry_controls; u32 vm_entry_msr_load_count; u32 vm_entry_intr_info_field; u32 vm_entry_exception_error_code; u32 vm_entry_instruction_len; u32 tpr_threshold; u32 secondary_vm_exec_control; u32 vm_instruction_error; u32 vm_exit_reason; u32 vm_exit_intr_info; u32 vm_exit_intr_error_code; u32 idt_vectoring_info_field; u32 idt_vectoring_error_code; u32
vm_exit_instruction_len; u32 vmx_instruction_info; u32 guest_es_limit; u32 guest_cs_limit; u32 guest_ss_limit; u32 guest_ds_limit; u32 guest_fs_limit; u32 guest_gs_limit; u32 guest_ldtr_limit; u32 guest_tr_limit; u32 guest_gdtr_limit; u32 guest_idtr_limit; u32 guest_es_ar_bytes; u32 guest_cs_ar_bytes; u32 guest_ss_ar_bytes; u32 guest_ds_ar_bytes; u32 guest_fs_ar_bytes; u32 guest_gs_ar_bytes; u32 guest_ldtr_ar_bytes; u32 guest_tr_ar_bytes; u32 guest_interruptibility_info; u32 guest_activity_state; u32 guest_sysenter_cs; u32 host_ia32_sysenter_cs; u32 vmx_preemption_timer_value; u32 padding32[7]; /* room for future expansion */ u16 virtual_processor_id; u16 posted_intr_nv; u16 guest_es_selector; u16 guest_cs_selector; u16 guest_ss_selector; u16 guest_ds_selector; u16 guest_fs_selector; u16 guest_gs_selector; u16 guest_ldtr_selector; u16 guest_tr_selector; u16 guest_intr_status; u16 host_es_selector; u16 host_cs_selector; u16 host_ss_selector; u16 host_ds_selector; u16 host_fs_selector; u16 host_gs_selector; u16 host_tr_selector; }; /* * VMCS12_REVISION is an arbitrary id that should be changed if the content or * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. */ #define VMCS12_REVISION 0x11e57ed0 /* * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region * and any VMCS region. Although only sizeof(struct vmcs12) are used by the * current implementation, 4K are reserved to avoid future complications. */ #define VMCS12_SIZE 0x1000 /* Used to remember the last vmcs02 used for some recently used vmcs12s */ struct vmcs02_list { struct list_head list; gpa_t vmptr; struct loaded_vmcs vmcs02; }; /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. */ struct nested_vmx { /* Has the level1 guest done vmxon? */ bool vmxon; gpa_t vmxon_ptr; /* The guest-physical address of the current VMCS L1 keeps for L2 */ gpa_t current_vmptr; /* The host-usable pointer to the above */ struct page *current_vmcs12_page; struct vmcs12 *current_vmcs12; struct vmcs *current_shadow_vmcs; /* * Indicates if the shadow vmcs must be updated with the * data hold by vmcs12 */ bool sync_shadow_vmcs; /* vmcs02_list cache of VMCSs recently used to run L2 guests */ struct list_head vmcs02_pool; int vmcs02_num; u64 vmcs01_tsc_offset; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; /* * Guest pages referred to in vmcs02 with host-physical pointers, so * we must keep them pinned while L2 runs. 
*/ struct page *apic_access_page; struct page *virtual_apic_page; struct page *pi_desc_page; struct pi_desc *pi_desc; bool pi_pending; u16 posted_intr_nv; u64 msr_ia32_feature_control; struct hrtimer preemption_timer; bool preemption_timer_expired; /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ u64 vmcs01_debugctl; u16 vpid02; u16 last_vpid; u32 nested_vmx_procbased_ctls_low; u32 nested_vmx_procbased_ctls_high; u32 nested_vmx_true_procbased_ctls_low; u32 nested_vmx_secondary_ctls_low; u32 nested_vmx_secondary_ctls_high; u32 nested_vmx_pinbased_ctls_low; u32 nested_vmx_pinbased_ctls_high; u32 nested_vmx_exit_ctls_low; u32 nested_vmx_exit_ctls_high; u32 nested_vmx_true_exit_ctls_low; u32 nested_vmx_entry_ctls_low; u32 nested_vmx_entry_ctls_high; u32 nested_vmx_true_entry_ctls_low; u32 nested_vmx_misc_low; u32 nested_vmx_misc_high; u32 nested_vmx_ept_caps; u32 nested_vmx_vpid_caps; }; #define POSTED_INTR_ON 0 #define POSTED_INTR_SN 1 /* Posted-Interrupt Descriptor */ struct pi_desc { u32 pir[8]; /* Posted interrupt requested */ union { struct { /* bit 256 - Outstanding Notification */ u16 on : 1, /* bit 257 - Suppress Notification */ sn : 1, /* bit 271:258 - Reserved */ rsvd_1 : 14; /* bit 279:272 - Notification Vector */ u8 nv; /* bit 287:280 - Reserved */ u8 rsvd_2; /* bit 319:288 - Notification Destination */ u32 ndst; }; u64 control; }; u32 rsvd[6]; } __aligned(64); static bool pi_test_and_set_on(struct pi_desc *pi_desc) { return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static bool pi_test_and_clear_on(struct pi_desc *pi_desc) { return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) { return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } static inline void pi_clear_sn(struct pi_desc *pi_desc) { return clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } static inline void pi_set_sn(struct pi_desc *pi_desc) { return set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } static inline int pi_test_on(struct pi_desc *pi_desc) { return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static inline int pi_test_sn(struct pi_desc *pi_desc) { return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; bool nmi_known_unmasked; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; struct shared_msr_entry *guest_msrs; int nmsrs; int save_nmsrs; unsigned long host_idt_base; #ifdef CONFIG_X86_64 u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. For a nested * guest (L2), it points to a different VMCS. 
*/ struct loaded_vmcs vmcs01; struct loaded_vmcs *loaded_vmcs; bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { unsigned nr; struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; } msr_autoload; struct { int loaded; u16 fs_sel, gs_sel, ldt_sel; #ifdef CONFIG_X86_64 u16 ds_sel, es_sel; #endif int gs_ldt_reload_needed; int fs_reload_needed; u64 msr_host_bndcfgs; unsigned long vmcs_host_cr4; /* May not match real cr4 */ } host_state; struct { int vm86_active; ulong save_rflags; struct kvm_segment segs[8]; } rmode; struct { u32 bitmask; /* 4 bits per segment (1 bit per field) */ struct kvm_save_segment { u16 selector; unsigned long base; u32 limit; u32 ar; } seg[8]; } segment_cache; int vpid; bool emulation_required; /* Support for vnmi-less CPUs */ int soft_vnmi_blocked; ktime_t entry_time; s64 vnmi_blocked_time; u32 exit_reason; /* Posted interrupt descriptor */ struct pi_desc pi_desc; /* Support for a guest hypervisor (nested VMX) */ struct nested_vmx nested; /* Dynamic PLE window. */ int ple_window; bool ple_window_dirty; /* Support for PML */ #define PML_ENTITY_NUM 512 struct page *pml_pg; u64 current_tsc_ratio; bool guest_pkru_valid; u32 guest_pkru; u32 host_pkru; }; enum segment_cache_field { SEG_FIELD_SEL = 0, SEG_FIELD_BASE = 1, SEG_FIELD_LIMIT = 2, SEG_FIELD_AR = 3, SEG_FIELD_NR = 4 }; static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_vmx, vcpu); } static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) { return &(to_vmx(vcpu)->pi_desc); } #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) #define FIELD(number, name) [number] = VMCS12_OFFSET(name) #define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \ [number##_HIGH] = VMCS12_OFFSET(name)+4 static unsigned long shadow_read_only_fields[] = { /* * We do NOT shadow fields that are modified when L0 * traps and emulates any vmx instruction (e.g. VMPTRLD, * VMXON...) executed by L1. * For example, VM_INSTRUCTION_ERROR is read * by L1 if a vmx instruction fails (part of the error path). * Note the code assumes this logic. If for some reason * we start shadowing these fields then we need to * force a shadow sync when L0 emulates vmx instructions * (e.g. 
force a sync if VM_INSTRUCTION_ERROR is modified * by nested_vmx_failValid) */ VM_EXIT_REASON, VM_EXIT_INTR_INFO, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_INFO_FIELD, IDT_VECTORING_ERROR_CODE, VM_EXIT_INTR_ERROR_CODE, EXIT_QUALIFICATION, GUEST_LINEAR_ADDRESS, GUEST_PHYSICAL_ADDRESS }; static int max_shadow_read_only_fields = ARRAY_SIZE(shadow_read_only_fields); static unsigned long shadow_read_write_fields[] = { TPR_THRESHOLD, GUEST_RIP, GUEST_RSP, GUEST_CR0, GUEST_CR3, GUEST_CR4, GUEST_INTERRUPTIBILITY_INFO, GUEST_RFLAGS, GUEST_CS_SELECTOR, GUEST_CS_AR_BYTES, GUEST_CS_LIMIT, GUEST_CS_BASE, GUEST_ES_BASE, GUEST_BNDCFGS, CR0_GUEST_HOST_MASK, CR0_READ_SHADOW, CR4_READ_SHADOW, TSC_OFFSET, EXCEPTION_BITMAP, CPU_BASED_VM_EXEC_CONTROL, VM_ENTRY_EXCEPTION_ERROR_CODE, VM_ENTRY_INTR_INFO_FIELD, VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE, HOST_FS_BASE, HOST_GS_BASE, HOST_FS_SELECTOR, HOST_GS_SELECTOR }; static int max_shadow_read_write_fields = ARRAY_SIZE(shadow_read_write_fields); static const unsigned short vmcs_field_to_offset_table[] = { FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id), FIELD(POSTED_INTR_NV, posted_intr_nv), FIELD(GUEST_ES_SELECTOR, guest_es_selector), FIELD(GUEST_CS_SELECTOR, guest_cs_selector), FIELD(GUEST_SS_SELECTOR, guest_ss_selector), FIELD(GUEST_DS_SELECTOR, guest_ds_selector), FIELD(GUEST_FS_SELECTOR, guest_fs_selector), FIELD(GUEST_GS_SELECTOR, guest_gs_selector), FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector), FIELD(GUEST_TR_SELECTOR, guest_tr_selector), FIELD(GUEST_INTR_STATUS, guest_intr_status), FIELD(HOST_ES_SELECTOR, host_es_selector), FIELD(HOST_CS_SELECTOR, host_cs_selector), FIELD(HOST_SS_SELECTOR, host_ss_selector), FIELD(HOST_DS_SELECTOR, host_ds_selector), FIELD(HOST_FS_SELECTOR, host_fs_selector), FIELD(HOST_GS_SELECTOR, host_gs_selector), FIELD(HOST_TR_SELECTOR, host_tr_selector), FIELD64(IO_BITMAP_A, io_bitmap_a), FIELD64(IO_BITMAP_B, io_bitmap_b), FIELD64(MSR_BITMAP, msr_bitmap), FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr), FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr), FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr), FIELD64(TSC_OFFSET, tsc_offset), FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), FIELD64(APIC_ACCESS_ADDR, apic_access_addr), FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr), FIELD64(EPT_POINTER, ept_pointer), FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0), FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1), FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2), FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3), FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap), FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), FIELD64(GUEST_IA32_PAT, guest_ia32_pat), FIELD64(GUEST_IA32_EFER, guest_ia32_efer), FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl), FIELD64(GUEST_PDPTR0, guest_pdptr0), FIELD64(GUEST_PDPTR1, guest_pdptr1), FIELD64(GUEST_PDPTR2, guest_pdptr2), FIELD64(GUEST_PDPTR3, guest_pdptr3), FIELD64(GUEST_BNDCFGS, guest_bndcfgs), FIELD64(HOST_IA32_PAT, host_ia32_pat), FIELD64(HOST_IA32_EFER, host_ia32_efer), FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl), FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control), FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control), FIELD(EXCEPTION_BITMAP, exception_bitmap), FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask), FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match), FIELD(CR3_TARGET_COUNT, cr3_target_count), 
FIELD(VM_EXIT_CONTROLS, vm_exit_controls), FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count), FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count), FIELD(VM_ENTRY_CONTROLS, vm_entry_controls), FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count), FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field), FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code), FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len), FIELD(TPR_THRESHOLD, tpr_threshold), FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control), FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error), FIELD(VM_EXIT_REASON, vm_exit_reason), FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info), FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code), FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field), FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code), FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len), FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info), FIELD(GUEST_ES_LIMIT, guest_es_limit), FIELD(GUEST_CS_LIMIT, guest_cs_limit), FIELD(GUEST_SS_LIMIT, guest_ss_limit), FIELD(GUEST_DS_LIMIT, guest_ds_limit), FIELD(GUEST_FS_LIMIT, guest_fs_limit), FIELD(GUEST_GS_LIMIT, guest_gs_limit), FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit), FIELD(GUEST_TR_LIMIT, guest_tr_limit), FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit), FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit), FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes), FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes), FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes), FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes), FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes), FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes), FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes), FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes), FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info), FIELD(GUEST_ACTIVITY_STATE, guest_activity_state), FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs), FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs), FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value), FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask), FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask), FIELD(CR0_READ_SHADOW, cr0_read_shadow), FIELD(CR4_READ_SHADOW, cr4_read_shadow), FIELD(CR3_TARGET_VALUE0, cr3_target_value0), FIELD(CR3_TARGET_VALUE1, cr3_target_value1), FIELD(CR3_TARGET_VALUE2, cr3_target_value2), FIELD(CR3_TARGET_VALUE3, cr3_target_value3), FIELD(EXIT_QUALIFICATION, exit_qualification), FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address), FIELD(GUEST_CR0, guest_cr0), FIELD(GUEST_CR3, guest_cr3), FIELD(GUEST_CR4, guest_cr4), FIELD(GUEST_ES_BASE, guest_es_base), FIELD(GUEST_CS_BASE, guest_cs_base), FIELD(GUEST_SS_BASE, guest_ss_base), FIELD(GUEST_DS_BASE, guest_ds_base), FIELD(GUEST_FS_BASE, guest_fs_base), FIELD(GUEST_GS_BASE, guest_gs_base), FIELD(GUEST_LDTR_BASE, guest_ldtr_base), FIELD(GUEST_TR_BASE, guest_tr_base), FIELD(GUEST_GDTR_BASE, guest_gdtr_base), FIELD(GUEST_IDTR_BASE, guest_idtr_base), FIELD(GUEST_DR7, guest_dr7), FIELD(GUEST_RSP, guest_rsp), FIELD(GUEST_RIP, guest_rip), FIELD(GUEST_RFLAGS, guest_rflags), FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions), FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp), FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip), FIELD(HOST_CR0, host_cr0), FIELD(HOST_CR3, host_cr3), FIELD(HOST_CR4, host_cr4), FIELD(HOST_FS_BASE, host_fs_base), FIELD(HOST_GS_BASE, host_gs_base), FIELD(HOST_TR_BASE, host_tr_base), FIELD(HOST_GDTR_BASE, host_gdtr_base), FIELD(HOST_IDTR_BASE, host_idtr_base), FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp), 
FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip), FIELD(HOST_RSP, host_rsp), FIELD(HOST_RIP, host_rip), }; static inline short vmcs_field_to_offset(unsigned long field) { BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || vmcs_field_to_offset_table[field] == 0) return -ENOENT; return vmcs_field_to_offset_table[field]; } static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) { return to_vmx(vcpu)->nested.current_vmcs12; } static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) { struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT); if (is_error_page(page)) return NULL; return page; } static void nested_release_page(struct page *page) { kvm_release_page_dirty(page); } static void nested_release_page_clean(struct page *page) { kvm_release_page_clean(page); } static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); static u64 construct_eptp(unsigned long root_hpa); static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); static bool vmx_xsaves_supported(void); static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static bool guest_state_valid(struct kvm_vcpu *vcpu); static u32 vmx_segment_access_rights(struct kvm_segment *var); static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); static int alloc_identity_pagetable(struct kvm *kvm); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); /* * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it. */ static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu); static DEFINE_PER_CPU(struct desc_ptr, host_gdt); /* * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we * can find which vCPU should be woken up.
*/ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); static unsigned long *vmx_io_bitmap_a; static unsigned long *vmx_io_bitmap_b; static unsigned long *vmx_msr_bitmap_legacy; static unsigned long *vmx_msr_bitmap_longmode; static unsigned long *vmx_msr_bitmap_legacy_x2apic; static unsigned long *vmx_msr_bitmap_longmode_x2apic; static unsigned long *vmx_msr_bitmap_nested; static unsigned long *vmx_vmread_bitmap; static unsigned long *vmx_vmwrite_bitmap; static bool cpu_has_load_ia32_efer; static bool cpu_has_load_perf_global_ctrl; static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); static DEFINE_SPINLOCK(vmx_vpid_lock); static struct vmcs_config { int size; int order; u32 revision_id; u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; } vmcs_config; static struct vmx_capability { u32 ept; u32 vpid; } vmx_capability; #define VMX_SEGMENT_FIELD(seg) \ [VCPU_SREG_##seg] = { \ .selector = GUEST_##seg##_SELECTOR, \ .base = GUEST_##seg##_BASE, \ .limit = GUEST_##seg##_LIMIT, \ .ar_bytes = GUEST_##seg##_AR_BYTES, \ } static const struct kvm_vmx_segment_field { unsigned selector; unsigned base; unsigned limit; unsigned ar_bytes; } kvm_vmx_segment_fields[] = { VMX_SEGMENT_FIELD(CS), VMX_SEGMENT_FIELD(DS), VMX_SEGMENT_FIELD(ES), VMX_SEGMENT_FIELD(FS), VMX_SEGMENT_FIELD(GS), VMX_SEGMENT_FIELD(SS), VMX_SEGMENT_FIELD(TR), VMX_SEGMENT_FIELD(LDTR), }; static u64 host_efer; static void ept_save_pdptrs(struct kvm_vcpu *vcpu); /* * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it * away by decrementing the array size. */ static const u32 vmx_msr_index[] = { #ifdef CONFIG_X86_64 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, #endif MSR_EFER, MSR_TSC_AUX, MSR_STAR, }; static inline bool is_exception_n(u32 intr_info, u8 vector) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK); } static inline bool is_debug(u32 intr_info) { return is_exception_n(intr_info, DB_VECTOR); } static inline bool is_breakpoint(u32 intr_info) { return is_exception_n(intr_info, BP_VECTOR); } static inline bool is_page_fault(u32 intr_info) { return is_exception_n(intr_info, PF_VECTOR); } static inline bool is_no_device(u32 intr_info) { return is_exception_n(intr_info, NM_VECTOR); } static inline bool is_invalid_opcode(u32 intr_info) { return is_exception_n(intr_info, UD_VECTOR); } static inline bool is_external_interrupt(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); } static inline bool is_machine_check(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; } static inline bool cpu_has_vmx_tpr_shadow(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; } static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu) { return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu); } static inline bool cpu_has_secondary_exec_ctrls(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; } static inline bool cpu_has_vmx_virtualize_apic_accesses(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & 
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } static inline bool cpu_has_vmx_virtualize_x2apic_mode(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } static inline bool cpu_has_vmx_apic_register_virt(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_APIC_REGISTER_VIRT; } static inline bool cpu_has_vmx_virtual_intr_delivery(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; } static inline bool cpu_has_vmx_posted_intr(void) { return IS_ENABLED(CONFIG_X86_LOCAL_APIC) && vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; } static inline bool cpu_has_vmx_apicv(void) { return cpu_has_vmx_apic_register_virt() && cpu_has_vmx_virtual_intr_delivery() && cpu_has_vmx_posted_intr(); } static inline bool cpu_has_vmx_flexpriority(void) { return cpu_has_vmx_tpr_shadow() && cpu_has_vmx_virtualize_apic_accesses(); } static inline bool cpu_has_vmx_ept_execute_only(void) { return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT; } static inline bool cpu_has_vmx_ept_2m_page(void) { return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_1g_page(void) { return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_4levels(void) { return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; } static inline bool cpu_has_vmx_ept_ad_bits(void) { return vmx_capability.ept & VMX_EPT_AD_BIT; } static inline bool cpu_has_vmx_invept_context(void) { return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT; } static inline bool cpu_has_vmx_invept_global(void) { return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; } static inline bool cpu_has_vmx_invvpid_single(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; } static inline bool cpu_has_vmx_invvpid_global(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; } static inline bool cpu_has_vmx_ept(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_EPT; } static inline bool cpu_has_vmx_unrestricted_guest(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_UNRESTRICTED_GUEST; } static inline bool cpu_has_vmx_ple(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PAUSE_LOOP_EXITING; } static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) { return flexpriority_enabled && lapic_in_kernel(vcpu); } static inline bool cpu_has_vmx_vpid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_VPID; } static inline bool cpu_has_vmx_rdtscp(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_RDTSCP; } static inline bool cpu_has_vmx_invpcid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_INVPCID; } static inline bool cpu_has_virtual_nmis(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; } static inline bool cpu_has_vmx_wbinvd_exit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_WBINVD_EXITING; } static inline bool cpu_has_vmx_shadow_vmcs(void) { u64 vmx_msr; rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); /* check if the cpu supports writing r/o exit information fields */ if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) return false; return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_SHADOW_VMCS; } static inline bool cpu_has_vmx_pml(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML; } static inline bool cpu_has_vmx_tsc_scaling(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & 
SECONDARY_EXEC_TSC_SCALING; } static inline bool report_flexpriority(void) { return flexpriority_enabled; } static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) { return vmcs12->cpu_based_vm_exec_control & bit; } static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) { return (vmcs12->cpu_based_vm_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && (vmcs12->secondary_vm_exec_control & bit); } static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; } static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER; } static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); } static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES) && vmx_xsaves_supported(); } static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); } static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID); } static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT); } static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); } static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR; } static inline bool is_exception(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); } static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification); static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification); static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { int i; for (i = 0; i < vmx->nmsrs; ++i) if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) return i; return -1; } static inline void __invvpid(int ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; u64 rsvd : 48; u64 gva; } operand = { vpid, 0, gva }; asm volatile (__ex(ASM_VMX_INVVPID) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:" : : "a"(&operand), "c"(ext) : "cc", "memory"); } static inline void __invept(int ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; } operand = {eptp, gpa}; asm volatile (__ex(ASM_VMX_INVEPT) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:\n" : : "a" (&operand), "c" (ext) : "cc", "memory"); } static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) { int i; i = __find_msr_index(vmx, msr); if (i >= 0) return &vmx->guest_msrs[i]; return NULL; } static void vmcs_clear(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", vmcs, phys_addr); } static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) { vmcs_clear(loaded_vmcs->vmcs); loaded_vmcs->cpu = -1; loaded_vmcs->launched = 0; } static void vmcs_load(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna 
%0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}

#ifdef CONFIG_KEXEC_CORE
/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on all cpus. All disabled by
 * default.
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * Ensure that the update to loaded_vmcs->loaded_vmcss_on_cpu_link
	 * happens before setting loaded_vmcs->cpu to -1, which is done in
	 * loaded_vmcs_init.  Otherwise, another cpu could first see cpu == -1
	 * and then add the vmcs into the percpu list before it is deleted.
	 */
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else
		vpid_sync_vcpu_global();
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) &&
((field) & 0x6000) == 0x6000, "32-bit accessor invalid for natural width field"); } static __always_inline void vmcs_check64(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, "64-bit accessor invalid for 16-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, "64-bit accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, "64-bit accessor invalid for 32-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, "64-bit accessor invalid for natural width field"); } static __always_inline void vmcs_checkl(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, "Natural width accessor invalid for 16-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, "Natural width accessor invalid for 64-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, "Natural width accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, "Natural width accessor invalid for 32-bit field"); } static __always_inline unsigned long __vmcs_readl(unsigned long field) { unsigned long value; asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") : "=a"(value) : "d"(field) : "cc"); return value; } static __always_inline u16 vmcs_read16(unsigned long field) { vmcs_check16(field); return __vmcs_readl(field); } static __always_inline u32 vmcs_read32(unsigned long field) { vmcs_check32(field); return __vmcs_readl(field); } static __always_inline u64 vmcs_read64(unsigned long field) { vmcs_check64(field); #ifdef CONFIG_X86_64 return __vmcs_readl(field); #else return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32); #endif } static __always_inline unsigned long vmcs_readl(unsigned long field) { vmcs_checkl(field); return __vmcs_readl(field); } static noinline void vmwrite_error(unsigned long field, unsigned long value) { printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); dump_stack(); } static __always_inline void __vmcs_writel(unsigned long field, unsigned long value) { u8 error; asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" : "=q"(error) : "a"(value), "d"(field) : "cc"); if (unlikely(error)) vmwrite_error(field, value); } static __always_inline void vmcs_write16(unsigned long field, u16 value) { vmcs_check16(field); __vmcs_writel(field, value); } static __always_inline void vmcs_write32(unsigned long field, u32 value) { vmcs_check32(field); __vmcs_writel(field, value); } static __always_inline void vmcs_write64(unsigned long field, u64 value) { vmcs_check64(field); __vmcs_writel(field, value); #ifndef CONFIG_X86_64 asm volatile (""); __vmcs_writel(field+1, value >> 32); #endif } static __always_inline void vmcs_writel(unsigned long field, unsigned long value) { vmcs_checkl(field); __vmcs_writel(field, value); } static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_clear_bits does not support 64-bit fields"); __vmcs_writel(field, __vmcs_readl(field) & ~mask); } static __always_inline void vmcs_set_bits(unsigned long field, u32 mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_set_bits does not support 64-bit fields"); __vmcs_writel(field, 
__vmcs_readl(field) | mask); } static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_ENTRY_CONTROLS, val); vmx->vm_entry_controls_shadow = val; } static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_entry_controls_shadow != val) vm_entry_controls_init(vmx, val); } static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_entry_controls_shadow; } static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); } static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); } static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_EXIT_CONTROLS, val); vmx->vm_exit_controls_shadow = val; } static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_exit_controls_shadow != val) vm_exit_controls_init(vmx, val); } static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_exit_controls_shadow; } static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); } static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); } static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) { vmx->segment_cache.bitmask = 0; } static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, unsigned field) { bool ret; u32 mask = 1 << (seg * SEG_FIELD_NR + field); if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); vmx->segment_cache.bitmask = 0; } ret = vmx->segment_cache.bitmask & mask; vmx->segment_cache.bitmask |= mask; return ret; } static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) { u16 *p = &vmx->segment_cache.seg[seg].selector; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); return *p; } static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) { ulong *p = &vmx->segment_cache.seg[seg].base; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); return *p; } static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].limit; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); return *p; } static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].ar; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); return *p; } static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR); if ((vcpu->guest_debug & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) eb |= 1u << BP_VECTOR; if (to_vmx(vcpu)->rmode.vm86_active) eb = ~0; if (enable_ept) eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ if (vcpu->fpu_active) eb &= ~(1u << NM_VECTOR); /* When we are running a nested L2 guest and L1 specified for it a * certain exception bitmap, we must trap the same exceptions and pass * them to L1. 
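 * For instance (illustrative values), if this eb ends up as
 * (1u << PF_VECTOR) and L1's vmcs12->exception_bitmap additionally sets
 * (1u << DB_VECTOR), the merge below leaves both #PF and #DB trapped
 * while L2 runs.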
When running L2, we will only handle the exceptions * specified above if L1 did not want them. */ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; vmcs_write32(EXCEPTION_BITMAP, eb); } static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { vm_entry_controls_clearbit(vmx, entry); vm_exit_controls_clearbit(vmx, exit); } static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); return; } break; } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == m->nr) return; --m->nr; m->guest[i] = m->guest[m->nr]; m->host[i] = m->host[m->nr]; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit, unsigned long guest_val_vmcs, unsigned long host_val_vmcs, u64 guest_val, u64 host_val) { vmcs_write64(guest_val_vmcs, guest_val); vmcs_write64(host_val_vmcs, host_val); vm_entry_controls_setbit(vmx, entry); vm_exit_controls_setbit(vmx, exit); } static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER, GUEST_IA32_EFER, HOST_IA32_EFER, guest_val, host_val); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, GUEST_IA32_PERF_GLOBAL_CTRL, HOST_IA32_PERF_GLOBAL_CTRL, guest_val, host_val); return; } break; case MSR_IA32_PEBS_ENABLE: /* PEBS needs a quiescent period after being disabled (to write * a record). Disabling PEBS through VMX MSR swapping doesn't * provide that period, so a CPU could write host's record into * guest's memory. */ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == NR_AUTOLOAD_MSRS) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; } else if (i == m->nr) { ++m->nr; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } m->guest[i].index = msr; m->guest[i].value = guest_val; m->host[i].index = msr; m->host[i].value = host_val; } static void reload_tss(void) { /* * VT restores TR but not its size. Useless. */ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); struct desc_struct *descs; descs = (void *)gdt->address; descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ load_TR_desc(); } static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { u64 guest_efer = vmx->vcpu.arch.efer; u64 ignore_bits = 0; if (!enable_ept) { /* * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing * host CPUID is more efficient than testing guest CPUID * or CR4. Host SMEP is anyway a requirement for guest SMEP. 
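 *
 * Concretely, the effect of the branch below is (a sketch of the
 * outcome, not extra code): with host SMEP, guest_efer |= EFER_NX so
 * the NX semantics the shadow MMU relies on stay in force; without
 * host SMEP, a guest-cleared EFER.NX is simply tolerated via
 * ignore_bits |= EFER_NX.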
*/ if (boot_cpu_has(X86_FEATURE_SMEP)) guest_efer |= EFER_NX; else if (!(guest_efer & EFER_NX)) ignore_bits |= EFER_NX; } /* * LMA and LME handled by hardware; SCE meaningless outside long mode. */ ignore_bits |= EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif clear_atomic_switch_msr(vmx, MSR_EFER); /* * On EPT, we can't emulate NX, so we must switch EFER atomically. * On CPUs that support "load IA32_EFER", always switch EFER * atomically, since it's faster than switching it manually. */ if (cpu_has_load_ia32_efer || (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; } else { guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; vmx->guest_msrs[efer_offset].data = guest_efer; vmx->guest_msrs[efer_offset].mask = ~ignore_bits; return true; } } static unsigned long segment_base(u16 selector) { struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); struct desc_struct *d; unsigned long table_base; unsigned long v; if (!(selector & ~3)) return 0; table_base = gdt->address; if (selector & 4) { /* from ldt */ u16 ldt_selector = kvm_read_ldt(); if (!(ldt_selector & ~3)) return 0; table_base = segment_base(ldt_selector); } d = (struct desc_struct *)(table_base + (selector & ~7)); v = get_desc_base(d); #ifdef CONFIG_X86_64 if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11)) v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32; #endif return v; } static inline unsigned long kvm_read_tr_base(void) { u16 tr; asm("str %0" : "=g"(tr)); return segment_base(tr); } static void vmx_save_host_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int i; if (vmx->host_state.loaded) return; vmx->host_state.loaded = 1; /* * Set host fs and gs selectors. Unfortunately, 22.2.3 does not * allow segment selectors with cpl > 0 or ti == 1. 
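 *
 * The "& 7" tests below check exactly those two constraints: selector
 * bits 1:0 are the RPL and bit 2 is the TI (LDT) flag. E.g. a selector
 * of 0x2b (GDT index 5, RPL 3) fails the test, so 0 is written to the
 * VMCS and the real value is reloaded by hand on the way back to the
 * host.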
*/ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; savesegment(fs, vmx->host_state.fs_sel); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; } else { vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } savesegment(gs, vmx->host_state.gs_sel); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { vmcs_write16(HOST_GS_SELECTOR, 0); vmx->host_state.gs_ldt_reload_needed = 1; } #ifdef CONFIG_X86_64 savesegment(ds, vmx->host_state.ds_sel); savesegment(es, vmx->host_state.es_sel); #endif #ifdef CONFIG_X86_64 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); #else vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); #endif #ifdef CONFIG_X86_64 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); if (is_long_mode(&vmx->vcpu)) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (boot_cpu_has(X86_FEATURE_MPX)) rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); for (i = 0; i < vmx->save_nmsrs; ++i) kvm_set_shared_msr(vmx->guest_msrs[i].index, vmx->guest_msrs[i].data, vmx->guest_msrs[i].mask); } static void __vmx_load_host_state(struct vcpu_vmx *vmx) { if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); #ifdef CONFIG_X86_64 load_gs_index(vmx->host_state.gs_sel); #else loadsegment(gs, vmx->host_state.gs_sel); #endif } if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); #ifdef CONFIG_X86_64 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } #endif reload_tss(); #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif if (vmx->host_state.msr_host_bndcfgs) wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); /* * If the FPU is not active (through the host task or * the guest vcpu), then restore the cr0.TS bit. */ if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded) stts(); load_gdt(this_cpu_ptr(&host_gdt)); } static void vmx_load_host_state(struct vcpu_vmx *vmx) { preempt_disable(); __vmx_load_host_state(vmx); preempt_enable(); } static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); struct pi_desc old, new; unsigned int dest; if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP)) return; do { old.control = new.control = pi_desc->control; /* * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there * are two possible cases: * 1. After running 'pre_block', context switch * happened. For this case, 'sn' was set in * vmx_vcpu_put(), so we need to clear it here. * 2. After running 'pre_block', we were blocked, * and woken up by some other guy. For this case, * we don't need to do anything, 'pi_post_block' * will do everything for us. However, we cannot * check whether it is case #1 or case #2 here * (maybe, not needed), so we also clear sn here, * I think it is not a big deal. 
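 *
 * Net effect of the cmpxchg loop below (sketch):
 *
 *	if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
 *		if (vcpu->cpu != cpu)
 *			new.ndst = <this cpu's APIC destination id>;
 *		new.nv = POSTED_INTR_VECTOR;
 *	}
 *	new.sn = 0;	(always, per the two cases above)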
*/ if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) { if (vcpu->cpu != cpu) { dest = cpu_physical_id(cpu); if (x2apic_enabled()) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; } /* set 'NV' to 'notification vector' */ new.nv = POSTED_INTR_VECTOR; } /* Allow posting non-urgent interrupts */ new.sn = 0; } while (cmpxchg(&pi_desc->control, old.control, new.control) != old.control); } /* * Switches to specified vcpu, until a matching vcpu_put(), but assumes * vcpu mutex is already taken. */ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); if (!vmm_exclusive) kvm_cpu_vmxon(phys_addr); else if (vmx->loaded_vmcs->cpu != cpu) loaded_vmcs_clear(vmx->loaded_vmcs); if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; vmcs_load(vmx->loaded_vmcs->vmcs); } if (vmx->loaded_vmcs->cpu != cpu) { struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); unsigned long sysenter_esp; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); local_irq_disable(); crash_disable_local_vmclear(cpu); /* * Read loaded_vmcs->cpu should be before fetching * loaded_vmcs->loaded_vmcss_on_cpu_link. * See the comments in __loaded_vmcs_clear(). */ smp_rmb(); list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, &per_cpu(loaded_vmcss_on_cpu, cpu)); crash_enable_local_vmclear(cpu); local_irq_enable(); /* * Linux uses per-cpu TSS and GDT, so set these when switching * processors. */ vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ vmx->loaded_vmcs->cpu = cpu; } /* Setup TSC multiplier */ if (kvm_has_tsc_control && vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) { vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio; vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); } vmx_vcpu_pi_load(vcpu, cpu); vmx->host_pkru = read_pkru(); } static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP)) return; /* Set SN when the vCPU is preempted */ if (vcpu->preempted) pi_set_sn(pi_desc); } static void vmx_vcpu_put(struct kvm_vcpu *vcpu) { vmx_vcpu_pi_put(vcpu); __vmx_load_host_state(to_vmx(vcpu)); if (!vmm_exclusive) { __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs); vcpu->cpu = -1; kvm_cpu_vmxoff(); } } static void vmx_fpu_activate(struct kvm_vcpu *vcpu) { ulong cr0; if (vcpu->fpu_active) return; vcpu->fpu_active = 1; cr0 = vmcs_readl(GUEST_CR0); cr0 &= ~(X86_CR0_TS | X86_CR0_MP); cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP); vmcs_writel(GUEST_CR0, cr0); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; if (is_guest_mode(vcpu)) vcpu->arch.cr0_guest_owned_bits &= ~get_vmcs12(vcpu)->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); /* * Return the cr0 value that a nested guest would read. This is a combination * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by * its hypervisor (cr0_read_shadow). 
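 *
 * Worked example (hypothetical values): with cr0_guest_host_mask =
 * X86_CR0_TS (0x8), guest_cr0 = 0x80000033 and cr0_read_shadow = 0x8,
 * the guest observes (0x80000033 & ~0x8) | (0x8 & 0x8) = 0x8000003b:
 * every bit comes from guest_cr0 except TS, which comes from the shadow.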
*/ static inline unsigned long nested_read_cr0(struct vmcs12 *fields) { return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) | (fields->cr0_read_shadow & fields->cr0_guest_host_mask); } static inline unsigned long nested_read_cr4(struct vmcs12 *fields) { return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) | (fields->cr4_read_shadow & fields->cr4_guest_host_mask); } static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) { /* Note that there is no vcpu->fpu_active = 0 here. The caller must * set this *before* calling this function. */ vmx_decache_cr0_guest_bits(vcpu); vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = 0; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); if (is_guest_mode(vcpu)) { /* * L1's specified read shadow might not contain the TS bit, * so now that we turned on shadowing of this bit, we need to * set this bit of the shadow. Like in nested_vmx_run we need * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet * up-to-date here because we just decached cr0.TS (and we'll * only update vmcs12->guest_cr0 on nested exit). */ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) | (vcpu->arch.cr0 & X86_CR0_TS); vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); } else vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); } static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags, save_rflags; if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); rflags = vmcs_readl(GUEST_RFLAGS); if (to_vmx(vcpu)->rmode.vm86_active) { rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; save_rflags = to_vmx(vcpu)->rmode.save_rflags; rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; } to_vmx(vcpu)->rflags = rflags; } return to_vmx(vcpu)->rflags; } static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); to_vmx(vcpu)->rflags = rflags; if (to_vmx(vcpu)->rmode.vm86_active) { to_vmx(vcpu)->rmode.save_rflags = rflags; rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; } vmcs_writel(GUEST_RFLAGS, rflags); } static u32 vmx_get_pkru(struct kvm_vcpu *vcpu) { return to_vmx(vcpu)->guest_pkru; } static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) { u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); int ret = 0; if (interruptibility & GUEST_INTR_STATE_STI) ret |= KVM_X86_SHADOW_INT_STI; if (interruptibility & GUEST_INTR_STATE_MOV_SS) ret |= KVM_X86_SHADOW_INT_MOV_SS; return ret; } static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) { u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); u32 interruptibility = interruptibility_old; interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); if (mask & KVM_X86_SHADOW_INT_MOV_SS) interruptibility |= GUEST_INTR_STATE_MOV_SS; else if (mask & KVM_X86_SHADOW_INT_STI) interruptibility |= GUEST_INTR_STATE_STI; if ((interruptibility != interruptibility_old)) vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); } static void skip_emulated_instruction(struct kvm_vcpu *vcpu) { unsigned long rip; rip = kvm_rip_read(vcpu); rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_rip_write(vcpu, rip); /* skipping an emulated instruction also counts */ vmx_set_interrupt_shadow(vcpu, 0); } /* * KVM wants to inject page-faults which it got to the guest. 
This function * checks whether in a nested guest, we need to inject them to L1 or L2. */ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; } static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code, bool reinject) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info = nr | INTR_INFO_VALID_MASK; if (!reinject && is_guest_mode(vcpu) && nested_vmx_check_exception(vcpu, nr)) return; if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; } if (vmx->rmode.vm86_active) { int inc_eip = 0; if (kvm_exception_is_soft(nr)) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } if (kvm_exception_is_soft(nr)) { vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); intr_info |= INTR_TYPE_SOFT_EXCEPTION; } else intr_info |= INTR_TYPE_HARD_EXCEPTION; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); } static bool vmx_rdtscp_supported(void) { return cpu_has_vmx_rdtscp(); } static bool vmx_invpcid_supported(void) { return cpu_has_vmx_invpcid() && enable_ept; } /* * Swap MSR entry in host/guest MSR entry array. */ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) { struct shared_msr_entry tmp; tmp = vmx->guest_msrs[to]; vmx->guest_msrs[to] = vmx->guest_msrs[from]; vmx->guest_msrs[from] = tmp; } static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) { unsigned long *msr_bitmap; if (is_guest_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_nested; else if (vcpu->arch.apic_base & X2APIC_ENABLE) { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic; else msr_bitmap = vmx_msr_bitmap_legacy_x2apic; } else { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode; else msr_bitmap = vmx_msr_bitmap_legacy; } vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); } /* * Set up the vmcs to automatically save and restore system * msrs. Don't touch the 64-bit msrs if the guest is in legacy * mode, as fiddling with msrs is very expensive. */ static void setup_msrs(struct vcpu_vmx *vmx) { int save_nmsrs, index; save_nmsrs = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) { index = __find_msr_index(vmx, MSR_SYSCALL_MASK); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_LSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_CSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_TSC_AUX); if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu)) move_msr_up(vmx, index, save_nmsrs++); /* * MSR_STAR is only needed on long mode guests, and only * if efer.sce is enabled. 
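 * (SYSCALL raises #UD while EFER.SCE is clear, so there is nothing
 * worth switching for such guests and the slot is simply skipped.)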
*/ index = __find_msr_index(vmx, MSR_STAR); if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) move_msr_up(vmx, index, save_nmsrs++); } #endif index = __find_msr_index(vmx, MSR_EFER); if (index >= 0 && update_transition_efer(vmx, index)) move_msr_up(vmx, index, save_nmsrs++); vmx->save_nmsrs = save_nmsrs; if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(&vmx->vcpu); } /* * reads and returns guest's timestamp counter "register" * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 */ static u64 guest_read_tsc(struct kvm_vcpu *vcpu) { u64 host_tsc, tsc_offset; host_tsc = rdtsc(); tsc_offset = vmcs_read64(TSC_OFFSET); return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; } /* * Like guest_read_tsc, but always returns L1's notion of the timestamp * counter, even if a nested guest (L2) is currently running. */ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) { u64 tsc_offset; tsc_offset = is_guest_mode(vcpu) ? to_vmx(vcpu)->nested.vmcs01_tsc_offset : vmcs_read64(TSC_OFFSET); return host_tsc + tsc_offset; } static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu) { return vmcs_read64(TSC_OFFSET); } /* * writes 'offset' into guest's timestamp counter offset register */ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { if (is_guest_mode(vcpu)) { /* * We're here if L1 chose not to trap WRMSR to TSC. According * to the spec, this should set L1's TSC; The offset that L1 * set for L2 remains unchanged, and still needs to be added * to the newly set TSC to get L2's TSC. */ struct vmcs12 *vmcs12; to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; /* recalculate vmcs02.TSC_OFFSET: */ vmcs12 = get_vmcs12(vcpu); vmcs_write64(TSC_OFFSET, offset + (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? vmcs12->tsc_offset : 0)); } else { trace_kvm_write_tsc_offset(vcpu->vcpu_id, vmcs_read64(TSC_OFFSET), offset); vmcs_write64(TSC_OFFSET, offset); } } static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment) { u64 offset = vmcs_read64(TSC_OFFSET); vmcs_write64(TSC_OFFSET, offset + adjustment); if (is_guest_mode(vcpu)) { /* Even when running L2, the adjustment needs to apply to L1 */ to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; } else trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset, offset + adjustment); } static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31))); } /* * nested_vmx_allowed() checks whether a guest should be allowed to use VMX * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for * all guests if the "nested" module option is off, and can also be disabled * for a single guest by disabling its VMX cpuid bit. */ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) { return nested && guest_cpuid_has_vmx(vcpu); } /* * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be * returned for the various VMX controls MSRs when nested VMX is enabled. * The same values should also be used to verify that vmcs12 control fields are * valid during nested entry from L1 to L2. * Each of these control msrs has a low and high 32-bit half: A low bit is on * if the corresponding bit in the (32-bit) control field *must* be on, and a * bit in the high half is on if the corresponding bit in the control field * may be on. See also vmx_control_verify(). 
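 *
 * Worked example of that verification (hypothetical values): with
 * low = 0x16 and high = 0x401e, a requested control word of 0x1e is
 * valid, since ((0x1e & 0x401e) | 0x16) == 0x1e, while 0x0e is not:
 * bit 4 is must-be-one in low but clear in the request.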
*/ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) { /* * Note that as a general rule, the high half of the MSRs (bits in * the control fields which may be 1) should be initialized by the * intersection of the underlying hardware's MSR (i.e., features which * can be supported) and the list of features we want to expose - * because they are known to be properly supported in our code. * Also, usually, the low half of the MSRs (bits which must be 1) can * be set to 0, meaning that L1 may turn off any of these bits. The * reason is that if one of these bits is necessary, it will appear * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control * fields of vmcs01 and vmcs02, will turn these bits off - and * nested_vmx_exit_handled() will not pass related exits to L1. * These rules have exceptions below. */ /* pin-based controls */ rdmsr(MSR_IA32_VMX_PINBASED_CTLS, vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high); vmx->nested.nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; vmx->nested.nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | PIN_BASED_VMX_PREEMPTION_TIMER; if (kvm_vcpu_apicv_active(&vmx->vcpu)) vmx->nested.nested_vmx_pinbased_ctls_high |= PIN_BASED_POSTED_INTR; /* exit controls */ rdmsr(MSR_IA32_VMX_EXIT_CTLS, vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high); vmx->nested.nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_exit_ctls_high &= #ifdef CONFIG_X86_64 VM_EXIT_HOST_ADDR_SPACE_SIZE | #endif VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; if (kvm_mpx_supported()) vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; /* We support free control of debug control saving. */ vmx->nested.nested_vmx_true_exit_ctls_low = vmx->nested.nested_vmx_exit_ctls_low & ~VM_EXIT_SAVE_DEBUG_CONTROLS; /* entry controls */ rdmsr(MSR_IA32_VMX_ENTRY_CTLS, vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high); vmx->nested.nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_entry_ctls_high &= #ifdef CONFIG_X86_64 VM_ENTRY_IA32E_MODE | #endif VM_ENTRY_LOAD_IA32_PAT; vmx->nested.nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); if (kvm_mpx_supported()) vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; /* We support free control of debug control loading. 
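 * ("Free control" means L1 may leave VM_EXIT_SAVE_DEBUG_CONTROLS either
 * set or clear; the bit is stripped from the must-be-one half just
 * below.)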
*/ vmx->nested.nested_vmx_true_entry_ctls_low = vmx->nested.nested_vmx_entry_ctls_low & ~VM_ENTRY_LOAD_DEBUG_CONTROLS; /* cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high); vmx->nested.nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_procbased_ctls_high &= CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; /* * We can allow some features even when not supported by the * hardware. For example, L1 can specify an MSR bitmap - and we * can use it to avoid exits to L1 - even when L0 runs L2 * without MSR bitmaps. */ vmx->nested.nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | CPU_BASED_USE_MSR_BITMAPS; /* We support free control of CR3 access interception. */ vmx->nested.nested_vmx_true_procbased_ctls_low = vmx->nested.nested_vmx_procbased_ctls_low & ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); /* secondary cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high); vmx->nested.nested_vmx_secondary_ctls_low = 0; vmx->nested.nested_vmx_secondary_ctls_high &= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_WBINVD_EXITING | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_PCOMMIT; if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | VMX_EPT_INVEPT_BIT; vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept; /* * For nested guests, we don't do anything specific * for single context invalidation. Hence, only advertise * support for global context invalidation. */ vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT; } else vmx->nested.nested_vmx_ept_caps = 0; /* * Old versions of KVM use the single-context version without * checking for support, so declare that it is supported even * though it is treated as global context. The alternative is * not failing the single-context invvpid, and it is worse. 
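 *
 * Put differently: emulating INVVPID(single-context) as a global flush
 * only over-invalidates, whereas failing the instruction would break
 * those older KVM-on-KVM setups outright.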
*/ if (enable_vpid) vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; else vmx->nested.nested_vmx_vpid_caps = 0; if (enable_unrestricted_guest) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_UNRESTRICTED_GUEST; /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA; vmx->nested.nested_vmx_misc_low |= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | VMX_MISC_ACTIVITY_HLT; vmx->nested.nested_vmx_misc_high = 0; } static inline bool vmx_control_verify(u32 control, u32 low, u32 high) { /* * Bits 0 in high must be 0, and bits 1 in low must be 1. */ return ((control & high) | low) == control; } static inline u64 vmx_control_msr(u32 low, u32 high) { return low | ((u64)high << 32); } /* Returns 0 on success, non-0 otherwise. */ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { struct vcpu_vmx *vmx = to_vmx(vcpu); switch (msr_index) { case MSR_IA32_VMX_BASIC: /* * This MSR reports some information about VMX support. We * should return information about the VMX we emulate for the * guest, and the VMCS structure we give it - not about the * VMX support of the underlying hardware. */ *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); break; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high); break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_true_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high); break; case MSR_IA32_VMX_PROCBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high); break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_true_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high); break; case MSR_IA32_VMX_EXIT_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high); break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_true_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high); break; case MSR_IA32_VMX_ENTRY_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high); break; case MSR_IA32_VMX_MISC: *pdata = vmx_control_msr( vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); break; /* * These MSRs specify bits which the guest must keep fixed (on or off) * while L1 is in VMXON mode (in L1's root mode, or running an L2). * We picked the standard core2 setting. 
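 *
 * A guest typically validates its CR0/CR4 against these as (sketch,
 * following the usual FIXED0/FIXED1 convention):
 *
 *	ok = (cr0 & fixed0) == fixed0 && (cr0 & ~fixed1) == 0;
 *
 * i.e. every FIXED0 bit must be set and every bit clear in FIXED1 must
 * be clear; with the FIXED1 values of -1ULL below, only the FIXED0
 * halves constrain anything.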
 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = VMXON_CR0_ALWAYSON;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = -1ULL;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = VMXON_CR4_ALWAYSON;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = -1ULL;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_secondary_ctls_low,
			vmx->nested.nested_vmx_secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		/* Currently, no nested vpid support */
		*pdata = vmx->nested.nested_vmx_ept_caps |
			((u64)vmx->nested.nested_vmx_vpid_caps << 32);
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct shared_msr_entry *msr;

	switch (msr_info->index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		msr_info->data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		msr_info->data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(to_vmx(vcpu));
		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_info);
	case MSR_IA32_TSC:
		msr_info->data = guest_read_tsc(vcpu);
		break;
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported())
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
	case MSR_IA32_FEATURE_CONTROL:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		msr_info->data = vcpu->arch.ia32_xss;
		break;
	case MSR_TSC_AUX:
		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}

	return 0;
}

static void vmx_leave_nested(struct kvm_vcpu *vcpu);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
*/ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr; int ret = 0; u32 msr_index = msr_info->index; u64 data = msr_info->data; switch (msr_index) { case MSR_EFER: ret = kvm_set_msr_common(vcpu, msr_info); break; #ifdef CONFIG_X86_64 case MSR_FS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_FS_BASE, data); break; case MSR_GS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_GS_BASE, data); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(vmx); vmx->msr_guest_kernel_gs_base = data; break; #endif case MSR_IA32_SYSENTER_CS: vmcs_write32(GUEST_SYSENTER_CS, data); break; case MSR_IA32_SYSENTER_EIP: vmcs_writel(GUEST_SYSENTER_EIP, data); break; case MSR_IA32_SYSENTER_ESP: vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported()) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) return 1; vmcs_write64(GUEST_IA32_PAT, data); vcpu->arch.pat = data; break; } ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_TSC_ADJUST: ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_FEATURE_CONTROL: if (!nested_vmx_allowed(vcpu) || (to_vmx(vcpu)->nested.msr_ia32_feature_control & FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) return 1; vmx->nested.msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) vmx_leave_nested(vcpu); break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: return 1; /* they are read-only */ case MSR_IA32_XSS: if (!vmx_xsaves_supported()) return 1; /* * The only supported bit as of Skylake is bit 8, but * it is not supported on KVM. 
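 * (Bit 8 is the Processor Trace state component; since KVM does not
 * virtualize it, any nonzero IA32_XSS value is rejected below.)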
*/ if (data != 0) return 1; vcpu->arch.ia32_xss = data; if (vcpu->arch.ia32_xss != host_xss) add_atomic_switch_msr(vmx, MSR_IA32_XSS, vcpu->arch.ia32_xss, host_xss); else clear_atomic_switch_msr(vmx, MSR_IA32_XSS); break; case MSR_TSC_AUX: if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) return 1; /* Check reserved bit, higher 32 bits should be zero */ if ((data >> 32) != 0) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(vmx, msr_index); if (msr) { u64 old_msr_data = msr->data; msr->data = data; if (msr - vmx->guest_msrs < vmx->save_nmsrs) { preempt_disable(); ret = kvm_set_shared_msr(msr->index, msr->data, msr->mask); preempt_enable(); if (ret) msr->data = old_msr_data; } break; } ret = kvm_set_msr_common(vcpu, msr_info); } return ret; } static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); switch (reg) { case VCPU_REGS_RSP: vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); break; case VCPU_REGS_RIP: vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); break; case VCPU_EXREG_PDPTR: if (enable_ept) ept_save_pdptrs(vcpu); break; default: break; } } static __init int cpu_has_kvm_support(void) { return cpu_has_vmx(); } static __init int vmx_disabled_by_bios(void) { u64 msr; rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); if (msr & FEATURE_CONTROL_LOCKED) { /* launched w/ TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && tboot_enabled()) return 1; /* launched w/o TXT and VMX only enabled w/ TXT */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && !tboot_enabled()) { printk(KERN_WARNING "kvm: disable TXT in the BIOS or " "activate TXT before enabling KVM\n"); return 1; } /* launched w/o TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && !tboot_enabled()) return 1; } return 0; } static void kvm_cpu_vmxon(u64 addr) { asm volatile (ASM_VMX_VMXON_RAX : : "a"(&addr), "m"(addr) : "memory", "cc"); } static int hardware_enable(void) { int cpu = raw_smp_processor_id(); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); u64 old, test_bits; if (cr4_read_shadow() & X86_CR4_VMXE) return -EBUSY; INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); /* * Now we can enable the vmclear operation in kdump * since the loaded_vmcss_on_cpu list on this cpu * has been initialized. * * Though the cpu is not in VMX operation now, there * is no problem to enable the vmclear operation * for the loaded_vmcss_on_cpu list is empty! */ crash_enable_local_vmclear(cpu); rdmsrl(MSR_IA32_FEATURE_CONTROL, old); test_bits = FEATURE_CONTROL_LOCKED; test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; if (tboot_enabled()) test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; if ((old & test_bits) != test_bits) { /* enable and lock */ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); } cr4_set_bits(X86_CR4_VMXE); if (vmm_exclusive) { kvm_cpu_vmxon(phys_addr); ept_sync_global(); } native_store_gdt(this_cpu_ptr(&host_gdt)); return 0; } static void vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v, *n; list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) __loaded_vmcs_clear(v); } /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() * tricks. 
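 * (__ex() wraps the instruction in an exception-fixup table entry, so a
 * VMXOFF that faults during reboot is swallowed instead of oopsing.)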
*/ static void kvm_cpu_vmxoff(void) { asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); } static void hardware_disable(void) { if (vmm_exclusive) { vmclear_local_loaded_vmcss(); kvm_cpu_vmxoff(); } cr4_clear_bits(X86_CR4_VMXE); } static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) { u32 vmx_msr_low, vmx_msr_high; u32 ctl = ctl_min | ctl_opt; rdmsr(msr, vmx_msr_low, vmx_msr_high); ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ /* Ensure minimum (required) set of control bits are supported. */ if (ctl_min & ~ctl) return -EIO; *result = ctl; return 0; } static __init bool allow_1_setting(u32 msr, u32 ctl) { u32 vmx_msr_low, vmx_msr_high; rdmsr(msr, vmx_msr_low, vmx_msr_high); return vmx_msr_high & ctl; } static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) { u32 vmx_msr_low, vmx_msr_high; u32 min, opt, min2, opt2; u32 _pin_based_exec_control = 0; u32 _cpu_based_exec_control = 0; u32 _cpu_based_2nd_exec_control = 0; u32 _vmexit_control = 0; u32 _vmentry_control = 0; min = CPU_BASED_HLT_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_MWAIT_EXITING | CPU_BASED_MONITOR_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_RDPMC_EXITING; opt = CPU_BASED_TPR_SHADOW | CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, &_cpu_based_exec_control) < 0) return -EIO; #ifdef CONFIG_X86_64 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & ~CPU_BASED_CR8_STORE_EXITING; #endif if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { min2 = 0; opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_WBINVD_EXITING | SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_PAUSE_LOOP_EXITING | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_SHADOW_VMCS | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_ENABLE_PML | SECONDARY_EXEC_PCOMMIT | SECONDARY_EXEC_TSC_SCALING; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) return -EIO; } #ifndef CONFIG_X86_64 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; #endif if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_2nd_exec_control &= ~( SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT enabled */ _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_INVLPG_EXITING); rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, vmx_capability.ept, vmx_capability.vpid); } min = VM_EXIT_SAVE_DEBUG_CONTROLS; #ifdef CONFIG_X86_64 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; #endif opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT | VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, &_vmexit_control) < 0) return -EIO; min = 
PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, &_pin_based_exec_control) < 0) return -EIO; if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) || !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT)) _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; min = VM_ENTRY_LOAD_DEBUG_CONTROLS; opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, &_vmentry_control) < 0) return -EIO; rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) return -EIO; #ifdef CONFIG_X86_64 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ if (vmx_msr_high & (1u<<16)) return -EIO; #endif /* Require Write-Back (WB) memory type for VMCS accesses. */ if (((vmx_msr_high >> 18) & 15) != 6) return -EIO; vmcs_conf->size = vmx_msr_high & 0x1fff; vmcs_conf->order = get_order(vmcs_config.size); vmcs_conf->revision_id = vmx_msr_low; vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; vmcs_conf->vmexit_ctrl = _vmexit_control; vmcs_conf->vmentry_ctrl = _vmentry_control; cpu_has_load_ia32_efer = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_EFER) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_EFER); cpu_has_load_perf_global_ctrl = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); /* * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL, * but due to the errata below it can't be used. The workaround is to * use the MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL. * * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32] * * AAK155 (model 26) * AAP115 (model 30) * AAT100 (model 37) * BC86,AAY89,BD102 (model 44) * BA97 (model 46) * */ if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) { switch (boot_cpu_data.x86_model) { case 26: case 30: case 37: case 44: case 46: cpu_has_load_perf_global_ctrl = false; printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " "does not work properly. 
Using workaround\n"); break; default: break; } } if (cpu_has_xsaves) rdmsrl(MSR_IA32_XSS, host_xss); return 0; } static struct vmcs *alloc_vmcs_cpu(int cpu) { int node = cpu_to_node(cpu); struct page *pages; struct vmcs *vmcs; pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); if (!pages) return NULL; vmcs = page_address(pages); memset(vmcs, 0, vmcs_config.size); vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ return vmcs; } static struct vmcs *alloc_vmcs(void) { return alloc_vmcs_cpu(raw_smp_processor_id()); } static void free_vmcs(struct vmcs *vmcs) { free_pages((unsigned long)vmcs, vmcs_config.order); } /* * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded */ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) { if (!loaded_vmcs->vmcs) return; loaded_vmcs_clear(loaded_vmcs); free_vmcs(loaded_vmcs->vmcs); loaded_vmcs->vmcs = NULL; } static void free_kvm_area(void) { int cpu; for_each_possible_cpu(cpu) { free_vmcs(per_cpu(vmxarea, cpu)); per_cpu(vmxarea, cpu) = NULL; } } static void init_vmcs_shadow_fields(void) { int i, j; /* No checks for read only fields yet */ for (i = j = 0; i < max_shadow_read_write_fields; i++) { switch (shadow_read_write_fields[i]) { case GUEST_BNDCFGS: if (!kvm_mpx_supported()) continue; break; default: break; } if (j < i) shadow_read_write_fields[j] = shadow_read_write_fields[i]; j++; } max_shadow_read_write_fields = j; /* shadowed fields that the guest can access without a vmexit */ for (i = 0; i < max_shadow_read_write_fields; i++) { clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap); clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap); } for (i = 0; i < max_shadow_read_only_fields; i++) clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap); } static __init int alloc_kvm_area(void) { int cpu; for_each_possible_cpu(cpu) { struct vmcs *vmcs; vmcs = alloc_vmcs_cpu(cpu); if (!vmcs) { free_kvm_area(); return -ENOMEM; } per_cpu(vmxarea, cpu) = vmcs; } return 0; } static bool emulation_required(struct kvm_vcpu *vcpu) { return emulate_invalid_guest_state && !guest_state_valid(vcpu); } static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save) { if (!emulate_invalid_guest_state) { /* * CS and SS RPL should be equal during guest entry according * to VMX spec, but in reality it is not always so. Since vcpu * is in the middle of the transition from real mode to * protected mode it is safe to assume that RPL 0 is a good * default value. */ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) save->selector &= ~SEGMENT_RPL_MASK; save->dpl = save->selector & SEGMENT_RPL_MASK; save->s = 1; } vmx_set_segment(vcpu, save, seg); } static void enter_pmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Update the real mode segment cache. It may not be up to date if a segment * register was written while the vcpu was in guest mode. 
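* Re-reading the segments here snapshots what the guest last saw; they are written back below once vm86_active is cleared.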
*/ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 0; vmx_segment_cache_clear(vmx); vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); flags = vmcs_readl(GUEST_RFLAGS); flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); update_exception_bitmap(vcpu); fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); } static void fix_rmode_seg(int seg, struct kvm_segment *save) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; struct kvm_segment var = *save; var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; if (!emulate_invalid_guest_state) { var.selector = var.base >> 4; var.base = var.base & 0xffff0; var.limit = 0xffff; var.g = 0; var.db = 0; var.present = 1; var.s = 1; var.l = 0; var.unusable = 0; var.type = 0x3; var.avl = 0; if (save->base & 0xf) printk_once(KERN_WARNING "kvm: segment base is not " "paragraph aligned when entering " "protected mode (seg=%d)", seg); } vmcs_write16(sf->selector, var.selector); vmcs_write32(sf->base, var.base); vmcs_write32(sf->limit, var.limit); vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); } static void enter_rmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 1; /* * Very old userspace does not call KVM_SET_TSS_ADDR before entering * vcpu. Warn the user that an update is overdue. 
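* Real-mode emulation relies on the TSS set up by that ioctl; its base is programmed into GUEST_TR_BASE below.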
*/ if (!vcpu->kvm->arch.tss_addr) printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " "called before entering vcpu\n"); vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); flags = vmcs_readl(GUEST_RFLAGS); vmx->rmode.save_rflags = flags; flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); update_exception_bitmap(vcpu); fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); kvm_mmu_reset_context(vcpu); } static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); if (!msr) return; /* * Force kernel_gs_base reloading before EFER changes, as control * of this msr depends on is_long_mode(). */ vmx_load_host_state(to_vmx(vcpu)); vcpu->arch.efer = efer; if (efer & EFER_LMA) { vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer; } else { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer & ~EFER_LME; } setup_msrs(vmx); } #ifdef CONFIG_X86_64 static void enter_lmode(struct kvm_vcpu *vcpu) { u32 guest_tr_ar; vmx_segment_cache_clear(to_vmx(vcpu)); guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", __func__); vmcs_write32(GUEST_TR_AR_BYTES, (guest_tr_ar & ~VMX_AR_TYPE_MASK) | VMX_AR_TYPE_BUSY_64_TSS); } vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); } static void exit_lmode(struct kvm_vcpu *vcpu) { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); } #endif static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid) { vpid_sync_context(vpid); if (enable_ept) { if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); } } static void vmx_flush_tlb(struct kvm_vcpu *vcpu) { __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; vcpu->arch.cr0 &= ~cr0_guest_owned_bits; vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; } static void vmx_decache_cr3(struct kvm_vcpu *vcpu) { if (enable_ept && is_paging(vcpu)) vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); } static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; vcpu->arch.cr4 &= ~cr4_guest_owned_bits; vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; } static void ept_load_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty)) return; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); } } static void ept_save_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); } __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); } static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | (CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } else if (!is_paging(vcpu)) { /* From nonpaging to paging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } if (!(cr0 & X86_CR0_WP)) *hw_cr0 &= ~X86_CR0_WP; } static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long hw_cr0; hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); if (enable_unrestricted_guest) hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) enter_pmode(vcpu); if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) enter_rmode(vcpu); } #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME) { if 
(!is_paging(vcpu) && (cr0 & X86_CR0_PG)) enter_lmode(vcpu); if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) exit_lmode(vcpu); } #endif if (enable_ept) ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); if (!vcpu->fpu_active) hw_cr0 |= X86_CR0_TS | X86_CR0_MP; vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; /* depends on vcpu->arch.cr0 to be set to a new value */ vmx->emulation_required = emulation_required(vcpu); } static u64 construct_eptp(unsigned long root_hpa) { u64 eptp; /* TODO write the value reading from MSR */ eptp = VMX_EPT_DEFAULT_MT | VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; if (enable_ept_ad_bits) eptp |= VMX_EPT_AD_ENABLE_BIT; eptp |= (root_hpa & PAGE_MASK); return eptp; } static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { unsigned long guest_cr3; u64 eptp; guest_cr3 = cr3; if (enable_ept) { eptp = construct_eptp(cr3); vmcs_write64(EPT_POINTER, eptp); if (is_paging(vcpu) || is_guest_mode(vcpu)) guest_cr3 = kvm_read_cr3(vcpu); else guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; ept_load_pdptrs(vcpu); } vmx_flush_tlb(vcpu); vmcs_writel(GUEST_CR3, guest_cr3); } static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { /* * Pass through host's Machine Check Enable value to hw_cr4, which * is in force while we are in guest mode. Do not let guests control * this bit, even if host CR4.MCE == 0. */ unsigned long hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE) | (to_vmx(vcpu)->rmode.vm86_active ? KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); if (cr4 & X86_CR4_VMXE) { /* * To use VMXON (and later other VMX instructions), a guest * must first be able to turn on cr4.VMXE (see handle_vmon()). * So basically the check on whether to allow nested VMX * is here. */ if (!nested_vmx_allowed(vcpu)) return 1; } if (to_vmx(vcpu)->nested.vmxon && ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) return 1; vcpu->arch.cr4 = cr4; if (enable_ept) { if (!is_paging(vcpu)) { hw_cr4 &= ~X86_CR4_PAE; hw_cr4 |= X86_CR4_PSE; } else if (!(cr4 & X86_CR4_PAE)) { hw_cr4 &= ~X86_CR4_PAE; } } if (!enable_unrestricted_guest && !is_paging(vcpu)) /* * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in * hardware. To emulate this behavior, SMEP/SMAP/PKU needs * to be manually disabled when guest switches to non-paging * mode. * * If !enable_unrestricted_guest, the CPU is always running * with CR0.PG=1 and CR4 needs to be modified. * If enable_unrestricted_guest, the CPU automatically * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. */ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); return 0; } static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 ar; if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { *var = vmx->rmode.segs[seg]; if (seg == VCPU_SREG_TR || var->selector == vmx_read_guest_seg_selector(vmx, seg)) return; var->base = vmx_read_guest_seg_base(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); return; } var->base = vmx_read_guest_seg_base(vmx, seg); var->limit = vmx_read_guest_seg_limit(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); ar = vmx_read_guest_seg_ar(vmx, seg); var->unusable = (ar >> 16) & 1; var->type = ar & 15; var->s = (ar >> 4) & 1; var->dpl = (ar >> 5) & 3; /* * Some userspaces do not preserve unusable property. 
Since usable * segment has to be present according to VMX spec we can use present * property to amend userspace bug by making unusable segment always * nonpresent. vmx_segment_access_rights() already marks nonpresent * segment as unusable. */ var->present = !var->unusable; var->avl = (ar >> 12) & 1; var->l = (ar >> 13) & 1; var->db = (ar >> 14) & 1; var->g = (ar >> 15) & 1; } static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment s; if (to_vmx(vcpu)->rmode.vm86_active) { vmx_get_segment(vcpu, &s, seg); return s.base; } return vmx_read_guest_seg_base(to_vmx(vcpu), seg); } static int vmx_get_cpl(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (unlikely(vmx->rmode.vm86_active)) return 0; else { int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); return VMX_AR_DPL(ar); } } static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; if (var->unusable || !var->present) ar = 1 << 16; else { ar = var->type & 15; ar |= (var->s & 1) << 4; ar |= (var->dpl & 3) << 5; ar |= (var->present & 1) << 7; ar |= (var->avl & 1) << 12; ar |= (var->l & 1) << 13; ar |= (var->db & 1) << 14; ar |= (var->g & 1) << 15; } return ar; } static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; vmx_segment_cache_clear(vmx); if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { vmx->rmode.segs[seg] = *var; if (seg == VCPU_SREG_TR) vmcs_write16(sf->selector, var->selector); else if (var->s) fix_rmode_seg(seg, &vmx->rmode.segs[seg]); goto out; } vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); /* * Fix the "Accessed" bit in AR field of segment registers for older * qemu binaries. * IA32 arch specifies that at the time of processor reset the * "Accessed" bit in the AR field of segment registers is 1. And qemu * is setting it to 0 in the userland code. This causes invalid guest * state vmexit when "unrestricted guest" mode is turned on. * Fix for this setup issue in cpu_reset is being pushed in the qemu * tree. Newer qemu binaries with that qemu fix would not need this * kvm hack. 
*/ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) var->type |= 0x1; /* Accessed */ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); out: vmx->emulation_required = emulation_required(vcpu); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); *db = (ar >> 14) & 1; *l = (ar >> 13) & 1; } static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_IDTR_LIMIT); dt->address = vmcs_readl(GUEST_IDTR_BASE); } static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_IDTR_LIMIT, dt->size); vmcs_writel(GUEST_IDTR_BASE, dt->address); } static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_GDTR_LIMIT); dt->address = vmcs_readl(GUEST_GDTR_BASE); } static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_GDTR_LIMIT, dt->size); vmcs_writel(GUEST_GDTR_BASE, dt->address); } static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; u32 ar; vmx_get_segment(vcpu, &var, seg); var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; ar = vmx_segment_access_rights(&var); if (var.base != (var.selector << 4)) return false; if (var.limit != 0xffff) return false; if (ar != 0xf3) return false; return true; } static bool code_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment cs; unsigned int cs_rpl; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs_rpl = cs.selector & SEGMENT_RPL_MASK; if (cs.unusable) return false; if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; } else { if (cs.dpl != cs_rpl) return false; } if (!cs.present) return false; /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ return true; } static bool stack_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ss; unsigned int ss_rpl; vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); ss_rpl = ss.selector & SEGMENT_RPL_MASK; if (ss.unusable) return true; if (ss.type != 3 && ss.type != 7) return false; if (!ss.s) return false; if (ss.dpl != ss_rpl) /* DPL != RPL */ return false; if (!ss.present) return false; return true; } static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; unsigned int rpl; vmx_get_segment(vcpu, &var, seg); rpl = var.selector & SEGMENT_RPL_MASK; if (var.unusable) return true; if (!var.s) return false; if (!var.present) return false; if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { if (var.dpl < rpl) /* DPL < RPL */ return false; } /* TODO: Add other members to kvm_segment_field to allow checking for other access * rights flags */ return true; } static bool tr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment tr; vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); if (tr.unusable) return false; if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ return false; if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ return false; if (!tr.present) return false; return true; } static bool ldtr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ldtr; vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); if (ldtr.unusable) return true; if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) return false; if (!ldtr.present) return false; return true; } static bool cs_ss_rpl_check(struct kvm_vcpu 
*vcpu) { struct kvm_segment cs, ss; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); return ((cs.selector & SEGMENT_RPL_MASK) == (ss.selector & SEGMENT_RPL_MASK)); } /* * Check if guest state is valid. Returns true if valid, false if * not. * We assume that registers are always usable */ static bool guest_state_valid(struct kvm_vcpu *vcpu) { if (enable_unrestricted_guest) return true; /* real mode guest state checks */ if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) return false; } else { /* protected mode guest state checks */ if (!cs_ss_rpl_check(vcpu)) return false; if (!code_segment_valid(vcpu)) return false; if (!stack_segment_valid(vcpu)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_GS)) return false; if (!tr_valid(vcpu)) return false; if (!ldtr_valid(vcpu)) return false; } /* TODO: * - Add checks on RIP * - Add checks on RFLAGS */ return true; } static int init_rmode_tss(struct kvm *kvm) { gfn_t fn; u16 data = 0; int idx, r; idx = srcu_read_lock(&kvm->srcu); fn = kvm->arch.tss_addr >> PAGE_SHIFT; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; r = kvm_write_guest_page(kvm, fn++, &data, TSS_IOPB_BASE_OFFSET, sizeof(u16)); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = ~0; r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, sizeof(u8)); out: srcu_read_unlock(&kvm->srcu, idx); return r; } static int init_rmode_identity_map(struct kvm *kvm) { int i, idx, r = 0; kvm_pfn_t identity_map_pfn; u32 tmp; if (!enable_ept) return 0; /* Protect kvm->arch.ept_identity_pagetable_done. 
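* Holding slots_lock also serializes allocation of the identity pagetable itself, so racing callers cannot populate it twice.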
*/ mutex_lock(&kvm->slots_lock); if (likely(kvm->arch.ept_identity_pagetable_done)) goto out2; identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; r = alloc_identity_pagetable(kvm); if (r < 0) goto out2; idx = srcu_read_lock(&kvm->srcu); r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); if (r < 0) goto out; /* Set up identity-mapping pagetable for EPT in real mode */ for (i = 0; i < PT32_ENT_PER_PAGE; i++) { tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); r = kvm_write_guest_page(kvm, identity_map_pfn, &tmp, i * sizeof(tmp), sizeof(tmp)); if (r < 0) goto out; } kvm->arch.ept_identity_pagetable_done = true; out: srcu_read_unlock(&kvm->srcu, idx); out2: mutex_unlock(&kvm->slots_lock); return r; } static void seg_setup(int seg) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; unsigned int ar; vmcs_write16(sf->selector, 0); vmcs_writel(sf->base, 0); vmcs_write32(sf->limit, 0xffff); ar = 0x93; if (seg == VCPU_SREG_CS) ar |= 0x08; /* code segment */ vmcs_write32(sf->ar_bytes, ar); } static int alloc_apic_access_page(struct kvm *kvm) { struct page *page; int r = 0; mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) goto out; r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); if (r) goto out; page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); if (is_error_page(page)) { r = -EFAULT; goto out; } /* * Do not pin the page in memory, so that memory hot-unplug * is able to migrate it. */ put_page(page); kvm->arch.apic_access_page_done = true; out: mutex_unlock(&kvm->slots_lock); return r; } static int alloc_identity_pagetable(struct kvm *kvm) { /* Called with kvm->slots_lock held. */ int r = 0; BUG_ON(kvm->arch.ept_identity_pagetable_done); r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, kvm->arch.ept_identity_map_addr, PAGE_SIZE); return r; } static int allocate_vpid(void) { int vpid; if (!enable_vpid) return 0; spin_lock(&vmx_vpid_lock); vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); if (vpid < VMX_NR_VPIDS) __set_bit(vpid, vmx_vpid_bitmap); else vpid = 0; spin_unlock(&vmx_vpid_lock); return vpid; } static void free_vpid(int vpid) { if (!enable_vpid || vpid == 0) return; spin_lock(&vmx_vpid_lock); __clear_bit(vpid, vmx_vpid_bitmap); spin_unlock(&vmx_vpid_lock); } #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R) /* read-low */ __clear_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_W) /* write-low */ __clear_bit(msr, msr_bitmap + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R) /* read-high */ __clear_bit(msr, msr_bitmap + 0x400 / f); if (type & MSR_TYPE_W) /* write-high */ __clear_bit(msr, msr_bitmap + 0xc00 / f); } } static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. 
* We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R) /* read-low */ __set_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_W) /* write-low */ __set_bit(msr, msr_bitmap + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R) /* read-high */ __set_bit(msr, msr_bitmap + 0x400 / f); if (type & MSR_TYPE_W) /* write-high */ __set_bit(msr, msr_bitmap + 0xc00 / f); } } /* * If a msr is allowed by L0, we should check whether it is allowed by L1. * The corresponding bit will be cleared unless both of L0 and L1 allow it. */ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_nested, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) { WARN_ON(1); return; } /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R && !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) /* read-low */ __clear_bit(msr, msr_bitmap_nested + 0x000 / f); if (type & MSR_TYPE_W && !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) /* write-low */ __clear_bit(msr, msr_bitmap_nested + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R && !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) /* read-high */ __clear_bit(msr, msr_bitmap_nested + 0x400 / f); if (type & MSR_TYPE_W && !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) /* write-high */ __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); } } static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) { if (!longmode_only) __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr, MSR_TYPE_R | MSR_TYPE_W); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr, MSR_TYPE_R | MSR_TYPE_W); } static void vmx_enable_intercept_msr_read_x2apic(u32 msr) { __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_R); __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_R); } static void vmx_disable_intercept_msr_read_x2apic(u32 msr) { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_R); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_R); } static void vmx_disable_intercept_msr_write_x2apic(u32 msr) { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_W); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_W); } static bool vmx_get_enable_apicv(void) { return enable_apicv; } static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int max_irr; void *vapic_page; u16 status; if (vmx->nested.pi_desc && vmx->nested.pi_pending) { vmx->nested.pi_pending = false; if (!pi_test_and_clear_on(vmx->nested.pi_desc)) return 0; max_irr = find_last_bit( (unsigned long *)vmx->nested.pi_desc->pir, 256); if (max_irr == 256) return 0; vapic_page = kmap(vmx->nested.virtual_apic_page); if (!vapic_page) { WARN_ON(1); return -ENOMEM; } __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); kunmap(vmx->nested.virtual_apic_page); status = vmcs_read16(GUEST_INTR_STATUS); if ((u8)max_irr > ((u8)status & 0xff)) { status &= ~0xff; status |= (u8)max_irr; vmcs_write16(GUEST_INTR_STATUS, status); } } return 0; } static inline bool kvm_vcpu_trigger_posted_interrupt(struct 
kvm_vcpu *vcpu) { #ifdef CONFIG_SMP if (vcpu->mode == IN_GUEST_MODE) { struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Currently, we don't support urgent interrupt, * all interrupts are recognized as non-urgent * interrupt, so we cannot post interrupts when * 'SN' is set. * * If the vcpu is in guest mode, it means it is * running instead of being scheduled out and * waiting in the run queue, and that's the only * case when 'SN' is set currently, warning if * 'SN' is set. */ WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), POSTED_INTR_VECTOR); return true; } #endif return false; } static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { /* the PIR and ON have been set by L1. */ kvm_vcpu_trigger_posted_interrupt(vcpu); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. */ vmx->nested.pi_pending = true; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } return -1; } /* * Send interrupt to vcpu via posted interrupt way. * 1. If target vcpu is running(non-root mode), send posted interrupt * notification to vcpu and hardware will sync PIR to vIRR atomically. * 2. If target vcpu isn't running(root mode), kick it to pick up the * interrupt from PIR in next vmentry. */ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); int r; r = vmx_deliver_nested_posted_interrupt(vcpu, vector); if (!r) return; if (pi_test_and_set_pir(vector, &vmx->pi_desc)) return; r = pi_test_and_set_on(&vmx->pi_desc); kvm_make_request(KVM_REQ_EVENT, vcpu); if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu)) kvm_vcpu_kick(vcpu); } static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (!pi_test_and_clear_on(&vmx->pi_desc)) return; kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); } /* * Set up the vmcs's constant host-state fields, i.e., host-state fields that * will not change in the lifetime of the guest. * Note that host-state that does change is set elsewhere. E.g., host-state * that is set differently for each CPU is set in vmx_vcpu_load(), not here. */ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) { u32 low32, high32; unsigned long tmpl; struct desc_ptr dt; unsigned long cr4; vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ /* Save the most likely value for this task's CR4 in the VMCS. */ cr4 = cr4_read_shadow(); vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ vmx->host_state.vmcs_host_cr4 = cr4; vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ #ifdef CONFIG_X86_64 /* * Load null selectors, so we can avoid reloading them in * __vmx_load_host_state(), in case userspace uses the null selectors * too (the expected case). 
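* If userspace did leave non-null selectors loaded, they are simply restored on the next host state reload.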
*/ vmcs_write16(HOST_DS_SELECTOR, 0); vmcs_write16(HOST_ES_SELECTOR, 0); #else vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #endif vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ native_store_idt(&dt); vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ vmx->host_idt_base = dt.address; vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32); rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { rdmsr(MSR_IA32_CR_PAT, low32, high32); vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); } } static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) { vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; if (enable_ept) vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; if (is_guest_mode(&vmx->vcpu)) vmx->vcpu.arch.cr4_guest_owned_bits &= ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); } static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) { u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; if (!kvm_vcpu_apicv_active(&vmx->vcpu)) pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; return pin_based_exec_ctrl; } static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); } static u32 vmx_exec_control(struct vcpu_vmx *vmx) { u32 exec_control = vmcs_config.cpu_based_exec_ctrl; if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) exec_control &= ~CPU_BASED_MOV_DR_EXITING; if (!cpu_need_tpr_shadow(&vmx->vcpu)) { exec_control &= ~CPU_BASED_TPR_SHADOW; #ifdef CONFIG_X86_64 exec_control |= CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING; #endif } if (!enable_ept) exec_control |= CPU_BASED_CR3_STORE_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_INVLPG_EXITING; return exec_control; } static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) { u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu)) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; if (!enable_ept) { exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; enable_unrestricted_guest = 0; /* Enabling INVPCID for non-EPT guests may cause a performance regression. */ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; } if (!enable_unrestricted_guest) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; if (!ple_gap) exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; if (!kvm_vcpu_apicv_active(&vmx->vcpu)) exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD (handle_vmptrld). We can NOT enable shadow_vmcs here because we don't yet have a current VMCS12 */ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; if (!enable_pml) exec_control &= ~SECONDARY_EXEC_ENABLE_PML; /* Currently, we allow the L1 guest to run the pcommit instruction directly. 
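* Clearing the exiting control below means PCOMMIT never traps to L0.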
*/ exec_control &= ~SECONDARY_EXEC_PCOMMIT; return exec_control; } static void ept_set_mmio_spte_mask(void) { /* * EPT Misconfigurations can be generated if the value of bits 2:0 * of an EPT paging-structure entry is 110b (write/execute). * Also, magic bits (0x3ull << 62) is set to quickly identify mmio * spte. */ kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull); } #define VMX_XSS_EXIT_BITMAP 0 /* * Sets up the vmcs for emulated real mode. */ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { #ifdef CONFIG_X86_64 unsigned long a; #endif int i; /* I/O */ vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); if (enable_shadow_vmcs) { vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); } if (cpu_has_vmx_msr_bitmap()) vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ /* Control */ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); if (cpu_has_secondary_exec_ctrls()) vmcs_write32(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control(vmx)); if (kvm_vcpu_apicv_active(&vmx->vcpu)) { vmcs_write64(EOI_EXIT_BITMAP0, 0); vmcs_write64(EOI_EXIT_BITMAP1, 0); vmcs_write64(EOI_EXIT_BITMAP2, 0); vmcs_write64(EOI_EXIT_BITMAP3, 0); vmcs_write16(GUEST_INTR_STATUS, 0); vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); } if (ple_gap) { vmcs_write32(PLE_GAP, ple_gap); vmx->ple_window = ple_window; vmx->ple_window_dirty = true; } vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ vmx_set_constant_host_state(vmx); #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ rdmsrl(MSR_GS_BASE, a); vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ #else vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ #endif vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { u32 index = vmx_msr_index[i]; u32 data_low, data_high; int j = vmx->nmsrs; if (rdmsr_safe(index, &data_low, &data_high) < 0) continue; if (wrmsr_safe(index, data_low, data_high) < 0) continue; vmx->guest_msrs[j].index = i; vmx->guest_msrs[j].data = 0; vmx->guest_msrs[j].mask = -1ull; ++vmx->nmsrs; } vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); set_cr4_guest_host_mask(vmx); if (vmx_xsaves_supported()) vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); return 0; } static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct msr_data apic_base_msr; u64 cr0; vmx->rmode.vm86_active = 0; vmx->soft_vnmi_blocked = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(vcpu, 0); if (!init_event) { apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if 
(kvm_vcpu_is_reset_bsp(vcpu)) apic_base_msr.data |= MSR_IA32_APICBASE_BSP; apic_base_msr.host_initiated = true; kvm_set_apic_base(vcpu, &apic_base_msr); } vmx_segment_cache_clear(vmx); seg_setup(VCPU_SREG_CS); vmcs_write16(GUEST_CS_SELECTOR, 0xf000); vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); seg_setup(VCPU_SREG_DS); seg_setup(VCPU_SREG_ES); seg_setup(VCPU_SREG_FS); seg_setup(VCPU_SREG_GS); seg_setup(VCPU_SREG_SS); vmcs_write16(GUEST_TR_SELECTOR, 0); vmcs_writel(GUEST_TR_BASE, 0); vmcs_write32(GUEST_TR_LIMIT, 0xffff); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); vmcs_write16(GUEST_LDTR_SELECTOR, 0); vmcs_writel(GUEST_LDTR_BASE, 0); vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); if (!init_event) { vmcs_write32(GUEST_SYSENTER_CS, 0); vmcs_writel(GUEST_SYSENTER_ESP, 0); vmcs_writel(GUEST_SYSENTER_EIP, 0); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } vmcs_writel(GUEST_RFLAGS, 0x02); kvm_rip_write(vcpu, 0xfff0); vmcs_writel(GUEST_GDTR_BASE, 0); vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); vmcs_writel(GUEST_IDTR_BASE, 0); vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); setup_msrs(vmx); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ if (cpu_has_vmx_tpr_shadow() && !init_event) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); if (cpu_need_tpr_shadow(vcpu)) vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, __pa(vcpu->arch.apic->regs)); vmcs_write32(TPR_THRESHOLD, 0); } kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); if (kvm_vcpu_apicv_active(vcpu)) memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); if (vmx->vpid != 0) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; vmx->vcpu.arch.cr0 = cr0; vmx_set_cr0(vcpu, cr0); /* enter rmode */ vmx_set_cr4(vcpu, 0); vmx_set_efer(vcpu, 0); vmx_fpu_activate(vcpu); update_exception_bitmap(vcpu); vpid_sync_context(vmx->vpid); } /* * In nested virtualization, check if L1 asked to exit on external interrupts. * For most existing hypervisors, this will always return true. 
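* An L1 that clears PIN_BASED_EXT_INTR_MASK lets L2 take external interrupts directly, which is unusual but architecturally valid.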
*/ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_EXT_INTR_MASK; } /* * In nested virtualization, check if L1 has set * VM_EXIT_ACK_INTR_ON_EXIT */ static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_ACK_INTR_ON_EXIT; } static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING; } static void enable_irq_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } static void enable_nmi_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; if (!cpu_has_virtual_nmis() || vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { enable_irq_window(vcpu); return; } cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } static void vmx_inject_irq(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); uint32_t intr; int irq = vcpu->arch.interrupt.nr; trace_kvm_inj_virq(irq); ++vcpu->stat.irq_injections; if (vmx->rmode.vm86_active) { int inc_eip = 0; if (vcpu->arch.interrupt.soft) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } intr = irq | INTR_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { intr |= INTR_TYPE_SOFT_INTR; vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); } else intr |= INTR_TYPE_EXT_INTR; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); } static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (is_guest_mode(vcpu)) return; if (!cpu_has_virtual_nmis()) { /* * Tracking the NMI-blocked state in software is built upon * finding the next open IRQ window. This, in turn, depends on * well-behaving guests: They have to keep IRQs disabled at * least as long as the NMI handler runs. Otherwise we may * cause NMI nesting, maybe breaking the guest. But as this is * highly unlikely, we can live with the residual risk. 
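* The timestamp reset below lets the exit path bound how long the vcpu may remain in this software-blocked state.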
*/ vmx->soft_vnmi_blocked = 1; vmx->vnmi_blocked_time = 0; } ++vcpu->stat.nmi_injections; vmx->nmi_known_unmasked = false; if (vmx->rmode.vm86_active) { if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); } static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) { if (!cpu_has_virtual_nmis()) return to_vmx(vcpu)->soft_vnmi_blocked; if (to_vmx(vcpu)->nmi_known_unmasked) return false; return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; } static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (!cpu_has_virtual_nmis()) { if (vmx->soft_vnmi_blocked != masked) { vmx->soft_vnmi_blocked = masked; vmx->vnmi_blocked_time = 0; } } else { vmx->nmi_known_unmasked = !masked; if (masked) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); } } static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) { if (to_vmx(vcpu)->nested.nested_run_pending) return 0; if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) return 0; return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | GUEST_INTR_STATE_NMI)); } static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) { return (!to_vmx(vcpu)->nested.nested_run_pending && vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); } static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) { int ret; ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, PAGE_SIZE * 3); if (ret) return ret; kvm->arch.tss_addr = addr; return init_rmode_tss(kvm); } static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) { switch (vec) { case BP_VECTOR: /* * Update instruction length as we may reinject the exception * from user space while in guest debugging mode. */ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; /* fall through */ case DB_VECTOR: if (vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) return false; /* fall through */ case DE_VECTOR: case OF_VECTOR: case BR_VECTOR: case UD_VECTOR: case DF_VECTOR: case SS_VECTOR: case GP_VECTOR: case MF_VECTOR: return true; break; } return false; } static int handle_rmode_exception(struct kvm_vcpu *vcpu, int vec, u32 err_code) { /* * Instruction with address size override prefix opcode 0x67 * Cause the #SS fault with 0 error code in VM86 mode. */ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; return kvm_vcpu_halt(vcpu); } return 1; } return 0; } /* * Forward all other exceptions that are valid in real mode. * FIXME: Breaks guest debugging in real mode, needs to be fixed with * the required debugging infrastructure rework. */ kvm_queue_exception(vcpu, vec); return 1; } /* * Trigger machine check on the host. We assume all the MSRs are already set up * by the CPU and that we still run on the same CPU as the MCE occurred on. * We pass a fake environment to the machine check handler because we want * the guest to be always treated like user space, no matter what context * it used internally. 
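* Hence the fake pt_regs below claim CS ring 3 with interrupts enabled.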
*/ static void kvm_machine_check(void) { #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, }; do_machine_check(&regs, 0); #endif } static int handle_machine_check(struct kvm_vcpu *vcpu) { /* already handled by vcpu_run */ return 1; } static int handle_exception(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 intr_info, ex_no, error_code; unsigned long cr2, rip, dr6; u32 vect_info; enum emulation_result er; vect_info = vmx->idt_vectoring_info; intr_info = vmx->exit_intr_info; if (is_machine_check(intr_info)) return handle_machine_check(vcpu); if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) return 1; /* already handled by vmx_vcpu_run() */ if (is_no_device(intr_info)) { vmx_fpu_activate(vcpu); return 1; } if (is_invalid_opcode(intr_info)) { if (is_guest_mode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; } error_code = 0; if (intr_info & INTR_INFO_DELIVER_CODE_MASK) error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); /* * The #PF with PFEC.RSVD = 1 indicates the guest is accessing * MMIO, it is better to report an internal error. * See the comments in vmx_handle_exit. */ if ((vect_info & VECTORING_INFO_VALID_MASK) && !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; vcpu->run->internal.ndata = 3; vcpu->run->internal.data[0] = vect_info; vcpu->run->internal.data[1] = intr_info; vcpu->run->internal.data[2] = error_code; return 0; } if (is_page_fault(intr_info)) { /* EPT won't cause page fault directly */ BUG_ON(enable_ept); cr2 = vmcs_readl(EXIT_QUALIFICATION); trace_kvm_page_fault(cr2, error_code); if (kvm_event_needs_reinjection(vcpu)) kvm_mmu_unprotect_page_virt(vcpu, cr2); return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0); } ex_no = intr_info & INTR_INFO_VECTOR_MASK; if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) return handle_rmode_exception(vcpu, ex_no, error_code); switch (ex_no) { case AC_VECTOR: kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); return 1; case DB_VECTOR: dr6 = vmcs_readl(EXIT_QUALIFICATION); if (!(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; if (!(dr6 & ~DR6_RESERVED)) /* icebp */ skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); return 1; } kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); /* fall through */ case BP_VECTOR: /* * Update instruction length as we may reinject #BP from * user space while in guest debugging mode. Reading it for * #DB as well causes no harm, it is not used in that case. 
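* (VM_EXIT_INSTRUCTION_LEN is architecturally valid for software exceptions such as #BP, so the read below is safe.)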
*/ vmx->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_run->exit_reason = KVM_EXIT_DEBUG; rip = kvm_rip_read(vcpu); kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; kvm_run->debug.arch.exception = ex_no; break; default: kvm_run->exit_reason = KVM_EXIT_EXCEPTION; kvm_run->ex.exception = ex_no; kvm_run->ex.error_code = error_code; break; } return 0; } static int handle_external_interrupt(struct kvm_vcpu *vcpu) { ++vcpu->stat.irq_exits; return 1; } static int handle_triple_fault(struct kvm_vcpu *vcpu) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } static int handle_io(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int size, in, string; unsigned port; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); string = (exit_qualification & 16) != 0; in = (exit_qualification & 8) != 0; ++vcpu->stat.io_exits; if (string || in) return emulate_instruction(vcpu, 0) == EMULATE_DONE; port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; skip_emulated_instruction(vcpu); return kvm_fast_pio_out(vcpu, size, port); } static void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) { /* * Patch in the VMCALL instruction: */ hypercall[0] = 0x0f; hypercall[1] = 0x01; hypercall[2] = 0xc1; } static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) { unsigned long always_on = VMXON_CR0_ALWAYSON; struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_UNRESTRICTED_GUEST && nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) always_on &= ~(X86_CR0_PE | X86_CR0_PG); return (val & always_on) == always_on; } /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned long orig_val = val; /* * We get here when L2 changed cr0 in a way that did not change * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), * but did change L0 shadowed bits. So we first calculate the * effective cr0 value that L1 would like to write into the * hardware. It consists of the L2-owned bits from the new * value combined with the L1-owned bits from L1's guest_cr0. */ val = (val & ~vmcs12->cr0_guest_host_mask) | (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); if (!nested_cr0_valid(vcpu, val)) return 1; if (kvm_set_cr0(vcpu, val)) return 1; vmcs_writel(CR0_READ_SHADOW, orig_val); return 0; } else { if (to_vmx(vcpu)->nested.vmxon && ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)) return 1; return kvm_set_cr0(vcpu, val); } } static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned long orig_val = val; /* analogously to handle_set_cr0 */ val = (val & ~vmcs12->cr4_guest_host_mask) | (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); if (kvm_set_cr4(vcpu, val)) return 1; vmcs_writel(CR4_READ_SHADOW, orig_val); return 0; } else return kvm_set_cr4(vcpu, val); } /* called to set cr0 as appropriate for clts instruction exit. */ static void handle_clts(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) { /* * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS * but we did (!fpu_active). We need to keep GUEST_CR0.TS on, * just pretend it's off (also in arch.cr0 for fpu_activate). 
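* Only the CR0 read shadow and the cached arch.cr0 are updated below; GUEST_CR0.TS itself stays set until the FPU is truly activated.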
*/ vmcs_writel(CR0_READ_SHADOW, vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS); vcpu->arch.cr0 &= ~X86_CR0_TS; } else vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); } static int handle_cr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification, val; int cr; int reg; int err; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); cr = exit_qualification & 15; reg = (exit_qualification >> 8) & 15; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ val = kvm_register_readl(vcpu, reg); trace_kvm_cr_write(cr, val); switch (cr) { case 0: err = handle_set_cr0(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 3: err = kvm_set_cr3(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 4: err = handle_set_cr4(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 8: { u8 cr8_prev = kvm_get_cr8(vcpu); u8 cr8 = (u8)val; err = kvm_set_cr8(vcpu, cr8); kvm_complete_insn_gp(vcpu, err); if (lapic_in_kernel(vcpu)) return 1; if (cr8_prev <= cr8) return 1; vcpu->run->exit_reason = KVM_EXIT_SET_TPR; return 0; } } break; case 2: /* clts */ handle_clts(vcpu); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); skip_emulated_instruction(vcpu); vmx_fpu_activate(vcpu); return 1; case 1: /*mov from cr*/ switch (cr) { case 3: val = kvm_read_cr3(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); skip_emulated_instruction(vcpu); return 1; case 8: val = kvm_get_cr8(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); skip_emulated_instruction(vcpu); return 1; } break; case 3: /* lmsw */ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); kvm_lmsw(vcpu, val); skip_emulated_instruction(vcpu); return 1; default: break; } vcpu->run->exit_reason = 0; vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", (int)(exit_qualification >> 4) & 3, cr); return 0; } static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int dr, dr7, reg; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); dr = exit_qualification & DEBUG_REG_ACCESS_NUM; /* First, if DR does not exist, trigger UD */ if (!kvm_require_dr(vcpu, dr)) return 1; /* Do not handle if the CPL > 0, will trigger GP on re-entry */ if (!kvm_require_cpl(vcpu, 0)) return 1; dr7 = vmcs_readl(GUEST_DR7); if (dr7 & DR7_GD) { /* * As the vm-exit takes precedence over the debug trap, we * need to emulate the latter, either for the host or the * guest debugging itself. */ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; vcpu->run->debug.arch.dr7 = dr7; vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); vcpu->run->debug.arch.exception = DB_VECTOR; vcpu->run->exit_reason = KVM_EXIT_DEBUG; return 0; } else { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= DR6_BD | DR6_RTM; kvm_queue_exception(vcpu, DB_VECTOR); return 1; } } if (vcpu->guest_debug == 0) { vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); /* * No more DR vmexits; force a reload of the debug registers * and reenter on this instruction. The next vmexit will * retrieve the full state of the debug registers. 
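 *
 * Sketch of the round trip, based on the surrounding code (the re-entry
 * itself happens in the generic run loop, outside this file): setting
 * KVM_DEBUGREG_WONT_EXIT below causes the guest's debug registers to be
 * loaded for the next entry; vmx_sync_dirty_debug_regs() further down
 * then reads them back with get_debugreg(), clears the flag and
 * re-enables CPU_BASED_MOV_DR_EXITING.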
*/ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; return 1; } reg = DEBUG_REG_ACCESS_REG(exit_qualification); if (exit_qualification & TYPE_MOV_FROM_DR) { unsigned long val; if (kvm_get_dr(vcpu, dr, &val)) return 1; kvm_register_write(vcpu, reg, val); } else if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) return 1; skip_emulated_instruction(vcpu); return 1; } static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) { return vcpu->arch.dr6; } static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) { } static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { get_debugreg(vcpu->arch.db[0], 0); get_debugreg(vcpu->arch.db[1], 1); get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); get_debugreg(vcpu->arch.dr6, 6); vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); } static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) { vmcs_writel(GUEST_DR7, val); } static int handle_cpuid(struct kvm_vcpu *vcpu) { kvm_emulate_cpuid(vcpu); return 1; } static int handle_rdmsr(struct kvm_vcpu *vcpu) { u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; struct msr_data msr_info; msr_info.index = ecx; msr_info.host_initiated = false; if (vmx_get_msr(vcpu, &msr_info)) { trace_kvm_msr_read_ex(ecx); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_read(ecx, msr_info.data); /* FIXME: handling of bits 32:63 of rax, rdx */ vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; skip_emulated_instruction(vcpu); return 1; } static int handle_wrmsr(struct kvm_vcpu *vcpu) { struct msr_data msr; u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); msr.data = data; msr.index = ecx; msr.host_initiated = false; if (kvm_set_msr(vcpu, &msr) != 0) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_write(ecx, data); skip_emulated_instruction(vcpu); return 1; } static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) { kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } static int handle_interrupt_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; /* clear pending irq */ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); kvm_make_request(KVM_REQ_EVENT, vcpu); ++vcpu->stat.irq_window_exits; return 1; } static int handle_halt(struct kvm_vcpu *vcpu) { return kvm_emulate_halt(vcpu); } static int handle_vmcall(struct kvm_vcpu *vcpu) { return kvm_emulate_hypercall(vcpu); } static int handle_invd(struct kvm_vcpu *vcpu) { return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_invlpg(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); kvm_mmu_invlpg(vcpu, exit_qualification); skip_emulated_instruction(vcpu); return 1; } static int handle_rdpmc(struct kvm_vcpu *vcpu) { int err; err = kvm_rdpmc(vcpu); kvm_complete_insn_gp(vcpu, err); return 1; } static int handle_wbinvd(struct kvm_vcpu *vcpu) { kvm_emulate_wbinvd(vcpu); return 1; } static int handle_xsetbv(struct kvm_vcpu *vcpu) { u64 new_bv = kvm_read_edx_eax(vcpu); u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); if (kvm_set_xcr(vcpu, index, new_bv) == 0) skip_emulated_instruction(vcpu); return 1; } static int handle_xsaves(struct 
kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); WARN(1, "this should never happen\n"); return 1; } static int handle_xrstors(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); WARN(1, "this should never happen\n"); return 1; } static int handle_apic_access(struct kvm_vcpu *vcpu) { if (likely(fasteoi)) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int access_type, offset; access_type = exit_qualification & APIC_ACCESS_TYPE; offset = exit_qualification & APIC_ACCESS_OFFSET; /* * A sane guest uses MOV to write the EOI register, and the * written value does not matter. So take a short-circuit here * and avoid heavy instruction emulation. */ if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && (offset == APIC_EOI)) { kvm_lapic_set_eoi(vcpu); skip_emulated_instruction(vcpu); return 1; } } return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int vector = exit_qualification & 0xff; /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ kvm_apic_set_eoi_accelerated(vcpu, vector); return 1; } static int handle_apic_write(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 offset = exit_qualification & 0xfff; /* APIC-write VM exit is trap-like and thus no need to adjust IP */ kvm_apic_write_nodecode(vcpu, offset); return 1; } static int handle_task_switch(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification; bool has_error_code = false; u32 error_code = 0; u16 tss_selector; int reason, type, idt_v, idt_index; idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); reason = (u32)exit_qualification >> 30; if (reason == TASK_SWITCH_GATE && idt_v) { switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = false; vmx_set_nmi_mask(vcpu, true); break; case INTR_TYPE_EXT_INTR: case INTR_TYPE_SOFT_INTR: kvm_clear_interrupt_queue(vcpu); break; case INTR_TYPE_HARD_EXCEPTION: if (vmx->idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { has_error_code = true; error_code = vmcs_read32(IDT_VECTORING_ERROR_CODE); } /* fall through */ case INTR_TYPE_SOFT_EXCEPTION: kvm_clear_exception_queue(vcpu); break; default: break; } } tss_selector = exit_qualification; if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && type != INTR_TYPE_EXT_INTR && type != INTR_TYPE_NMI_INTR)) skip_emulated_instruction(vcpu); if (kvm_task_switch(vcpu, tss_selector, type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, has_error_code, error_code) == EMULATE_FAIL) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; return 0; } /* * TODO: What about debug traps on tss switch? * Are we supposed to inject them and update dr6?
*/ return 1; } static int handle_ept_violation(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; gpa_t gpa; u32 error_code; int gla_validity; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); gla_validity = (exit_qualification >> 7) & 0x3; if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) { printk(KERN_ERR "EPT: Handling EPT violation failed!\n"); printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), vmcs_readl(GUEST_LINEAR_ADDRESS)); printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", (long unsigned int)exit_qualification); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; return 0; } /* * If the EPT violation happened while executing iret from NMI, the * "blocked by NMI" bit has to be set before the next VM entry. * There are errata that may cause this bit to not be set: * AAK134, BY25. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); trace_kvm_page_fault(gpa, exit_qualification); /* Is it a write fault? */ error_code = exit_qualification & PFERR_WRITE_MASK; /* Is it a fetch fault? */ error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK; /* Is the EPT page table present? */ error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK; vcpu->arch.exit_qualification = exit_qualification; return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { int ret; gpa_t gpa; gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { skip_emulated_instruction(vcpu); trace_kvm_fast_mmio(gpa); return 1; } ret = handle_mmio_page_fault(vcpu, gpa, true); if (likely(ret == RET_MMIO_PF_EMULATE)) return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == EMULATE_DONE; if (unlikely(ret == RET_MMIO_PF_INVALID)) return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0); if (unlikely(ret == RET_MMIO_PF_RETRY)) return 1; /* It is the real ept misconfig */ WARN_ON(1); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; return 0; } static int handle_nmi_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; /* clear pending NMI */ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); ++vcpu->stat.nmi_window_exits; kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); enum emulation_result err = EMULATE_DONE; int ret = 1; u32 cpu_exec_ctrl; bool intr_window_requested; unsigned count = 130; cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; while (vmx->emulation_required && count-- != 0) { if (intr_window_requested && vmx_interrupt_allowed(vcpu)) return handle_interrupt_window(&vmx->vcpu); if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) return 1; err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; ret = 0; goto out; } if (err != EMULATE_DONE) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0; return 0; } if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; ret = kvm_vcpu_halt(vcpu); goto out; } if (signal_pending(current)) goto out; if (need_resched()) schedule(); } out: return ret; } static int __grow_ple_window(int val) { if (ple_window_grow < 1) return ple_window; val = min(val, ple_window_actual_max); if (ple_window_grow < ple_window) val *= ple_window_grow; else val += ple_window_grow; return val; } static int __shrink_ple_window(int val, int modifier, int minimum) { if (modifier < 1) return ple_window; if (modifier < ple_window) val /= modifier; else val -= modifier; return max(val, minimum); } static void grow_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __grow_ple_window(old); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); } static void shrink_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __shrink_ple_window(old, ple_window_shrink, ple_window); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); } /* * ple_window_actual_max is computed to be one grow_ple_window() below * ple_window_max. (See __grow_ple_window for the reason.) * This prevents overflows, because ple_window_max is int. * ple_window_max is effectively rounded down to a multiple of * ple_window_grow in this process. For example, with ple_window_grow == 2 * (a multiplicative grow), ple_window_actual_max becomes * ple_window_max / 2, which __grow_ple_window() can double without * overflowing. * ple_window_max is also prevented from pushing vmx->ple_window below * ple_window. */ static void update_ple_window_actual_max(void) { ple_window_actual_max = __shrink_ple_window(max(ple_window_max, ple_window), ple_window_grow, INT_MIN); } /* * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
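 *
 * Summary of the function below: each CPU keeps a list of vcpus blocked on
 * it (blocked_vcpu_on_cpu); when this vector fires, every vcpu whose
 * posted-interrupt descriptor has the ON bit set is kicked awake.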
*/ static void wakeup_handler(void) { struct kvm_vcpu *vcpu; int cpu = smp_processor_id(); spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), blocked_vcpu_list) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); if (pi_test_on(pi_desc) == 1) kvm_vcpu_kick(vcpu); } spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); } static __init int hardware_setup(void) { int r = -ENOMEM, i, msr; rdmsrl_safe(MSR_EFER, &host_efer); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) kvm_define_shared_msr(i, vmx_msr_index[i]); vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_io_bitmap_a) return r; vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_io_bitmap_b) goto out; vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_legacy) goto out1; vmx_msr_bitmap_legacy_x2apic = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_legacy_x2apic) goto out2; vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_longmode) goto out3; vmx_msr_bitmap_longmode_x2apic = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_longmode_x2apic) goto out4; if (nested) { vmx_msr_bitmap_nested = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_nested) goto out5; } vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_vmread_bitmap) goto out6; vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_vmwrite_bitmap) goto out7; memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); /* * Allow direct access to the PC debug port (it is often used for I/O * delays, but the vmexits simply slow things down). */ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); clear_bit(0x80, vmx_io_bitmap_a); memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); if (nested) memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE); if (setup_vmcs_config(&vmcs_config) < 0) { r = -EIO; goto out8; } if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); if (!cpu_has_vmx_vpid()) enable_vpid = 0; if (!cpu_has_vmx_shadow_vmcs()) enable_shadow_vmcs = 0; if (enable_shadow_vmcs) init_vmcs_shadow_fields(); if (!cpu_has_vmx_ept() || !cpu_has_vmx_ept_4levels()) { enable_ept = 0; enable_unrestricted_guest = 0; enable_ept_ad_bits = 0; } if (!cpu_has_vmx_ept_ad_bits()) enable_ept_ad_bits = 0; if (!cpu_has_vmx_unrestricted_guest()) enable_unrestricted_guest = 0; if (!cpu_has_vmx_flexpriority()) flexpriority_enabled = 0; /* * set_apic_access_page_addr() is used to reload apic access * page upon invalidation. No need to do anything if not * using the APIC_ACCESS_ADDR VMCS field. 
*/ if (!flexpriority_enabled) kvm_x86_ops->set_apic_access_page_addr = NULL; if (!cpu_has_vmx_tpr_shadow()) kvm_x86_ops->update_cr8_intercept = NULL; if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); if (!cpu_has_vmx_ple()) ple_gap = 0; if (!cpu_has_vmx_apicv()) enable_apicv = 0; if (cpu_has_vmx_tsc_scaling()) { kvm_has_tsc_control = true; kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; kvm_tsc_scaling_ratio_frac_bits = 48; } vmx_disable_intercept_for_msr(MSR_FS_BASE, false); vmx_disable_intercept_for_msr(MSR_GS_BASE, false); vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true); memcpy(vmx_msr_bitmap_legacy_x2apic, vmx_msr_bitmap_legacy, PAGE_SIZE); memcpy(vmx_msr_bitmap_longmode_x2apic, vmx_msr_bitmap_longmode, PAGE_SIZE); set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ if (enable_apicv) { for (msr = 0x800; msr <= 0x8ff; msr++) vmx_disable_intercept_msr_read_x2apic(msr); /* According to the SDM, in x2apic mode the whole ID register * is used, but KVM only uses the highest eight bits, so reads * of it must stay intercepted. */ vmx_enable_intercept_msr_read_x2apic(0x802); /* TMCCT */ vmx_enable_intercept_msr_read_x2apic(0x839); /* TPR */ vmx_disable_intercept_msr_write_x2apic(0x808); /* EOI */ vmx_disable_intercept_msr_write_x2apic(0x80b); /* SELF-IPI */ vmx_disable_intercept_msr_write_x2apic(0x83f); } if (enable_ept) { kvm_mmu_set_mask_ptes(0ull, (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull, (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK); ept_set_mmio_spte_mask(); kvm_enable_tdp(); } else kvm_disable_tdp(); update_ple_window_actual_max(); /* * Only enable PML when hardware supports PML feature, and both EPT * and EPT A/D bit features are enabled -- PML depends on them to work. */ if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) enable_pml = 0; if (!enable_pml) { kvm_x86_ops->slot_enable_log_dirty = NULL; kvm_x86_ops->slot_disable_log_dirty = NULL; kvm_x86_ops->flush_log_dirty = NULL; kvm_x86_ops->enable_log_dirty_pt_masked = NULL; } kvm_set_posted_intr_wakeup_handler(wakeup_handler); return alloc_kvm_area(); out8: free_page((unsigned long)vmx_vmwrite_bitmap); out7: free_page((unsigned long)vmx_vmread_bitmap); out6: if (nested) free_page((unsigned long)vmx_msr_bitmap_nested); out5: free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); out4: free_page((unsigned long)vmx_msr_bitmap_longmode); out3: free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); out2: free_page((unsigned long)vmx_msr_bitmap_legacy); out1: free_page((unsigned long)vmx_io_bitmap_b); out: free_page((unsigned long)vmx_io_bitmap_a); return r; } static __exit void hardware_unsetup(void) { free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); free_page((unsigned long)vmx_msr_bitmap_legacy); free_page((unsigned long)vmx_msr_bitmap_longmode); free_page((unsigned long)vmx_io_bitmap_b); free_page((unsigned long)vmx_io_bitmap_a); free_page((unsigned long)vmx_vmwrite_bitmap); free_page((unsigned long)vmx_vmread_bitmap); if (nested) free_page((unsigned long)vmx_msr_bitmap_nested); free_kvm_area(); } /* * Indicate a busy-waiting vcpu in a spinlock. We do not enable plain PAUSE * exiting, so we only get here on a CPU with PAUSE-Loop-Exiting.
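 *
 * Rough flow, as implemented below: on a PLE exit the window is grown so
 * that future spinning triggers fewer exits, the PAUSE itself is skipped,
 * and kvm_vcpu_on_spin() tries to yield to another vcpu of the same VM
 * that might be the lock holder.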
*/ static int handle_pause(struct kvm_vcpu *vcpu) { if (ple_gap) grow_ple_window(vcpu); skip_emulated_instruction(vcpu); kvm_vcpu_on_spin(vcpu); return 1; } static int handle_nop(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); return 1; } static int handle_mwait(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); return handle_nop(vcpu); } static int handle_monitor_trap(struct kvm_vcpu *vcpu) { return 1; } static int handle_monitor(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); return handle_nop(vcpu); } /* * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. * We could reuse a single VMCS for all the L2 guests, but we also want the * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this * allows keeping them loaded on the processor, and in the future will allow * optimizations where prepare_vmcs02 doesn't need to set all the fields on * every entry if they never change. * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. * * The following functions allocate and free a vmcs02 in this pool. */ /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) { struct vmcs02_list *item; list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) if (item->vmptr == vmx->nested.current_vmptr) { list_move(&item->list, &vmx->nested.vmcs02_pool); return &item->vmcs02; } if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { /* Recycle the least recently used VMCS. */ item = list_last_entry(&vmx->nested.vmcs02_pool, struct vmcs02_list, list); item->vmptr = vmx->nested.current_vmptr; list_move(&item->list, &vmx->nested.vmcs02_pool); return &item->vmcs02; } /* Create a new VMCS */ item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL); if (!item) return NULL; item->vmcs02.vmcs = alloc_vmcs(); if (!item->vmcs02.vmcs) { kfree(item); return NULL; } loaded_vmcs_init(&item->vmcs02); item->vmptr = vmx->nested.current_vmptr; list_add(&(item->list), &(vmx->nested.vmcs02_pool)); vmx->nested.vmcs02_num++; return &item->vmcs02; } /* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */ static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) { struct vmcs02_list *item; list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) if (item->vmptr == vmptr) { free_loaded_vmcs(&item->vmcs02); list_del(&item->list); kfree(item); vmx->nested.vmcs02_num--; return; } } /* * Free all VMCSs saved for this vcpu, except the one pointed by * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs * must be &vmx->vmcs01. */ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) { struct vmcs02_list *item, *n; WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { /* * Something will leak if the above WARN triggers. Better than * a use-after-free. */ if (vmx->loaded_vmcs == &item->vmcs02) continue; free_loaded_vmcs(&item->vmcs02); list_del(&item->list); kfree(item); vmx->nested.vmcs02_num--; } } /* * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), * set the success or error code of an emulated VMX instruction, as specified * by Vol 2B, VMX Instruction Reference, "Conventions". 
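 *
 * In brief, per those conventions: VMsucceed clears CF, PF, AF, ZF, SF and
 * OF; VMfailInvalid sets CF and clears the rest; VMfailValid sets ZF,
 * clears the rest and stores the error number in the VM-instruction error
 * field. The three helpers below implement exactly that.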
*/ static void nested_vmx_succeed(struct kvm_vcpu *vcpu) { vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); } static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) { vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)) | X86_EFLAGS_CF); } static void nested_vmx_failValid(struct kvm_vcpu *vcpu, u32 vm_instruction_error) { if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { /* * failValid writes the error number to the current VMCS, which * can't be done when there isn't a current VMCS. */ nested_vmx_failInvalid(vcpu); return; } vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_SF | X86_EFLAGS_OF)) | X86_EFLAGS_ZF); get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; /* * We don't need to force a shadow sync because * VM_INSTRUCTION_ERROR is not shadowed. */ } static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) { /* TODO: do not simply reset the guest here. */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); pr_warn("kvm: nested vmx abort, indicator %d\n", indicator); } static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) { struct vcpu_vmx *vmx = container_of(timer, struct vcpu_vmx, nested.preemption_timer); vmx->nested.preemption_timer_expired = true; kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); kvm_vcpu_kick(&vmx->vcpu); return HRTIMER_NORESTART; } /* * Decode the memory-address operand of a vmx instruction, as recorded on an * exit caused by such an instruction (run by a guest hypervisor). * On success, returns 0. When the operand is invalid, returns 1 and throws * #UD or #GP. */ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, gva_t *ret) { gva_t off; bool exn; struct kvm_segment s; /* * According to Vol. 3B, "Information for VM Exits Due to Instruction * Execution", on an exit, vmx_instruction_info holds most of the * addressing components of the operand. Only the displacement part * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). * For how an actual address is calculated from all these components, * refer to Vol. 1, "Operand Addressing". */ int scaling = vmx_instruction_info & 3; int addr_size = (vmx_instruction_info >> 7) & 7; bool is_reg = vmx_instruction_info & (1u << 10); int seg_reg = (vmx_instruction_info >> 15) & 7; int index_reg = (vmx_instruction_info >> 18) & 0xf; bool index_is_valid = !(vmx_instruction_info & (1u << 22)); int base_reg = (vmx_instruction_info >> 23) & 0xf; bool base_is_valid = !(vmx_instruction_info & (1u << 27)); if (is_reg) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* Addr = segment_base + offset */ /* offset = base + [index * scale] + displacement */ off = exit_qualification; /* holds the displacement */ if (base_is_valid) off += kvm_register_read(vcpu, base_reg); if (index_is_valid) off += kvm_register_read(vcpu, index_reg) << scaling; vmx_get_segment(vcpu, &s, seg_reg); *ret = s.base + off; if (addr_size == 1) /* 32 bit */ *ret &= 0xffffffff; /* Checks for #GP/#SS exceptions.
*/ exn = false; if (is_protmode(vcpu)) { /* Protected mode: apply checks for segment validity in the * following order: * - segment type check (#GP(0) may be thrown) * - usability check (#GP(0)/#SS(0)) * - limit check (#GP(0)/#SS(0)) */ if (wr) /* #GP(0) if the destination operand is located in a * read-only data segment or any code segment. */ exn = ((s.type & 0xa) == 0 || (s.type & 8)); else /* #GP(0) if the source operand is located in an * execute-only code segment */ exn = ((s.type & 0xa) == 8); } if (exn) { kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return 1; } if (is_long_mode(vcpu)) { /* Long mode: #GP(0)/#SS(0) if the memory address is in a * non-canonical form. This is the only check done in long * mode. */ exn = is_noncanonical_address(*ret); } else if (is_protmode(vcpu)) { /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. */ exn = (s.unusable != 0); /* Protected mode: #GP(0)/#SS(0) if the memory * operand is outside the segment limit. */ exn = exn || (off + sizeof(u64) > s.limit); } if (exn) { kvm_queue_exception_e(vcpu, seg_reg == VCPU_SREG_SS ? SS_VECTOR : GP_VECTOR, 0); return 1; } return 0; } /* * This function checks the vmptr operand, including: * - that it is 4KB aligned * - that no bits beyond the physical address width are set * Returns 0 on success, or else 1. * (Intel SDM Section 30.3) */ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, gpa_t *vmpointer) { gva_t gva; gpa_t vmptr; struct x86_exception e; struct page *page; struct vcpu_vmx *vmx = to_vmx(vcpu); int maxphyaddr = cpuid_maxphyaddr(vcpu); if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, sizeof(vmptr), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (exit_reason) { case EXIT_REASON_VMON: /* * SDM 3: 24.11.5 * The first 4 bytes of the VMXON region contain the supported * VMCS revision identifier. * * Note: IA32_VMX_BASIC[48] will never be 1 for the nested * case, which would replace the physical address width * with 32. */ if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 1; } page = nested_get_page(vcpu, vmptr); if (page == NULL || *(u32 *)kmap(page) != VMCS12_REVISION) { nested_vmx_failInvalid(vcpu); /* only kunmap() if kmap() above actually ran */ if (page) kunmap(page); skip_emulated_instruction(vcpu); return 1; } kunmap(page); vmx->nested.vmxon_ptr = vmptr; break; case EXIT_REASON_VMCLEAR: if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); skip_emulated_instruction(vcpu); return 1; } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); skip_emulated_instruction(vcpu); return 1; } break; case EXIT_REASON_VMPTRLD: if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); skip_emulated_instruction(vcpu); return 1; } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); skip_emulated_instruction(vcpu); return 1; } break; default: return 1; /* shouldn't happen */ } if (vmpointer) *vmpointer = vmptr; return 0; } /* * Emulate the VMXON instruction. * Currently, we just remember that VMX is active, and do not save or even * inspect the argument to VMXON (the so-called "VMXON pointer") because we * do not currently need to store anything in that guest-allocated memory * region.
Consequently, VMCLEAR and VMPTRLD also do not verify that their * argument is different from the VMXON pointer (which the spec says they do). */ static int handle_vmon(struct kvm_vcpu *vcpu) { struct kvm_segment cs; struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs *shadow_vmcs; const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; /* The Intel VMX Instruction Reference lists a number of bits that * are prerequisite to running VMXON, most notably that cr4.VMXE * must be set to 1 (see vmx_set_cr4() for when we allow the guest to * set this). Otherwise, we should fail with #UD. We test these now: */ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) || !kvm_read_cr0_bits(vcpu, X86_CR0_PE) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); if (is_long_mode(vcpu) && !cs.l) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); return 1; } if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL)) return 1; if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); skip_emulated_instruction(vcpu); return 1; } if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); return 1; } if (enable_shadow_vmcs) { shadow_vmcs = alloc_vmcs(); if (!shadow_vmcs) return -ENOMEM; /* mark vmcs as shadow */ shadow_vmcs->revision_id |= (1u << 31); /* init shadow vmcs */ vmcs_clear(shadow_vmcs); vmx->nested.current_shadow_vmcs = shadow_vmcs; } INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); vmx->nested.vmcs02_num = 0; hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; vmx->nested.vmxon = true; skip_emulated_instruction(vcpu); nested_vmx_succeed(vcpu); return 1; } /* * Intel's VMX Instruction Reference specifies a common set of prerequisites * for running VMX instructions (except VMXON, whose prerequisites are * slightly different). It also specifies what exception to inject otherwise. */ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { struct kvm_segment cs; struct vcpu_vmx *vmx = to_vmx(vcpu); if (!vmx->nested.vmxon) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) || (is_long_mode(vcpu) && !cs.l)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); return 0; } return 1; } static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) { if (vmx->nested.current_vmptr == -1ull) return; /* current_vmptr and current_vmcs12 are always set/reset together */ if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) return; if (enable_shadow_vmcs) { /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); vmx->nested.sync_shadow_vmcs = false; vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, -1ull); } vmx->nested.posted_intr_nv = -1; kunmap(vmx->nested.current_vmcs12_page); nested_release_page(vmx->nested.current_vmcs12_page); vmx->nested.current_vmptr = -1ull; vmx->nested.current_vmcs12 = NULL; } /* * Free whatever needs to be freed from vmx->nested when L1 goes down, or * just stops using VMX.
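 *
 * Per the body below, that covers the nested VPID, the vmcs12 mapping, the
 * shadow VMCS (if shadowing is enabled), the pinned APIC-access,
 * virtual-APIC and posted-interrupt descriptor pages, and the whole
 * vmcs02 pool.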
*/ static void free_nested(struct vcpu_vmx *vmx) { if (!vmx->nested.vmxon) return; vmx->nested.vmxon = false; free_vpid(vmx->nested.vpid02); nested_release_vmcs12(vmx); if (enable_shadow_vmcs) free_vmcs(vmx->nested.current_shadow_vmcs); /* Unpin physical memory we referred to in current vmcs02 */ if (vmx->nested.apic_access_page) { nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } if (vmx->nested.pi_desc_page) { kunmap(vmx->nested.pi_desc_page); nested_release_page(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; vmx->nested.pi_desc = NULL; } nested_free_all_saved_vmcss(vmx); } /* Emulate the VMXOFF instruction */ static int handle_vmoff(struct kvm_vcpu *vcpu) { if (!nested_vmx_check_permission(vcpu)) return 1; free_nested(to_vmx(vcpu)); skip_emulated_instruction(vcpu); nested_vmx_succeed(vcpu); return 1; } /* Emulate the VMCLEAR instruction */ static int handle_vmclear(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t vmptr; struct vmcs12 *vmcs12; struct page *page; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) return 1; if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vmx); page = nested_get_page(vcpu, vmptr); if (page == NULL) { /* * For accurate processor emulation, VMCLEAR beyond available * physical memory should do nothing at all. However, it is * possible that a nested vmx bug, not a guest hypervisor bug, * resulted in this case, so let's shut down before doing any * more damage: */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 1; } vmcs12 = kmap(page); vmcs12->launch_state = 0; kunmap(page); nested_release_page(page); nested_free_vmcs02(vmx, vmptr); skip_emulated_instruction(vcpu); nested_vmx_succeed(vcpu); return 1; } static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); /* Emulate the VMLAUNCH instruction */ static int handle_vmlaunch(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, true); } /* Emulate the VMRESUME instruction */ static int handle_vmresume(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, false); } enum vmcs_field_type { VMCS_FIELD_TYPE_U16 = 0, VMCS_FIELD_TYPE_U64 = 1, VMCS_FIELD_TYPE_U32 = 2, VMCS_FIELD_TYPE_NATURAL_WIDTH = 3 }; static inline int vmcs_field_type(unsigned long field) { if (0x1 & field) /* the *_HIGH fields are all 32 bit */ return VMCS_FIELD_TYPE_U32; return (field >> 13) & 0x3 ; } static inline int vmcs_field_readonly(unsigned long field) { return (((field >> 10) & 0x3) == 1); } /* * Read a vmcs12 field. Since these can have varying lengths and we return * one type, we chose the biggest type (u64) and zero-extend the return value * to that size. Note that the caller, handle_vmread, might need to use only * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of * 64-bit fields are to be returned). 
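 *
 * A concrete illustration of the encoding decoded by vmcs_field_type()
 * above: GUEST_RIP is 0x681e, and (0x681e >> 13) & 0x3 == 3, i.e.
 * VMCS_FIELD_TYPE_NATURAL_WIDTH, so it is read through a natural_width
 * pointer and zero-extended into the u64 result.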
*/ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu, unsigned long field, u64 *ret) { short offset = vmcs_field_to_offset(field); char *p; if (offset < 0) return offset; p = ((char *)(get_vmcs12(vcpu))) + offset; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_NATURAL_WIDTH: *ret = *((natural_width *)p); return 0; case VMCS_FIELD_TYPE_U16: *ret = *((u16 *)p); return 0; case VMCS_FIELD_TYPE_U32: *ret = *((u32 *)p); return 0; case VMCS_FIELD_TYPE_U64: *ret = *((u64 *)p); return 0; default: WARN_ON(1); return -ENOENT; } } static inline int vmcs12_write_any(struct kvm_vcpu *vcpu, unsigned long field, u64 field_value){ short offset = vmcs_field_to_offset(field); char *p = ((char *) get_vmcs12(vcpu)) + offset; if (offset < 0) return offset; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: *(u16 *)p = field_value; return 0; case VMCS_FIELD_TYPE_U32: *(u32 *)p = field_value; return 0; case VMCS_FIELD_TYPE_U64: *(u64 *)p = field_value; return 0; case VMCS_FIELD_TYPE_NATURAL_WIDTH: *(natural_width *)p = field_value; return 0; default: WARN_ON(1); return -ENOENT; } } static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) { int i; unsigned long field; u64 field_value; struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; const unsigned long *fields = shadow_read_write_fields; const int num_fields = max_shadow_read_write_fields; preempt_disable(); vmcs_load(shadow_vmcs); for (i = 0; i < num_fields; i++) { field = fields[i]; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: field_value = vmcs_read16(field); break; case VMCS_FIELD_TYPE_U32: field_value = vmcs_read32(field); break; case VMCS_FIELD_TYPE_U64: field_value = vmcs_read64(field); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: field_value = vmcs_readl(field); break; default: WARN_ON(1); continue; } vmcs12_write_any(&vmx->vcpu, field, field_value); } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); preempt_enable(); } static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) { const unsigned long *fields[] = { shadow_read_write_fields, shadow_read_only_fields }; const int max_fields[] = { max_shadow_read_write_fields, max_shadow_read_only_fields }; int i, q; unsigned long field; u64 field_value = 0; struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; vmcs_load(shadow_vmcs); for (q = 0; q < ARRAY_SIZE(fields); q++) { for (i = 0; i < max_fields[q]; i++) { field = fields[q][i]; vmcs12_read_any(&vmx->vcpu, field, &field_value); switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: vmcs_write16(field, (u16)field_value); break; case VMCS_FIELD_TYPE_U32: vmcs_write32(field, (u32)field_value); break; case VMCS_FIELD_TYPE_U64: vmcs_write64(field, (u64)field_value); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: vmcs_writel(field, (long)field_value); break; default: WARN_ON(1); break; } } } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); } /* * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was * used before) all generate the same failure when it is missing. 
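 *
 * Per the SDM conventions referenced above, that common failure is
 * VMfailInvalid, which nested_vmx_check_vmcs12() below emulates via
 * nested_vmx_failInvalid().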
*/ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->nested.current_vmptr == -1ull) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 0; } return 1; } static int handle_vmread(struct kvm_vcpu *vcpu) { unsigned long field; u64 field_value; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t gva = 0; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) return 1; /* Decode instruction info and find the field to read */ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); /* Read the field, zero-extended to a u64 field_value */ if (vmcs12_read_any(vcpu, field, &field_value) < 0) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); skip_emulated_instruction(vcpu); return 1; } /* * Now copy part of this value to register or memory, as requested. * Note that the number of bits actually copied is 32 or 64 depending * on the guest's mode (32 or 64 bit), not on the given field's length. */ if (vmx_instruction_info & (1u << 10)) { kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), field_value); } else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &gva)) return 1; /* _system ok, as nested_vmx_check_permission verified cpl=0 */ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); } nested_vmx_succeed(vcpu); skip_emulated_instruction(vcpu); return 1; } static int handle_vmwrite(struct kvm_vcpu *vcpu) { unsigned long field; gva_t gva; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); /* The value to write might be 32 or 64 bits, depending on L1's long * mode, and eventually we need to write that into a field of several * possible lengths. The code below first zero-extends the value to 64 * bit (field_value), and then copies only the appropriate number of * bits into the vmcs12 field. */ u64 field_value = 0; struct x86_exception e; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) return 1; if (vmx_instruction_info & (1u << 10)) field_value = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 3) & 0xf)); else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_64_bit_mode(vcpu) ? 
8 : 4), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } } field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); if (vmcs_field_readonly(field)) { nested_vmx_failValid(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); skip_emulated_instruction(vcpu); return 1; } if (vmcs12_write_any(vcpu, field, field_value) < 0) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); skip_emulated_instruction(vcpu); return 1; } nested_vmx_succeed(vcpu); skip_emulated_instruction(vcpu); return 1; } /* Emulate the VMPTRLD instruction */ static int handle_vmptrld(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t vmptr; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr)) return 1; if (vmx->nested.current_vmptr != vmptr) { struct vmcs12 *new_vmcs12; struct page *page; page = nested_get_page(vcpu, vmptr); if (page == NULL) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 1; } new_vmcs12 = kmap(page); if (new_vmcs12->revision_id != VMCS12_REVISION) { kunmap(page); nested_release_page_clean(page); nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); skip_emulated_instruction(vcpu); return 1; } nested_release_vmcs12(vmx); vmx->nested.current_vmptr = vmptr; vmx->nested.current_vmcs12 = new_vmcs12; vmx->nested.current_vmcs12_page = page; if (enable_shadow_vmcs) { vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, __pa(vmx->nested.current_shadow_vmcs)); vmx->nested.sync_shadow_vmcs = true; } } nested_vmx_succeed(vcpu); skip_emulated_instruction(vcpu); return 1; } /* Emulate the VMPTRST instruction */ static int handle_vmptrst(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t vmcs_gva; struct x86_exception e; if (!nested_vmx_check_permission(vcpu)) return 1; if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &vmcs_gva)) return 1; /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */ if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, (void *)&to_vmx(vcpu)->nested.current_vmptr, sizeof(u64), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } nested_vmx_succeed(vcpu); skip_emulated_instruction(vcpu); return 1; } /* Emulate the INVEPT instruction */ static int handle_invept(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vmx_instruction_info, types; unsigned long type; gva_t gva; struct x86_exception e; struct { u64 eptp, gpa; } operand; if (!(vmx->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) || !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; if (!(types & (1UL << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); skip_emulated_instruction(vcpu); return 1; } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 
1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (type) { case VMX_EPT_EXTENT_GLOBAL: kvm_mmu_sync_roots(vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); nested_vmx_succeed(vcpu); break; default: /* Trap single context invalidation invept calls */ BUG_ON(1); break; } skip_emulated_instruction(vcpu); return 1; } static int handle_invvpid(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vmx_instruction_info; unsigned long type, types; gva_t gva; struct x86_exception e; int vpid; if (!(vmx->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_VPID) || !(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7; if (!(types & (1UL << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); skip_emulated_instruction(vcpu); return 1; } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid, sizeof(u32), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (type) { case VMX_VPID_EXTENT_SINGLE_CONTEXT: /* * Old versions of KVM use the single-context version so we * have to support it; just treat it the same as all-context. */ case VMX_VPID_EXTENT_ALL_CONTEXT: __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); nested_vmx_succeed(vcpu); break; default: /* Trap individual address invalidation invvpid calls */ BUG_ON(1); break; } skip_emulated_instruction(vcpu); return 1; } static int handle_pml_full(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; trace_kvm_pml_full(vcpu->vcpu_id); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); /* * If the PML buffer filled up while executing iret from NMI, the * "blocked by NMI" bit has to be set before the next VM entry. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); /* * The PML buffer was already flushed at the beginning of VMEXIT. * Nothing to do here, and there's no userspace involvement needed * for PML. */ return 1; } static int handle_pcommit(struct kvm_vcpu *vcpu) { /* we never intercept the pcommit instruction for the L1 guest. */ WARN_ON(1); return 1; } /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs * to be done to userspace and return 0.
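 *
 * For example, handle_triple_fault() above sets vcpu->run->exit_reason to
 * KVM_EXIT_SHUTDOWN and returns 0 so that userspace sees the shutdown,
 * while trivial handlers such as handle_cpuid() return 1 and the guest
 * resumes.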
*/ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EXCEPTION_NMI] = handle_exception, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, [EXIT_REASON_IO_INSTRUCTION] = handle_io, [EXIT_REASON_CR_ACCESS] = handle_cr, [EXIT_REASON_DR_ACCESS] = handle_dr, [EXIT_REASON_CPUID] = handle_cpuid, [EXIT_REASON_MSR_READ] = handle_rdmsr, [EXIT_REASON_MSR_WRITE] = handle_wrmsr, [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, [EXIT_REASON_HLT] = handle_halt, [EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_RDPMC] = handle_rdpmc, [EXIT_REASON_VMCALL] = handle_vmcall, [EXIT_REASON_VMCLEAR] = handle_vmclear, [EXIT_REASON_VMLAUNCH] = handle_vmlaunch, [EXIT_REASON_VMPTRLD] = handle_vmptrld, [EXIT_REASON_VMPTRST] = handle_vmptrst, [EXIT_REASON_VMREAD] = handle_vmread, [EXIT_REASON_VMRESUME] = handle_vmresume, [EXIT_REASON_VMWRITE] = handle_vmwrite, [EXIT_REASON_VMOFF] = handle_vmoff, [EXIT_REASON_VMON] = handle_vmon, [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_APIC_WRITE] = handle_apic_write, [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, [EXIT_REASON_WBINVD] = handle_wbinvd, [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, [EXIT_REASON_INVVPID] = handle_invvpid, [EXIT_REASON_XSAVES] = handle_xsaves, [EXIT_REASON_XRSTORS] = handle_xrstors, [EXIT_REASON_PML_FULL] = handle_pml_full, [EXIT_REASON_PCOMMIT] = handle_pcommit, }; static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { unsigned long exit_qualification; gpa_t bitmap, last_bitmap; unsigned int port; int size; u8 b; if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; last_bitmap = (gpa_t)-1; b = -1; while (size > 0) { if (port < 0x8000) bitmap = vmcs12->io_bitmap_a; else if (port < 0x10000) bitmap = vmcs12->io_bitmap_b; else return true; bitmap += (port & 0x7fff) / 8; if (last_bitmap != bitmap) if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) return true; if (b & (1 << (port & 7))) return true; port++; size--; last_bitmap = bitmap; } return false; } /* * Return 1 if we should exit from L2 to L1 to handle an MSR access, * rather than handle it ourselves in L0. I.e., check whether L1 expressed * disinterest in the current event (read or write a specific MSR) by using an * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
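 *
 * Worked example (a hypothetical guest access): a WRMSR to 0xc0000080
 * (EFER) selects the write half (bitmap += 2048) and the high-MSR quarter
 * (msr_index -= 0xc0000000, then bitmap += 1024), so the bit consulted is
 * bit 0 of byte 0x80/8 == 16 within that quarter of vmcs12->msr_bitmap.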
*/ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 exit_reason) { u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; gpa_t bitmap; if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) return true; /* * The MSR_BITMAP page is divided into four 1024-byte bitmaps, * for the four combinations of read/write and low/high MSR numbers. * First we need to figure out which of the four to use: */ bitmap = vmcs12->msr_bitmap; if (exit_reason == EXIT_REASON_MSR_WRITE) bitmap += 2048; if (msr_index >= 0xc0000000) { msr_index -= 0xc0000000; bitmap += 1024; } /* Then read the msr_index'th bit from this bitmap: */ if (msr_index < 1024*8) { unsigned char b; if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) return true; return 1 & (b >> (msr_index & 7)); } else return true; /* let L1 handle the wrong parameter */ } /* * Return 1 if we should exit from L2 to L1 to handle a CR access exit, * rather than handle it ourselves in L0. I.e., check if L1 wanted to * intercept (via guest_host_mask etc.) the current event. */ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int cr = exit_qualification & 15; int reg = (exit_qualification >> 8) & 15; unsigned long val = kvm_register_readl(vcpu, reg); switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ switch (cr) { case 0: if (vmcs12->cr0_guest_host_mask & (val ^ vmcs12->cr0_read_shadow)) return true; break; case 3: if ((vmcs12->cr3_target_count >= 1 && vmcs12->cr3_target_value0 == val) || (vmcs12->cr3_target_count >= 2 && vmcs12->cr3_target_value1 == val) || (vmcs12->cr3_target_count >= 3 && vmcs12->cr3_target_value2 == val) || (vmcs12->cr3_target_count >= 4 && vmcs12->cr3_target_value3 == val)) return false; if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) return true; break; case 4: if (vmcs12->cr4_guest_host_mask & (vmcs12->cr4_read_shadow ^ val)) return true; break; case 8: if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) return true; break; } break; case 2: /* clts */ if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && (vmcs12->cr0_read_shadow & X86_CR0_TS)) return true; break; case 1: /* mov from cr */ switch (cr) { case 3: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_CR3_STORE_EXITING) return true; break; case 8: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_CR8_STORE_EXITING) return true; break; } break; case 3: /* lmsw */ /* * lmsw can change bits 1..3 of cr0, and only set bit 0 of * cr0. Other attempted changes are ignored, with no exit. */ if (vmcs12->cr0_guest_host_mask & 0xe & (val ^ vmcs12->cr0_read_shadow)) return true; if ((vmcs12->cr0_guest_host_mask & 0x1) && !(vmcs12->cr0_read_shadow & 0x1) && (val & 0x1)) return true; break; } return false; } /* * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we * should handle it ourselves in L0 (and then continue L2). Only call this * when in is_guest_mode (L2). 
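 *
 * (Despite the 1/0 wording above, the function returns bool: true means
 * reflect the exit to L1, false means L0 handles it and resumes L2.)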
*/ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) { u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 exit_reason = vmx->exit_reason; trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, vmcs_readl(EXIT_QUALIFICATION), vmx->idt_vectoring_info, intr_info, vmcs_read32(VM_EXIT_INTR_ERROR_CODE), KVM_ISA_VMX); if (vmx->nested.nested_run_pending) return false; if (unlikely(vmx->fail)) { pr_info_ratelimited("%s failed vm entry %x\n", __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); return true; } switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: if (!is_exception(intr_info)) return false; else if (is_page_fault(intr_info)) return enable_ept; else if (is_no_device(intr_info) && !(vmcs12->guest_cr0 & X86_CR0_TS)) return false; else if (is_debug(intr_info) && vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) return false; else if (is_breakpoint(intr_info) && vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); case EXIT_REASON_EXTERNAL_INTERRUPT: return false; case EXIT_REASON_TRIPLE_FAULT: return true; case EXIT_REASON_PENDING_INTERRUPT: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); case EXIT_REASON_NMI_WINDOW: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); case EXIT_REASON_TASK_SWITCH: return true; case EXIT_REASON_CPUID: if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) return false; return true; case EXIT_REASON_HLT: return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); case EXIT_REASON_INVD: return true; case EXIT_REASON_INVLPG: return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); case EXIT_REASON_RDPMC: return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: /* * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! 
 */
		return true;
	case EXIT_REASON_CR_ACCESS:
		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
	case EXIT_REASON_DR_ACCESS:
		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
	case EXIT_REASON_IO_INSTRUCTION:
		return nested_vmx_exit_handled_io(vcpu, vmcs12);
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
	case EXIT_REASON_INVALID_STATE:
		return true;
	case EXIT_REASON_MWAIT_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
	case EXIT_REASON_MONITOR_TRAP_FLAG:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
	case EXIT_REASON_MONITOR_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
	case EXIT_REASON_PAUSE_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
			nested_cpu_has2(vmcs12,
				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return false;
	case EXIT_REASON_TPR_BELOW_THRESHOLD:
		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
	case EXIT_REASON_APIC_ACCESS:
		return nested_cpu_has2(vmcs12,
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
	case EXIT_REASON_APIC_WRITE:
	case EXIT_REASON_EOI_INDUCED:
		/* apic_write and eoi_induced should exit unconditionally. */
		return true;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault().
		 */
		return false;
	case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never directly uses L1's EPT, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table are L0's fault.
		 */
		return false;
	case EXIT_REASON_WBINVD:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
	case EXIT_REASON_XSETBV:
		return true;
	case EXIT_REASON_XSAVES:
	case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value---neither in L1 nor in L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
*/ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); case EXIT_REASON_PCOMMIT: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT); default: return true; } } static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { *info1 = vmcs_readl(EXIT_QUALIFICATION); *info2 = vmcs_read32(VM_EXIT_INTR_INFO); } static int vmx_create_pml_buffer(struct vcpu_vmx *vmx) { struct page *pml_pg; pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!pml_pg) return -ENOMEM; vmx->pml_pg = pml_pg; vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); return 0; } static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) { if (vmx->pml_pg) { __free_page(vmx->pml_pg); vmx->pml_pg = NULL; } } static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 *pml_buf; u16 pml_idx; pml_idx = vmcs_read16(GUEST_PML_INDEX); /* Do nothing if PML buffer is empty */ if (pml_idx == (PML_ENTITY_NUM - 1)) return; /* PML index always points to next available PML buffer entity */ if (pml_idx >= PML_ENTITY_NUM) pml_idx = 0; else pml_idx++; pml_buf = page_address(vmx->pml_pg); for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { u64 gpa; gpa = pml_buf[pml_idx]; WARN_ON(gpa & (PAGE_SIZE - 1)); kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); } /* reset PML index */ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } /* * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap. * Called before reporting dirty_bitmap to userspace. */ static void kvm_flush_pml_buffers(struct kvm *kvm) { int i; struct kvm_vcpu *vcpu; /* * We only need to kick vcpu out of guest mode here, as PML buffer * is flushed at beginning of all VMEXITs, and it's obvious that only * vcpus running in guest are possible to have unflushed GPAs in PML * buffer. 
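 * The kick forces a VM exit, and the exit path (vmx_handle_exit) calls
 * vmx_flush_pml_buffer() before anything else runs, so by the time the
 * dirty log is read every logged GPA has been transferred to the
 * dirty bitmap.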
*/ kvm_for_each_vcpu(i, vcpu, kvm) kvm_vcpu_kick(vcpu); } static void vmx_dump_sel(char *name, uint32_t sel) { pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", name, vmcs_read32(sel), vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); } static void vmx_dump_dtsel(char *name, uint32_t limit) { pr_err("%s limit=0x%08x, base=0x%016lx\n", name, vmcs_read32(limit), vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); } static void dump_vmcs(void) { u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); u32 secondary_exec_control = 0; unsigned long cr4 = vmcs_readl(GUEST_CR4); u64 efer = vmcs_read64(GUEST_IA32_EFER); int i, n; if (cpu_has_secondary_exec_ctrls()) secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); pr_err("*** Guest State ***\n"); pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), vmcs_readl(CR0_GUEST_HOST_MASK)); pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) { pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); } pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", vmcs_readl(GUEST_SYSENTER_ESP), vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", efer, vmcs_read64(GUEST_IA32_PAT)); pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", vmcs_read64(GUEST_IA32_DEBUGCTL), vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) pr_err("PerfGlobCtl = 0x%016llx\n", vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); pr_err("Interruptibility = %08x ActivityState = %08x\n", vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), vmcs_read32(GUEST_ACTIVITY_STATE)); if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) pr_err("InterruptStatus = %04x\n", vmcs_read16(GUEST_INTR_STATUS)); pr_err("*** Host State ***\n"); pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", 
vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), vmcs_read16(HOST_TR_SELECTOR)); pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), vmcs_readl(HOST_TR_BASE)); pr_err("GDTBase=%016lx IDTBase=%016lx\n", vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), vmcs_readl(HOST_CR4)); pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", vmcs_readl(HOST_IA32_SYSENTER_ESP), vmcs_read32(HOST_IA32_SYSENTER_CS), vmcs_readl(HOST_IA32_SYSENTER_EIP)); if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_EFER), vmcs_read64(HOST_IA32_PAT)); if (vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) pr_err("PerfGlobCtl = 0x%016llx\n", vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); pr_err("*** Control State ***\n"); pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", vmcs_read32(EXCEPTION_BITMAP), vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", vmcs_read32(VM_EXIT_INTR_INFO), vmcs_read32(VM_EXIT_INTR_ERROR_CODE), vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); pr_err(" reason=%08x qualification=%016lx\n", vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); pr_err("IDTVectoring: info=%08x errcode=%08x\n", vmcs_read32(IDT_VECTORING_INFO_FIELD), vmcs_read32(IDT_VECTORING_ERROR_CODE)); pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) pr_err("TSC Multiplier = 0x%016llx\n", vmcs_read64(TSC_MULTIPLIER)); if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); n = vmcs_read32(CR3_TARGET_COUNT); for (i = 0; i + 1 < n; i += 4) pr_err("CR3 target%u=%016lx target%u=%016lx\n", i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2), i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2)); if (i < n) pr_err("CR3 target%u=%016lx\n", i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2)); if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) pr_err("PLE Gap=%08x Window=%08x\n", vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW)); if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) pr_err("Virtual processor ID = 0x%04x\n", vmcs_read16(VIRTUAL_PROCESSOR_ID)); } /* * The guest has exited. See if we can fix it or if we need userspace * assistance. */ static int vmx_handle_exit(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exit_reason = vmx->exit_reason; u32 vectoring_info = vmx->idt_vectoring_info; trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); /* * Flush logged GPAs PML buffer, this will make dirty_bitmap more * updated. 
Another benefit is that, in kvm_vm_ioctl_get_dirty_log, before
	 * querying dirty_bitmap we only need to kick all vcpus out of guest
	 * mode: once a vcpu is back in root mode, its PML buffer must have
	 * been flushed already.
	 */
	if (enable_pml)
		vmx_flush_pml_buffer(vcpu);

	/* If guest state is invalid, start emulating */
	if (vmx->emulation_required)
		return handle_invalid_guest_state(vcpu);

	if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
		nested_vmx_vmexit(vcpu, exit_reason,
				  vmcs_read32(VM_EXIT_INTR_INFO),
				  vmcs_readl(EXIT_QUALIFICATION));
		return 1;
	}

	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
		dump_vmcs();
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= exit_reason;
		return 0;
	}

	if (unlikely(vmx->fail)) {
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		return 0;
	}

	/*
	 * Note:
	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a
	 * delivery event, since that indicates the guest is accessing MMIO.
	 * The vm-exit would simply be triggered again after returning to the
	 * guest, causing an infinite loop.
	 */
	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
			(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
			exit_reason != EXIT_REASON_EPT_VIOLATION &&
			exit_reason != EXIT_REASON_TASK_SWITCH)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
		vcpu->run->internal.ndata = 2;
		vcpu->run->internal.data[0] = vectoring_info;
		vcpu->run->internal.data[1] = exit_reason;
		return 0;
	}

	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
	    !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
					get_vmcs12(vcpu))))) {
		if (vmx_interrupt_allowed(vcpu)) {
			vmx->soft_vnmi_blocked = 0;
		} else if (vmx->vnmi_blocked_time > 1000000000LL &&
			   vcpu->arch.nmi_pending) {
			/*
			 * This CPU doesn't let us find the end of an
			 * NMI-blocked window if the guest runs with IRQs
			 * disabled. So we pull the trigger after 1 s of
			 * futile waiting, but inform the user about this.
 */
			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
			       "state on VCPU %d after 1 s timeout\n",
			       __func__, vcpu->vcpu_id);
			vmx->soft_vnmi_blocked = 0;
		}
	}

	if (exit_reason < kvm_vmx_max_exit_handlers &&
	    kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu);
	else {
		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (is_guest_mode(vcpu) &&
		nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return;

	if (irr == -1 || tpr < irr) {
		vmcs_write32(TPR_THRESHOLD, 0);
		return;
	}

	vmcs_write32(TPR_THRESHOLD, irr);
}

static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
	u32 sec_exec_control;

	/*
	 * There is no point in enabling virtualize x2apic mode if APICv
	 * is not active.
	 */
	if (!cpu_has_vmx_virtualize_x2apic_mode() ||
				!kvm_vcpu_apicv_active(vcpu))
		return;

	if (!cpu_need_tpr_shadow(vcpu))
		return;

	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);

	if (set) {
		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
	} else {
		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	}
	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);

	vmx_set_msr_bitmap(vcpu);
}

static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Currently we do not handle the nested case where L2 has an
	 * APIC access page of its own; that page is still pinned.
	 * Hence, we skip the case where the VCPU is in guest mode _and_
	 * L1 prepared an APIC access page for L2.
	 *
	 * For the case where L1 and L2 share the same APIC access page
	 * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
	 * in the vmcs12), this function will only update either the vmcs01
	 * or the vmcs02. If the former, the vmcs02 will be updated by
	 * prepare_vmcs02. If the latter, the vmcs01 will be updated in
	 * the next L2->L1 exit.
	 */
	if (!is_guest_mode(vcpu) ||
	    !nested_cpu_has2(vmx->nested.current_vmcs12,
			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		vmcs_write64(APIC_ACCESS_ADDR, hpa);
}

static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
	u16 status;
	u8 old;

	if (max_isr == -1)
		max_isr = 0;

	status = vmcs_read16(GUEST_INTR_STATUS);
	old = status >> 8;
	if (max_isr != old) {
		status &= 0xff;
		status |= max_isr << 8;
		vmcs_write16(GUEST_INTR_STATUS, status);
	}
}

static void vmx_set_rvi(int vector)
{
	u16 status;
	u8 old;

	if (vector == -1)
		vector = 0;

	status = vmcs_read16(GUEST_INTR_STATUS);
	old = (u8)status & 0xff;
	if ((u8)vector != old) {
		status &= ~0xff;
		status |= (u8)vector;
		vmcs_write16(GUEST_INTR_STATUS, status);
	}
}

static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
	if (!is_guest_mode(vcpu)) {
		vmx_set_rvi(max_irr);
		return;
	}

	if (max_irr == -1)
		return;

	/*
	 * In guest mode. If a vmexit is needed, vmx_check_nested_events
	 * handles it.
	 */
	if (nested_exit_on_intr(vcpu))
		return;

	/*
	 * Else, fall back to pre-APICv interrupt injection since L2
	 * is run without virtual interrupt delivery.
*/ if (!kvm_event_needs_reinjection(vcpu) && vmx_interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, max_irr, false); vmx_inject_irq(vcpu); } } static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) { if (!kvm_vcpu_apicv_active(vcpu)) return; vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); } static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) { u32 exit_intr_info; if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)) return; vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); exit_intr_info = vmx->exit_intr_info; /* Handle machine checks before interrupts are enabled */ if (is_machine_check(exit_intr_info)) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && (exit_intr_info & INTR_INFO_VALID_MASK)) { kvm_before_handle_nmi(&vmx->vcpu); asm("int $2"); kvm_after_handle_nmi(&vmx->vcpu); } } static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) { u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); register void *__sp asm(_ASM_SP); /* * If external interrupt exists, IF bit is set in rflags/eflags on the * interrupt stack frame, and interrupt will be enabled on a return * from interrupt handler. */ if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { unsigned int vector; unsigned long entry; gate_desc *desc; struct vcpu_vmx *vmx = to_vmx(vcpu); #ifdef CONFIG_X86_64 unsigned long tmp; #endif vector = exit_intr_info & INTR_INFO_VECTOR_MASK; desc = (gate_desc *)vmx->host_idt_base + vector; entry = gate_offset(*desc); asm volatile( #ifdef CONFIG_X86_64 "mov %%" _ASM_SP ", %[sp]\n\t" "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" "push $%c[ss]\n\t" "push %[sp]\n\t" #endif "pushf\n\t" "orl $0x200, (%%" _ASM_SP ")\n\t" __ASM_SIZE(push) " $%c[cs]\n\t" "call *%[entry]\n\t" : #ifdef CONFIG_X86_64 [sp]"=&r"(tmp), #endif "+r"(__sp) : [entry]"r"(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); } else local_irq_enable(); } static bool vmx_has_high_real_mode_segbase(void) { return enable_unrestricted_guest || emulate_invalid_guest_state; } static bool vmx_mpx_supported(void) { return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) && (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS); } static bool vmx_xsaves_supported(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_XSAVES; } static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) { u32 exit_intr_info; bool unblock_nmi; u8 vector; bool idtv_info_valid; idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; if (cpu_has_virtual_nmis()) { if (vmx->nmi_known_unmasked) return; /* * Can't use vmx->exit_intr_info since we're not sure what * the exit reason is. */ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; vector = exit_intr_info & INTR_INFO_VECTOR_MASK; /* * SDM 3: 27.7.1.2 (September 2008) * Re-set bit "block by NMI" before VM entry if vmexit caused by * a guest IRET fault. * SDM 3: 23.2.2 (September 2008) * Bit 12 is undefined in any of the following cases: * If the VM exit sets the valid bit in the IDT-vectoring * information field. * If the VM exit is due to a double fault. 
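 * This is why the code below re-sets NMI blocking only when the
 * exit-interruption info is valid, bit 12 (unblock-by-NMI) is set, the
 * vector is not #DF, and no IDT-vectoring information is pending.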
*/ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && vector != DF_VECTOR && !idtv_info_valid) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmx->nmi_known_unmasked = !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI); } else if (unlikely(vmx->soft_vnmi_blocked)) vmx->vnmi_blocked_time += ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); } static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, u32 idt_vectoring_info, int instr_len_field, int error_code_field) { u8 vector; int type; bool idtv_info_valid; idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); if (!idtv_info_valid) return; kvm_make_request(KVM_REQ_EVENT, vcpu); vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = true; /* * SDM 3: 27.7.1.2 (September 2008) * Clear bit "block by NMI" before VM entry if a NMI * delivery faulted. */ vmx_set_nmi_mask(vcpu, false); break; case INTR_TYPE_SOFT_EXCEPTION: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); kvm_requeue_exception_e(vcpu, vector, err); } else kvm_requeue_exception(vcpu, vector); break; case INTR_TYPE_SOFT_INTR: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_EXT_INTR: kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; default: break; } } static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_ERROR_CODE); } static void vmx_cancel_injection(struct kvm_vcpu *vcpu) { __vmx_complete_interrupts(vcpu, vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) { int i, nr_msrs; struct perf_guest_switch_msr *msrs; msrs = perf_guest_get_msrs(&nr_msrs); if (!msrs) return; for (i = 0; i < nr_msrs; i++) if (msrs[i].host == msrs[i].guest) clear_atomic_switch_msr(vmx, msrs[i].msr); else add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, msrs[i].host); } static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long debugctlmsr, cr4; /* Record the guest's net vcpu time for enforced NMI injections. 
*/ if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) vmx->entry_time = ktime_get(); /* Don't enter VMX if guest state is invalid, let the exit handler start emulation until we arrive back to a valid state */ if (vmx->emulation_required) return; if (vmx->ple_window_dirty) { vmx->ple_window_dirty = false; vmcs_write32(PLE_WINDOW, vmx->ple_window); } if (vmx->nested.sync_shadow_vmcs) { copy_vmcs12_to_shadow(vmx); vmx->nested.sync_shadow_vmcs = false; } if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); cr4 = cr4_read_shadow(); if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { vmcs_writel(HOST_CR4, cr4); vmx->host_state.vmcs_host_cr4 = cr4; } /* When single-stepping over STI and MOV SS, we must clear the * corresponding interruptibility bits in the guest state. Otherwise * vmentry fails as it then expects bit 14 (BS) in pending debug * exceptions being set, but that's not correct for the guest debugging * case. */ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); if (vmx->guest_pkru_valid) __write_pkru(vmx->guest_pkru); atomic_switch_perf_msrs(vmx); debugctlmsr = get_debugctlmsr(); vmx->__launched = vmx->loaded_vmcs->launched; asm( /* Store host registers */ "push %%" _ASM_DX "; push %%" _ASM_BP ";" "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */ "push %%" _ASM_CX " \n\t" "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t" "je 1f \n\t" "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t" __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" "1: \n\t" /* Reload cr2 if changed */ "mov %c[cr2](%0), %%" _ASM_AX " \n\t" "mov %%cr2, %%" _ASM_DX " \n\t" "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t" "je 2f \n\t" "mov %%" _ASM_AX", %%cr2 \n\t" "2: \n\t" /* Check if vmlaunch of vmresume is needed */ "cmpl $0, %c[launched](%0) \n\t" /* Load guest registers. Don't clobber flags. 
*/ "mov %c[rax](%0), %%" _ASM_AX " \n\t" "mov %c[rbx](%0), %%" _ASM_BX " \n\t" "mov %c[rdx](%0), %%" _ASM_DX " \n\t" "mov %c[rsi](%0), %%" _ASM_SI " \n\t" "mov %c[rdi](%0), %%" _ASM_DI " \n\t" "mov %c[rbp](%0), %%" _ASM_BP " \n\t" #ifdef CONFIG_X86_64 "mov %c[r8](%0), %%r8 \n\t" "mov %c[r9](%0), %%r9 \n\t" "mov %c[r10](%0), %%r10 \n\t" "mov %c[r11](%0), %%r11 \n\t" "mov %c[r12](%0), %%r12 \n\t" "mov %c[r13](%0), %%r13 \n\t" "mov %c[r14](%0), %%r14 \n\t" "mov %c[r15](%0), %%r15 \n\t" #endif "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */ /* Enter guest mode */ "jne 1f \n\t" __ex(ASM_VMX_VMLAUNCH) "\n\t" "jmp 2f \n\t" "1: " __ex(ASM_VMX_VMRESUME) "\n\t" "2: " /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" "mov %%" _ASM_AX ", %c[rax](%0) \n\t" "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" __ASM_SIZE(pop) " %c[rcx](%0) \n\t" "mov %%" _ASM_DX ", %c[rdx](%0) \n\t" "mov %%" _ASM_SI ", %c[rsi](%0) \n\t" "mov %%" _ASM_DI ", %c[rdi](%0) \n\t" "mov %%" _ASM_BP ", %c[rbp](%0) \n\t" #ifdef CONFIG_X86_64 "mov %%r8, %c[r8](%0) \n\t" "mov %%r9, %c[r9](%0) \n\t" "mov %%r10, %c[r10](%0) \n\t" "mov %%r11, %c[r11](%0) \n\t" "mov %%r12, %c[r12](%0) \n\t" "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" "setbe %c[fail](%0) \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" ".popsection" : : "c"(vmx), "d"((unsigned long)HOST_RSP), [launched]"i"(offsetof(struct vcpu_vmx, __launched)), [fail]"i"(offsetof(struct vcpu_vmx, fail)), [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), #ifdef CONFIG_X86_64 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), [wordsize]"i"(sizeof(ulong)) : "cc", "memory" #ifdef CONFIG_X86_64 , "rax", "rbx", "rdi", "rsi" , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" #else , "eax", "ebx", "edi", "esi" #endif ); /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ if (debugctlmsr) update_debugctlmsr(debugctlmsr); #ifndef CONFIG_X86_64 /* * The sysexit path does not restore ds/es, so we must set them to * a reasonable value ourselves. * * We can't defer this to vmx_load_host_state() since that function * may be executed in interrupt context, which saves and restore segments * around it, nullifying its effect. 
*/ loadsegment(ds, __USER_DS); loadsegment(es, __USER_DS); #endif vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) | (1 << VCPU_EXREG_RFLAGS) | (1 << VCPU_EXREG_PDPTR) | (1 << VCPU_EXREG_SEGMENTS) | (1 << VCPU_EXREG_CR3)); vcpu->arch.regs_dirty = 0; vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); vmx->loaded_vmcs->launched = 1; vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); /* * eager fpu is enabled if PKEY is supported and CR4 is switched * back on host, so it is safe to read guest PKRU from current * XSAVE. */ if (boot_cpu_has(X86_FEATURE_OSPKE)) { vmx->guest_pkru = __read_pkru(); if (vmx->guest_pkru != vmx->host_pkru) { vmx->guest_pkru_valid = true; __write_pkru(vmx->host_pkru); } else vmx->guest_pkru_valid = false; } /* * the KVM_REQ_EVENT optimization bit is only on for one entry, and if * we did not inject a still-pending event to L1 now because of * nested_run_pending, we need to re-enable this bit. */ if (vmx->nested.nested_run_pending) kvm_make_request(KVM_REQ_EVENT, vcpu); vmx->nested.nested_run_pending = 0; vmx_complete_atomic_exit(vmx); vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); } static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int cpu; if (vmx->loaded_vmcs == &vmx->vmcs01) return; cpu = get_cpu(); vmx->loaded_vmcs = &vmx->vmcs01; vmx_vcpu_put(vcpu); vmx_vcpu_load(vcpu, cpu); vcpu->cpu = cpu; put_cpu(); } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (enable_pml) vmx_destroy_pml_buffer(vmx); free_vpid(vmx->vpid); leave_guest_mode(vcpu); vmx_load_vmcs01(vcpu); free_nested(vmx); free_loaded_vmcs(vmx->loaded_vmcs); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); } static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); int cpu; if (!vmx) return ERR_PTR(-ENOMEM); vmx->vpid = allocate_vpid(); err = kvm_vcpu_init(&vmx->vcpu, kvm, id); if (err) goto free_vcpu; vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) > PAGE_SIZE); err = -ENOMEM; if (!vmx->guest_msrs) { goto uninit_vcpu; } vmx->loaded_vmcs = &vmx->vmcs01; vmx->loaded_vmcs->vmcs = alloc_vmcs(); if (!vmx->loaded_vmcs->vmcs) goto free_msrs; if (!vmm_exclusive) kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id()))); loaded_vmcs_init(vmx->loaded_vmcs); if (!vmm_exclusive) kvm_cpu_vmxoff(); cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); vmx->vcpu.cpu = cpu; err = vmx_vcpu_setup(vmx); vmx_vcpu_put(&vmx->vcpu); put_cpu(); if (err) goto free_vmcs; if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { err = alloc_apic_access_page(kvm); if (err) goto free_vmcs; } if (enable_ept) { if (!kvm->arch.ept_identity_map_addr) kvm->arch.ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; err = init_rmode_identity_map(kvm); if (err) goto free_vmcs; } if (nested) { nested_vmx_setup_ctls_msrs(vmx); vmx->nested.vpid02 = allocate_vpid(); } vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; vmx->nested.current_vmcs12 = NULL; /* * If PML is turned on, failure on enabling PML just results in failure * of creating the vcpu, therefore we can simplify PML logic (by * avoiding dealing with cases, such as enabling PML partially on vcpus * for the guest, etc. 
*/ if (enable_pml) { err = vmx_create_pml_buffer(vmx); if (err) goto free_vmcs; } return &vmx->vcpu; free_vmcs: free_vpid(vmx->nested.vpid02); free_loaded_vmcs(vmx->loaded_vmcs); free_msrs: kfree(vmx->guest_msrs); uninit_vcpu: kvm_vcpu_uninit(&vmx->vcpu); free_vcpu: free_vpid(vmx->vpid); kmem_cache_free(kvm_vcpu_cache, vmx); return ERR_PTR(err); } static void __init vmx_check_processor_compat(void *rtn) { struct vmcs_config vmcs_conf; *(int *)rtn = 0; if (setup_vmcs_config(&vmcs_conf) < 0) *(int *)rtn = -EIO; if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", smp_processor_id()); *(int *)rtn = -EIO; } } static int get_ept_level(void) { return VMX_EPT_DEFAULT_GAW + 1; } static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { u8 cache; u64 ipat = 0; /* For VT-d and EPT combination * 1. MMIO: always map as UC * 2. EPT with VT-d: * a. VT-d without snooping control feature: can't guarantee the * result, try to trust guest. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ if (is_mmio) { cache = MTRR_TYPE_UNCACHABLE; goto exit; } if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { ipat = VMX_EPT_IPAT_BIT; cache = MTRR_TYPE_WRBACK; goto exit; } if (kvm_read_cr0(vcpu) & X86_CR0_CD) { ipat = VMX_EPT_IPAT_BIT; if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) cache = MTRR_TYPE_WRBACK; else cache = MTRR_TYPE_UNCACHABLE; goto exit; } cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); exit: return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; } static int vmx_get_lpage_level(void) { if (enable_ept && !cpu_has_vmx_ept_1g_page()) return PT_DIRECTORY_LEVEL; else /* For shadow and EPT supported 1GB page */ return PT_PDPE_LEVEL; } static void vmcs_set_secondary_exec_control(u32 new_ctl) { /* * These bits in the secondary execution controls field * are dynamic, the others are mostly based on the hypervisor * architecture and the guest's CPUID. Do not touch the * dynamic bits. 
*/ u32 mask = SECONDARY_EXEC_SHADOW_VMCS | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, (new_ctl & ~mask) | (cur_ctl & mask)); } static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; struct vcpu_vmx *vmx = to_vmx(vcpu); u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx); if (vmx_rdtscp_supported()) { bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu); if (!rdtscp_enabled) secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP; if (nested) { if (rdtscp_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_RDTSCP; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_RDTSCP; } } /* Exposing INVPCID only when PCID is exposed */ best = kvm_find_cpuid_entry(vcpu, 0x7, 0); if (vmx_invpcid_supported() && (!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) || !guest_cpuid_has_pcid(vcpu))) { secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID; if (best) best->ebx &= ~bit(X86_FEATURE_INVPCID); } if (cpu_has_secondary_exec_ctrls()) vmcs_set_secondary_exec_control(secondary_exec_ctl); if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) { if (guest_cpuid_has_pcommit(vcpu)) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_PCOMMIT; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_PCOMMIT; } } static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { if (func == 1 && nested) entry->ecx |= bit(X86_FEATURE_VMX); } static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 exit_reason; if (fault->error_code & PFERR_RSVD_MASK) exit_reason = EXIT_REASON_EPT_MISCONFIG; else exit_reason = EXIT_REASON_EPT_VIOLATION; nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification); vmcs12->guest_physical_address = fault->address; } /* Callbacks for nested_ept_init_mmu_context: */ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) { /* return the page table to be shadowed - in our case, EPT12 */ return get_vmcs12(vcpu)->ept_pointer; } static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); kvm_init_shadow_ept_mmu(vcpu, to_vmx(vcpu)->nested.nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT); vcpu->arch.mmu.set_cr3 = vmx_set_cr3; vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) { vcpu->arch.walk_mmu = &vcpu->arch.mmu; } static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code) { bool inequality, bit; bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; inequality = (error_code & vmcs12->page_fault_error_code_mask) != vmcs12->page_fault_error_code_match; return inequality ^ bit; } static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); WARN_ON(!is_guest_mode(vcpu)); if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); else kvm_inject_page_fault(vcpu, fault); } static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); int maxphyaddr = cpuid_maxphyaddr(vcpu); if (nested_cpu_has2(vmcs12, 
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { if (!PAGE_ALIGNED(vmcs12->apic_access_addr) || vmcs12->apic_access_addr >> maxphyaddr) return false; /* * Translate L1 physical address to host physical * address for vmcs02. Keep the page pinned, so this * physical address remains valid. We keep a reference * to it so we can release it later. */ if (vmx->nested.apic_access_page) /* shouldn't happen */ nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = nested_get_page(vcpu, vmcs12->apic_access_addr); } if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) || vmcs12->virtual_apic_page_addr >> maxphyaddr) return false; if (vmx->nested.virtual_apic_page) /* shouldn't happen */ nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = nested_get_page(vcpu, vmcs12->virtual_apic_page_addr); /* * Failing the vm entry is _not_ what the processor does * but it's basically the only possibility we have. * We could still enter the guest if CR8 load exits are * enabled, CR8 store exits are enabled, and virtualize APIC * access is disabled; in this case the processor would never * use the TPR shadow and we could simply clear the bit from * the execution control. But such a configuration is useless, * so let's keep the code simple. */ if (!vmx->nested.virtual_apic_page) return false; } if (nested_cpu_has_posted_intr(vmcs12)) { if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) || vmcs12->posted_intr_desc_addr >> maxphyaddr) return false; if (vmx->nested.pi_desc_page) { /* shouldn't happen */ kunmap(vmx->nested.pi_desc_page); nested_release_page(vmx->nested.pi_desc_page); } vmx->nested.pi_desc_page = nested_get_page(vcpu, vmcs12->posted_intr_desc_addr); if (!vmx->nested.pi_desc_page) return false; vmx->nested.pi_desc = (struct pi_desc *)kmap(vmx->nested.pi_desc_page); if (!vmx->nested.pi_desc) { nested_release_page_clean(vmx->nested.pi_desc_page); return false; } vmx->nested.pi_desc = (struct pi_desc *)((void *)vmx->nested.pi_desc + (unsigned long)(vmcs12->posted_intr_desc_addr & (PAGE_SIZE - 1))); } return true; } static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) { u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; struct vcpu_vmx *vmx = to_vmx(vcpu); if (vcpu->arch.virtual_tsc_khz == 0) return; /* Make sure short timeouts reliably trigger an immediate vmexit. * hrtimer_start does not guarantee this. */ if (preemption_timeout <= 1) { vmx_preemption_timer_fn(&vmx->nested.preemption_timer); return; } preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; preemption_timeout *= 1000000; do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); hrtimer_start(&vmx->nested.preemption_timer, ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); } static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { int maxphyaddr; u64 addr; if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) return 0; if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) { WARN_ON(1); return -EINVAL; } maxphyaddr = cpuid_maxphyaddr(vcpu); if (!PAGE_ALIGNED(vmcs12->msr_bitmap) || ((addr + PAGE_SIZE) >> maxphyaddr)) return -EINVAL; return 0; } /* * Merge L0's and L1's MSR bitmap, return false to indicate that * we do not use the hardware. 
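 * Today the merge is only attempted when L1 enables virtualize-x2apic
 * mode; in every other configuration the function below returns false
 * and we fall back to intercepting all of L2's MSR accesses.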
*/ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { int msr; struct page *page; unsigned long *msr_bitmap; if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) return false; page = nested_get_page(vcpu, vmcs12->msr_bitmap); if (!page) { WARN_ON(1); return false; } msr_bitmap = (unsigned long *)kmap(page); if (!msr_bitmap) { nested_release_page_clean(page); WARN_ON(1); return false; } if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { if (nested_cpu_has_apic_reg_virt(vmcs12)) for (msr = 0x800; msr <= 0x8ff; msr++) nested_vmx_disable_intercept_for_msr( msr_bitmap, vmx_msr_bitmap_nested, msr, MSR_TYPE_R); /* TPR is allowed */ nested_vmx_disable_intercept_for_msr(msr_bitmap, vmx_msr_bitmap_nested, APIC_BASE_MSR + (APIC_TASKPRI >> 4), MSR_TYPE_R | MSR_TYPE_W); if (nested_cpu_has_vid(vmcs12)) { /* EOI and self-IPI are allowed */ nested_vmx_disable_intercept_for_msr( msr_bitmap, vmx_msr_bitmap_nested, APIC_BASE_MSR + (APIC_EOI >> 4), MSR_TYPE_W); nested_vmx_disable_intercept_for_msr( msr_bitmap, vmx_msr_bitmap_nested, APIC_BASE_MSR + (APIC_SELF_IPI >> 4), MSR_TYPE_W); } } else { /* * Enable reading intercept of all the x2apic * MSRs. We should not rely on vmcs12 to do any * optimizations here, it may have been modified * by L1. */ for (msr = 0x800; msr <= 0x8ff; msr++) __vmx_enable_intercept_for_msr( vmx_msr_bitmap_nested, msr, MSR_TYPE_R); __vmx_enable_intercept_for_msr( vmx_msr_bitmap_nested, APIC_BASE_MSR + (APIC_TASKPRI >> 4), MSR_TYPE_W); __vmx_enable_intercept_for_msr( vmx_msr_bitmap_nested, APIC_BASE_MSR + (APIC_EOI >> 4), MSR_TYPE_W); __vmx_enable_intercept_for_msr( vmx_msr_bitmap_nested, APIC_BASE_MSR + (APIC_SELF_IPI >> 4), MSR_TYPE_W); } kunmap(page); nested_release_page_clean(page); return true; } static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && !nested_cpu_has_apic_reg_virt(vmcs12) && !nested_cpu_has_vid(vmcs12) && !nested_cpu_has_posted_intr(vmcs12)) return 0; /* * If virtualize x2apic mode is enabled, * virtualize apic access must be disabled. */ if (nested_cpu_has_virt_x2apic_mode(vmcs12) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) return -EINVAL; /* * If virtual interrupt delivery is enabled, * we must exit on external interrupts. */ if (nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)) return -EINVAL; /* * bits 15:8 should be zero in posted_intr_nv, * the descriptor address has been already checked * in nested_get_vmcs12_pages. */ if (nested_cpu_has_posted_intr(vmcs12) && (!nested_cpu_has_vid(vmcs12) || !nested_exit_intr_ack_set(vcpu) || vmcs12->posted_intr_nv & 0xff00)) return -EINVAL; /* tpr shadow is needed by all apicv features. 
*/ if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return -EINVAL; return 0; } static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, unsigned long count_field, unsigned long addr_field) { int maxphyaddr; u64 count, addr; if (vmcs12_read_any(vcpu, count_field, &count) || vmcs12_read_any(vcpu, addr_field, &addr)) { WARN_ON(1); return -EINVAL; } if (count == 0) return 0; maxphyaddr = cpuid_maxphyaddr(vcpu); if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) { pr_warn_ratelimited( "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)", addr_field, maxphyaddr, count, addr); return -EINVAL; } return 0; } static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (vmcs12->vm_exit_msr_load_count == 0 && vmcs12->vm_exit_msr_store_count == 0 && vmcs12->vm_entry_msr_load_count == 0) return 0; /* Fast path */ if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT, VM_EXIT_MSR_LOAD_ADDR) || nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT, VM_EXIT_MSR_STORE_ADDR) || nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT, VM_ENTRY_MSR_LOAD_ADDR)) return -EINVAL; return 0; } static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { /* x2APIC MSR accesses are not allowed */ if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8) return -EINVAL; if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */ e->index == MSR_IA32_UCODE_REV) return -EINVAL; if (e->reserved != 0) return -EINVAL; return 0; } static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { if (e->index == MSR_FS_BASE || e->index == MSR_GS_BASE || e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */ nested_vmx_msr_check_common(vcpu, e)) return -EINVAL; return 0; } static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */ nested_vmx_msr_check_common(vcpu, e)) return -EINVAL; return 0; } /* * Load guest's/host's msr at nested entry/exit. * return 0 for success, entry index for failure. 
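 *
 * Each entry in the guest-physical MSR areas walked here is the 16-byte
 * format defined by the SDM (struct vmx_msr_entry: a u32 MSR index, a
 * u32 reserved field that must be zero, and a u64 value), which is why
 * entry i of an area based at 'gpa' lives at gpa + i * sizeof(e).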
*/ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) { u32 i; struct vmx_msr_entry e; struct msr_data msr; msr.host_initiated = false; for (i = 0; i < count; i++) { if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), &e, sizeof(e))) { pr_warn_ratelimited( "%s cannot read MSR entry (%u, 0x%08llx)\n", __func__, i, gpa + i * sizeof(e)); goto fail; } if (nested_vmx_load_msr_check(vcpu, &e)) { pr_warn_ratelimited( "%s check failed (%u, 0x%x, 0x%x)\n", __func__, i, e.index, e.reserved); goto fail; } msr.index = e.index; msr.data = e.value; if (kvm_set_msr(vcpu, &msr)) { pr_warn_ratelimited( "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", __func__, i, e.index, e.value); goto fail; } } return 0; fail: return i + 1; } static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) { u32 i; struct vmx_msr_entry e; for (i = 0; i < count; i++) { struct msr_data msr_info; if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), &e, 2 * sizeof(u32))) { pr_warn_ratelimited( "%s cannot read MSR entry (%u, 0x%08llx)\n", __func__, i, gpa + i * sizeof(e)); return -EINVAL; } if (nested_vmx_store_msr_check(vcpu, &e)) { pr_warn_ratelimited( "%s check failed (%u, 0x%x, 0x%x)\n", __func__, i, e.index, e.reserved); return -EINVAL; } msr_info.host_initiated = false; msr_info.index = e.index; if (kvm_get_msr(vcpu, &msr_info)) { pr_warn_ratelimited( "%s cannot read MSR (%u, 0x%x)\n", __func__, i, e.index); return -EINVAL; } if (kvm_vcpu_write_guest(vcpu, gpa + i * sizeof(e) + offsetof(struct vmx_msr_entry, value), &msr_info.data, sizeof(msr_info.data))) { pr_warn_ratelimited( "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", __func__, i, e.index, msr_info.data); return -EINVAL; } } return 0; } /* * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 * guest in a way that will both be appropriate to L1's requests, and our * needs. In addition to modifying the active vmcs (which is vmcs02), this * function also has additional necessary side-effects, like setting various * vcpu->arch fields. 
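 */

/*
 * A tiny illustration (not part of this file) of the kind of "merging"
 * prepare_vmcs02() does: the TSC offset programmed into vmcs02 stacks
 * L0's offset and L1's offset, mirroring the TSC_OFFSET write in the
 * function body below. The helper name is made up for exposition.
 */
static inline u64 vmcs02_tsc_offset(u64 vmcs01_tsc_offset,
				    const struct vmcs12 *vmcs12)
{
	/* an L2 RDTSC observes both L0's and L1's adjustments at once */
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		return vmcs01_tsc_offset + vmcs12->tsc_offset;
	return vmcs01_tsc_offset;
}

/*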
*/ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); } else { kvm_set_dr(vcpu, 7, vcpu->arch.dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, vmcs12->vm_entry_intr_info_field); vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, vmcs12->vm_entry_exception_error_code); vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmcs12->vm_entry_instruction_len); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, vmcs12->guest_interruptibility_info); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); vmx_set_rflags(vcpu, vmcs12->guest_rflags); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, vmcs12->guest_pending_dbg_exceptions); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); if (nested_cpu_has_xsaves(vmcs12)) vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); vmcs_write64(VMCS_LINK_POINTER, -1ull); exec_control = vmcs12->pin_based_vm_exec_control; exec_control |= vmcs_config.pin_based_exec_ctrl; exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; if (nested_cpu_has_posted_intr(vmcs12)) { /* * Note that we use L0's vector here and in * vmx_deliver_nested_posted_interrupt. 
*/ vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; vmx->nested.pi_pending = false; vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write64(POSTED_INTR_DESC_ADDR, page_to_phys(vmx->nested.pi_desc_page) + (unsigned long)(vmcs12->posted_intr_desc_addr & (PAGE_SIZE - 1))); } else exec_control &= ~PIN_BASED_POSTED_INTR; vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); vmx->nested.preemption_timer_expired = false; if (nested_cpu_has_preemption_timer(vmcs12)) vmx_start_preemption_timer(vcpu); /* * Whether page-faults are trapped is determined by a combination of * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. * If enable_ept, L0 doesn't care about page faults and we should * set all of these to L1's desires. However, if !enable_ept, L0 does * care about (at least some) page faults, and because it is not easy * (if at all possible?) to merge L0 and L1's desires, we simply ask * to exit on each and every L2 page fault. This is done by setting * MASK=MATCH=0 and (see below) EB.PF=1. * Note that below we don't need special code to set EB.PF beyond the * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when * !enable_ept, EB.PF is 1, so the "or" will always be 1. * * A problem with this approach (when !enable_ept) is that L1 may be * injected with more page faults than it asked for. This could have * caused problems, but in practice existing hypervisors don't care. * To fix this, we will need to emulate the PFEC checking (on the L1 * page tables), using walk_addr(), when injecting PFs to L1. */ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, enable_ept ? vmcs12->page_fault_error_code_mask : 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, enable_ept ? vmcs12->page_fault_error_code_match : 0); if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx_secondary_exec_control(vmx); /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_PCOMMIT); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) exec_control |= vmcs12->secondary_vm_exec_control; if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) { /* * If translation failed, no matter: This feature asks * to exit when accessing the given address, and if it * can never be accessed, this feature won't do * anything anyway. */ if (!vmx->nested.apic_access_page) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; else vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(vmx->nested.apic_access_page)); } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) && cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; kvm_vcpu_reload_apic_access_page(vcpu); } if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); vmcs_write16(GUEST_INTR_STATUS, vmcs12->guest_intr_status); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } /* * Set host-state according to L0's settings (vmcs12 is irrelevant here) * Some constant fields are set here by vmx_set_constant_host_state(). 
* Other fields are different per CPU, and will be set later when * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. */ vmx_set_constant_host_state(vmx); /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before * entry, but only if the current (host) sp changed from the value * we wrote last (vmx->host_rsp). This cache is no longer relevant * if we switch vmcs, and rather than hold a separate cache per vmcs, * here we just force the write to happen on entry. */ vmx->host_rsp = 0; exec_control = vmx_exec_control(vmx); /* L0's desires */ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; exec_control &= ~CPU_BASED_TPR_SHADOW; exec_control |= vmcs12->cpu_based_vm_exec_control; if (exec_control & CPU_BASED_TPR_SHADOW) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, page_to_phys(vmx->nested.virtual_apic_page)); vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); } if (cpu_has_vmx_msr_bitmap() && exec_control & CPU_BASED_USE_MSR_BITMAPS) { nested_vmx_merge_msr_bitmap(vcpu, vmcs12); /* MSR_BITMAP will be set by following vmx_set_efer. */ } else exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; /* * Merging of IO bitmap not currently supported. * Rather, exit every time. */ exec_control &= ~CPU_BASED_USE_IO_BITMAPS; exec_control |= CPU_BASED_UNCOND_IO_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* L2->L1 exit controls are emulated - the hardware exit is to L0 so * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits are further modified by vmx_set_efer() below. */ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. */ vm_entry_controls_init(vmx, (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); vcpu->arch.pat = vmcs12->guest_ia32_pat; } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); set_cr4_guest_host_mask(vmx); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); else vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); if (enable_vpid) { /* * There is no direct mapping between vpid02 and vpid12, the * vpid02 is per-vCPU for L0 and reused while the value of * vpid12 is changed w/ one invvpid during nested vmentry. * The vpid12 is allocated by L1 for L2, so it will not * influence global bitmap(for vpid01 and vpid02 allocation) * even if spawn a lot of nested vCPUs. 
*/ if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { vmx->nested.last_vpid = vmcs12->virtual_processor_id; __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); } } else { vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmx_flush_tlb(vcpu); } } if (nested_cpu_has_ept(vmcs12)) { kvm_mmu_unload(vcpu); nested_ept_init_mmu_context(vcpu); } if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->guest_ia32_efer; else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); /* * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified * TS bit (for lazy fpu) and bits which we consider mandatory enabled. * The CR0_READ_SHADOW is what L2 should have expected to read given * the specifications by L1; it's not enough to take * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we may * have more bits than L1 expected. */ vmx_set_cr0(vcpu, vmcs12->guest_cr0); vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); vmx_set_cr4(vcpu, vmcs12->guest_cr4); vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); /* shadow page tables on either EPT or shadow page tables */ kvm_set_cr3(vcpu, vmcs12->guest_cr3); kvm_mmu_reset_context(vcpu); if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; /* * L1 may access the L2's PDPTR, so save them to construct vmcs12 */ if (enable_ept) { vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); } kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); } /* * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 * for running an L2 nested guest. */ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); int cpu; struct loaded_vmcs *vmcs02; bool ia32e; u32 msr_entry_idx; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) return 1; skip_emulated_instruction(vcpu); vmcs12 = get_vmcs12(vcpu); if (enable_shadow_vmcs) copy_shadow_to_vmcs12(vmx); /* * The nested entry process starts with enforcing various prerequisites * on vmcs12 as required by the Intel SDM, acting appropriately when * they fail: As the SDM explains, some conditions should cause the * instruction to fail, while others will cause the instruction to seem * to succeed, but return an EXIT_REASON_INVALID_STATE. * To speed up the normal (success) code path, we should avoid checking * for misconfigurations which will anyway be caught by the processor * when using the merged vmcs02. */ if (vmcs12->launch_state == launch) { nested_vmx_failValid(vcpu, launch ?
VMXERR_VMLAUNCH_NONCLEAR_VMCS : VMXERR_VMRESUME_NONLAUNCHED_VMCS); return 1; } if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (!nested_get_vmcs12_pages(vcpu, vmcs12)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, vmx->nested.nested_vmx_true_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high) || !vmx_control_verify(vmcs12->secondary_vm_exec_control, vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high) || !vmx_control_verify(vmcs12->pin_based_vm_exec_control, vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high) || !vmx_control_verify(vmcs12->vm_exit_controls, vmx->nested.nested_vmx_true_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high) || !vmx_control_verify(vmcs12->vm_entry_controls, vmx->nested.nested_vmx_true_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) || ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); return 1; } if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) || ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } if (vmcs12->vmcs_link_pointer != -1ull) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR); return 1; } /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: * - Bits reserved in the IA32_EFER MSR must be 0. * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of * the IA-32e mode guest VM-exit control. It must also be identical * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to * CR0.PG) is 1. */ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) { ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || ((vmcs12->guest_cr0 & X86_CR0_PG) && ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } } /* * If the load IA32_EFER VM-exit control is 1, bits reserved in the * IA32_EFER MSR must be 0 in the field for that register. In addition, * the values of the LMA and LME bits in the field must each be that of * the host address-space size VM-exit control. 
*/ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { ia32e = (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } } /* * We're finally done with prerequisite checking, and can start with * the nested entry. */ vmcs02 = nested_get_current_vmcs02(vmx); if (!vmcs02) return -ENOMEM; enter_guest_mode(vcpu); vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); cpu = get_cpu(); vmx->loaded_vmcs = vmcs02; vmx_vcpu_put(vcpu); vmx_vcpu_load(vcpu, cpu); vcpu->cpu = cpu; put_cpu(); vmx_segment_cache_clear(vmx); prepare_vmcs02(vcpu, vmcs12); msr_entry_idx = nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr, vmcs12->vm_entry_msr_load_count); if (msr_entry_idx) { leave_guest_mode(vcpu); vmx_load_vmcs01(vcpu); nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx); return 1; } vmcs12->launch_state = 1; if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) return kvm_vcpu_halt(vcpu); vmx->nested.nested_run_pending = 1; /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet * returned as far as L1 is concerned. It will only return (and set * the success flag) when L2 exits (see nested_vmx_vmexit()). */ return 1; } /* * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). * This function returns the new value we should put in vmcs12.guest_cr0. * It's not enough to just return the vmcs02 GUEST_CR0. Rather, * 1. Bits that neither L0 nor L1 trapped were set directly by L2 and are now * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 * didn't trap the bit, because if L1 did, so would L0). * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have * been modified by L2, and L1 knows it. So just leave the old value of * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 * isn't relevant, because if L0 traps this bit it can set it to anything. * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have * changed these bits, and therefore they need to be updated, but L0 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
*/ static inline unsigned long vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | vcpu->arch.cr0_guest_owned_bits)); } static inline unsigned long vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | vcpu->arch.cr4_guest_owned_bits)); } static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u32 idt_vectoring; unsigned int nr; if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { nr = vcpu->arch.exception.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (kvm_exception_is_soft(nr)) { vmcs12->vm_exit_instruction_len = vcpu->arch.event_exit_inst_len; idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; } else idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; if (vcpu->arch.exception.has_error_code) { idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; vmcs12->idt_vectoring_error_code = vcpu->arch.exception.error_code; } vmcs12->idt_vectoring_info_field = idt_vectoring; } else if (vcpu->arch.nmi_injected) { vmcs12->idt_vectoring_info_field = INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; } else if (vcpu->arch.interrupt.pending) { nr = vcpu->arch.interrupt.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { idt_vectoring |= INTR_TYPE_SOFT_INTR; vmcs12->vm_entry_instruction_len = vcpu->arch.event_exit_inst_len; } else idt_vectoring |= INTR_TYPE_EXT_INTR; vmcs12->idt_vectoring_info_field = idt_vectoring; } } static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); return 0; } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { if (vmx->nested.nested_run_pending || vcpu->arch.interrupt.pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK, 0); /* * The NMI-triggered VM exit counts as injection: * clear this one and block further NMIs. */ vcpu->arch.nmi_pending = 0; vmx_set_nmi_mask(vcpu, true); return 0; } if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && nested_exit_on_intr(vcpu)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); return 0; } return vmx_complete_nested_posted_interrupt(vcpu); } static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); u64 value; if (ktime_to_ns(remaining) <= 0) return 0; value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; do_div(value, 1000000); return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; } /* * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits * and we want to prepare to run its L1 parent. 
L1 keeps a vmcs for L2 (vmcs12), * and this function updates it to reflect the changes to the guest state while * L2 was running (and perhaps made some exits which were handled directly by L0 * without going back to L1), and to reflect the exit reason. * Note that we do not have to copy here all VMCS fields, just those that * could have changed by the L2 guest or the exit - i.e., the guest-state and * exit-information fields only. Other fields are modified by L1 with VMWRITE, * which already writes to vmcs12 directly. */ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { /* update guest state fields: */ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); vmcs12->guest_interruptibility_info = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; else vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; if (nested_cpu_has_preemption_timer(vmcs12)) { if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) vmcs12->vmx_preemption_timer_value = vmx_get_preemption_timer_value(vcpu); 
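	/*
	 * A hedged worked example of the conversion done by
	 * vmx_get_preemption_timer_value() above, with made-up numbers:
	 * remaining = 1 ms = 1,000,000 ns and virtual_tsc_khz = 2,000,000
	 * (a 2 GHz guest TSC) give
	 * value = 1,000,000 * 2,000,000 / 1,000,000 = 2,000,000 TSC cycles;
	 * shifting right by VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE (assumed
	 * here to be 5, i.e. the emulated timer ticks once per 2^5 TSC
	 * cycles) yields 2,000,000 >> 5 = 62,500 timer units saved back into
	 * vmcs12 above.
	 */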
hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); } /* * In some cases (usually, nested EPT), L2 is allowed to change its * own CR3 without exiting. If it has changed it, we must keep it. * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. * * Additionally, restore L2's PDPTR to vmcs12. */ if (enable_ept) { vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); } if (nested_cpu_has_vid(vmcs12)) vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); vmcs12->vm_entry_controls = (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); } /* TODO: These cannot have changed unless we have MSR bitmaps and * the relevant bit asks not to trap the change */ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) vmcs12->guest_ia32_efer = vcpu->arch.efer; vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); if (kvm_mpx_supported()) vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); if (nested_cpu_has_xsaves(vmcs12)) vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP); /* update exit information fields: */ vmcs12->vm_exit_reason = exit_reason; vmcs12->exit_qualification = exit_qualification; vmcs12->vm_exit_intr_info = exit_intr_info; if ((vmcs12->vm_exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); vmcs12->idt_vectoring_info_field = 0; vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { /* vm_entry_intr_info_field is cleared on exit. Emulate this * instead of reading the real value. */ vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; /* * Transfer the event that L0 or L1 may have wanted to inject into * L2 to IDT_VECTORING_INFO_FIELD. */ vmcs12_save_pending_event(vcpu, vmcs12); } /* * Drop what we picked up for L2 via vmx_complete_interrupts. It is * preserved above and would only end up incorrectly in L1. */ vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); } /* * A part of what we need to do when the nested L2 guest exits and we want to * run its L1 parent is to reset L1's guest state to the host state specified * in vmcs12. * This function is to be called not only on normal nested exit, but also on * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry * Failures During or After Loading Guest State"). * This function should be called when the active VMCS is L1's (vmcs01).
*/ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); vmx_set_efer(vcpu, vcpu->arch.efer); kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); /* * Note that calling vmx_set_cr0 is important, even if cr0 hasn't * actually changed, because it depends on the current state of * fpu_active (which may have changed). * Note that vmx_set_cr0 refers to efer set above. */ vmx_set_cr0(vcpu, vmcs12->host_cr0); /* * If we did fpu_activate()/fpu_deactivate() during L2's run, we need * to apply the same changes to L1's vmcs. We just set cr0 correctly, * but we also need to update cr0_guest_host_mask and exception_bitmap. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0); vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01 * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask(); */ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); kvm_set_cr4(vcpu, vmcs12->host_cr4); nested_ept_uninit_mmu_context(vcpu); kvm_set_cr3(vcpu, vmcs12->host_cr3); kvm_mmu_reset_context(vcpu); if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; if (enable_vpid) { /* * Trivially support vpid by letting L2s share their parent * L1's vpid. TODO: move to a more elaborate solution, giving * each L2 its own vpid and exposing the vpid feature to L1. */ vmx_flush_tlb(vcpu); } vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
*/ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, 0); if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); vcpu->arch.pat = vmcs12->host_ia32_pat; } if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, vmcs12->host_ia32_perf_global_ctrl); /* Set L1 segment info according to Intel SDM 27.5.2 Loading Host Segment and Descriptor-Table Registers */ seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .selector = vmcs12->host_cs_selector, .type = 11, .present = 1, .s = 1, .g = 1 }; if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) seg.l = 1; else seg.db = 1; vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .type = 3, .present = 1, .s = 1, .db = 1, .g = 1 }; seg.selector = vmcs12->host_ds_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); seg.selector = vmcs12->host_es_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); seg.selector = vmcs12->host_ss_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); seg.selector = vmcs12->host_fs_selector; seg.base = vmcs12->host_fs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); seg.selector = vmcs12->host_gs_selector; seg.base = vmcs12->host_gs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); seg = (struct kvm_segment) { .base = vmcs12->host_tr_base, .limit = 0x67, .selector = vmcs12->host_tr_selector, .type = 11, .present = 1 }; vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); kvm_set_dr(vcpu, 7, 0x400); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(vcpu); if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, vmcs12->vm_exit_msr_load_count)) nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); } /* * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 * and modify vmcs12 to make it see what it would expect to see there if * L2 was its real guest. 
Must only be called when in L2 (is_guest_mode()) */ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); leave_guest_mode(vcpu); prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, exit_qualification); if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, vmcs12->vm_exit_msr_store_count)) nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); vmx_load_vmcs01(vcpu); if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) && nested_exit_intr_ack_set(vcpu)) { int irq = kvm_cpu_get_interrupt(vcpu); WARN_ON(irq < 0); vmcs12->vm_exit_intr_info = irq | INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; } trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, vmcs12->exit_qualification, vmcs12->idt_vectoring_info_field, vmcs12->vm_exit_intr_info, vmcs12->vm_exit_intr_error_code, KVM_ISA_VMX); vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); vmx_segment_cache_clear(vmx); /* if no vmcs02 cache requested, remove the one we used */ if (VMCS02_POOL_SIZE == 0) nested_free_vmcs02(vmx, vmx->nested.current_vmptr); load_vmcs12_host_state(vcpu, vmcs12); /* Update TSC_OFFSET if TSC was changed while L2 ran */ vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); /* This is needed for same reason as it was needed in prepare_vmcs02 */ vmx->host_rsp = 0; /* Unpin physical memory we referred to in vmcs02 */ if (vmx->nested.apic_access_page) { nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } if (vmx->nested.pi_desc_page) { kunmap(vmx->nested.pi_desc_page); nested_release_page(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; vmx->nested.pi_desc = NULL; } /* * While L2 ran, an mmu_notifier may have forced a reload of the APIC * access page's hpa for the L2 vmcs. Reload it for L1 before entering L1. */ kvm_vcpu_reload_apic_access_page(vcpu); /* * Exiting from L2 to L1, we're now back to L1 which thinks it just * finished a VMLAUNCH or VMRESUME instruction, so we need to set the * success or failure flag accordingly. */ if (unlikely(vmx->fail)) { vmx->fail = 0; nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR)); } else nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } /* * Forcibly leave nested mode in order to be able to reset the VCPU later on. */ static void vmx_leave_nested(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) nested_vmx_vmexit(vcpu, -1, 0, 0); free_nested(to_vmx(vcpu)); } /* * L1's failure to enter L2 is a subset of a normal exit, as explained in * 23.7 "VM-entry failures during or after loading guest state" (this also * lists the acceptable exit-reason and exit-qualification parameters). * It should only be called before L2 has actually succeeded in running, and * when vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
*/ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification) { load_vmcs12_host_state(vcpu, vmcs12); vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY; vmcs12->exit_qualification = qualification; nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) to_vmx(vcpu)->nested.sync_shadow_vmcs = true; } static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) { return X86EMUL_CONTINUE; } static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) { if (ple_gap) shrink_ple_window(vcpu); } static void vmx_slot_enable_log_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_slot_leaf_clear_dirty(kvm, slot); kvm_mmu_slot_largepage_remove_write_access(kvm, slot); } static void vmx_slot_disable_log_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_slot_set_dirty(kvm, slot); } static void vmx_flush_log_dirty(struct kvm *kvm) { kvm_flush_pml_buffers(kvm); } static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t offset, unsigned long mask) { kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); } /* * This routine does the following things for vCPU which is going * to be blocked if VT-d PI is enabled. * - Store the vCPU to the wakeup list, so when interrupts happen * we can find the right vCPU to wake up. * - Change the Posted-interrupt descriptor as below: * 'NDST' <-- vcpu->pre_pcpu * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR * - If 'ON' is set during this process, which means at least one * interrupt is posted for this vCPU, we cannot block it, in * this case, return 1, otherwise, return 0. * */ static int vmx_pre_block(struct kvm_vcpu *vcpu) { unsigned long flags; unsigned int dest; struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP)) return 0; vcpu->pre_pcpu = vcpu->cpu; spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_add_tail(&vcpu->blocked_vcpu_list, &per_cpu(blocked_vcpu_on_cpu, vcpu->pre_pcpu)); spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); do { old.control = new.control = pi_desc->control; /* * We should not block the vCPU if * an interrupt is posted for it. */ if (pi_test_on(pi_desc) == 1) { spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_del(&vcpu->blocked_vcpu_list); spin_unlock_irqrestore( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); vcpu->pre_pcpu = -1; return 1; } WARN((pi_desc->sn == 1), "Warning: SN field of posted-interrupts " "is set before blocking\n"); /* * Since vCPU can be preempted during this process, * vcpu->cpu could be different with pre_pcpu, we * need to set pre_pcpu as the destination of wakeup * notification event, then we can find the right vCPU * to wakeup in wakeup handler if interrupts happen * when the vCPU is in blocked state. 
*/ dest = cpu_physical_id(vcpu->pre_pcpu); if (x2apic_enabled()) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; /* set 'NV' to 'wakeup vector' */ new.nv = POSTED_INTR_WAKEUP_VECTOR; } while (cmpxchg(&pi_desc->control, old.control, new.control) != old.control); return 0; } static void vmx_post_block(struct kvm_vcpu *vcpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); struct pi_desc old, new; unsigned int dest; unsigned long flags; if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP)) return; do { old.control = new.control = pi_desc->control; dest = cpu_physical_id(vcpu->cpu); if (x2apic_enabled()) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; /* Allow posting non-urgent interrupts */ new.sn = 0; /* set 'NV' to 'notification vector' */ new.nv = POSTED_INTR_VECTOR; } while (cmpxchg(&pi_desc->control, old.control, new.control) != old.control); if(vcpu->pre_pcpu != -1) { spin_lock_irqsave( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_del(&vcpu->blocked_vcpu_list); spin_unlock_irqrestore( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); vcpu->pre_pcpu = -1; } } /* * vmx_update_pi_irte - set IRTE for Posted-Interrupts * * @kvm: kvm * @host_irq: host irq of the interrupt * @guest_irq: gsi of the interrupt * @set: set or unset PI * returns 0 on success, < 0 on failure */ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set) { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; struct kvm_lapic_irq irq; struct kvm_vcpu *vcpu; struct vcpu_data vcpu_info; int idx, ret = -EINVAL; if (!kvm_arch_has_assigned_device(kvm) || !irq_remapping_cap(IRQ_POSTING_CAP)) return 0; idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); BUG_ON(guest_irq >= irq_rt->nr_rt_entries); hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { if (e->type != KVM_IRQ_ROUTING_MSI) continue; /* * VT-d PI cannot support posting multicast/broadcast * interrupts to a vCPU, we still use interrupt remapping * for these kind of interrupts. * * For lowest-priority interrupts, we only support * those with single CPU as the destination, e.g. user * configures the interrupts via /proc/irq or uses * irqbalance to make the interrupts single-CPU. * * We will support full lowest-priority interrupt later. */ kvm_set_msi_irq(e, &irq); if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { /* * Make sure the IRTE is in remapped mode if * we don't handle it in posted mode. 
*/ ret = irq_set_vcpu_affinity(host_irq, NULL); if (ret < 0) { printk(KERN_INFO "failed to back to remapped mode, irq: %u\n", host_irq); goto out; } continue; } vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); if (set) ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); else { /* suppress notification event before unposting */ pi_set_sn(vcpu_to_pi_desc(vcpu)); ret = irq_set_vcpu_affinity(host_irq, NULL); pi_clear_sn(vcpu_to_pi_desc(vcpu)); } if (ret < 0) { printk(KERN_INFO "%s: failed to update PI IRTE\n", __func__); goto out; } } ret = 0; out: srcu_read_unlock(&kvm->irq_srcu, idx); return ret; } static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, .hardware_setup = hardware_setup, .hardware_unsetup = hardware_unsetup, .check_processor_compatibility = vmx_check_processor_compat, .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, .cpu_has_accelerated_tpr = report_flexpriority, .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase, .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, .vcpu_reset = vmx_vcpu_reset, .prepare_guest_switch = vmx_save_host_state, .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, .update_bp_intercept = update_exception_bitmap, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, .get_segment = vmx_get_segment, .set_segment = vmx_set_segment, .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, .decache_cr3 = vmx_decache_cr3, .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, .set_cr4 = vmx_set_cr4, .set_efer = vmx_set_efer, .get_idt = vmx_get_idt, .set_idt = vmx_set_idt, .get_gdt = vmx_get_gdt, .set_gdt = vmx_set_gdt, .get_dr6 = vmx_get_dr6, .set_dr6 = vmx_set_dr6, .set_dr7 = vmx_set_dr7, .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, .cache_reg = vmx_cache_reg, .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, .get_pkru = vmx_get_pkru, .fpu_activate = vmx_fpu_activate, .fpu_deactivate = vmx_fpu_deactivate, .tlb_flush = vmx_flush_tlb, .run = vmx_vcpu_run, .handle_exit = vmx_handle_exit, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = vmx_set_interrupt_shadow, .get_interrupt_shadow = vmx_get_interrupt_shadow, .patch_hypercall = vmx_patch_hypercall, .set_irq = vmx_inject_irq, .set_nmi = vmx_inject_nmi, .queue_exception = vmx_queue_exception, .cancel_injection = vmx_cancel_injection, .interrupt_allowed = vmx_interrupt_allowed, .nmi_allowed = vmx_nmi_allowed, .get_nmi_mask = vmx_get_nmi_mask, .set_nmi_mask = vmx_set_nmi_mask, .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .get_enable_apicv = vmx_get_enable_apicv, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, .load_eoi_exitmap = vmx_load_eoi_exitmap, .hwapic_irr_update = vmx_hwapic_irr_update, .hwapic_isr_update = vmx_hwapic_isr_update, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, .get_mt_mask = vmx_get_mt_mask, .get_exit_info = vmx_get_exit_info, 
.get_lpage_level = vmx_get_lpage_level, .cpuid_update = vmx_cpuid_update, .rdtscp_supported = vmx_rdtscp_supported, .invpcid_supported = vmx_invpcid_supported, .set_supported_cpuid = vmx_set_supported_cpuid, .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, .read_tsc_offset = vmx_read_tsc_offset, .write_tsc_offset = vmx_write_tsc_offset, .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest, .read_l1_tsc = vmx_read_l1_tsc, .set_tdp_cr3 = vmx_set_cr3, .check_intercept = vmx_check_intercept, .handle_external_intr = vmx_handle_external_intr, .mpx_supported = vmx_mpx_supported, .xsaves_supported = vmx_xsaves_supported, .check_nested_events = vmx_check_nested_events, .sched_in = vmx_sched_in, .slot_enable_log_dirty = vmx_slot_enable_log_dirty, .slot_disable_log_dirty = vmx_slot_disable_log_dirty, .flush_log_dirty = vmx_flush_log_dirty, .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, .pre_block = vmx_pre_block, .post_block = vmx_post_block, .pmu_ops = &intel_pmu_ops, .update_pi_irte = vmx_update_pi_irte, }; static int __init vmx_init(void) { int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx), THIS_MODULE); if (r) return r; #ifdef CONFIG_KEXEC_CORE rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); #endif return 0; } static void __exit vmx_exit(void) { #ifdef CONFIG_KEXEC_CORE RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); synchronize_rcu(); #endif kvm_exit(); } module_init(vmx_init) module_exit(vmx_exit)
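/*
 * A minimal user-space sketch (not part of vmx.c) of the three-way merge
 * performed by vmcs12_guest_cr0() above: untrapped bits come from the live
 * vmcs02 GUEST_CR0, L1-trapped bits keep their old vmcs12 value, and bits
 * trapped only by L0 come from the vmcs02 read shadow. All numeric values
 * below are invented for illustration; only the masking logic mirrors the
 * kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t merge_guest_cr0(uint64_t vmcs02_guest_cr0,
				uint64_t vmcs02_cr0_read_shadow,
				uint64_t vmcs12_guest_cr0,
				uint64_t l0_owned,	/* bits not trapped by L0 */
				uint64_t l1_mask)	/* vmcs12->cr0_guest_host_mask */
{
	return (vmcs02_guest_cr0 & l0_owned) |			/* case 1 */
	       (vmcs12_guest_cr0 & l1_mask) |			/* case 2 */
	       (vmcs02_cr0_read_shadow &
		~(l1_mask | l0_owned));				/* case 3 */
}

int main(void)
{
	/* invented example: L0 lets the guest own TS (bit 3), L1 traps PG (bit 31) */
	uint64_t cr0 = merge_guest_cr0(0x3b, 0x33, 0x80000033,
				       1ull << 3, 1ull << 31);

	printf("vmcs12.guest_cr0 = %#llx\n", (unsigned long long)cr0);
	return 0;	/* prints 0x8000003b: L1's PG setting is preserved */
}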
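/*
 * A hedged sketch of the IA32_EFER consistency rules that nested_vmx_run()
 * enforces above when the "load IA32_EFER" entry control is set: EFER.LMA
 * must equal the IA-32e-mode-guest entry control, and EFER.LME must match
 * it as well whenever CR0.PG is 1. The bit positions follow the Intel SDM;
 * the function name and everything else is invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

#define EFER_LME_BIT	(1ull << 8)
#define EFER_LMA_BIT	(1ull << 10)
#define CR0_PG_BIT	(1ull << 31)

static bool guest_efer_consistent(uint64_t efer, uint64_t cr0, bool ia32e_entry)
{
	if (ia32e_entry != !!(efer & EFER_LMA_BIT))
		return false;	/* LMA must mirror the entry control */
	if ((cr0 & CR0_PG_BIT) && ia32e_entry != !!(efer & EFER_LME_BIT))
		return false;	/* with paging enabled, LME must agree too */
	return true;
}

int main(void)
{
	/* a 64-bit guest with paging on: LMA and LME both set is consistent */
	return guest_efer_consistent(EFER_LMA_BIT | EFER_LME_BIT,
				     CR0_PG_BIT, true) ? 0 : 1;
}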
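/*
 * vmx_pre_block()/vmx_post_block() above rewrite the posted-interrupt
 * descriptor with a cmpxchg retry loop so that a concurrently posted
 * interrupt is never lost. A rough user-space analog using C11 atomics;
 * the 32-bit field layout used here (ON bit, SN bit, vector, destination)
 * is invented and does not match the real hardware descriptor.
 */
#include <stdatomic.h>
#include <stdint.h>

#define PI_ON	(1u << 0)	/* outstanding notification pending */
#define PI_SN	(1u << 1)	/* notifications suppressed */

static int pi_prepare_block(_Atomic uint32_t *control, uint32_t dest,
			    uint32_t wakeup_vec)
{
	uint32_t old = atomic_load(control);
	uint32_t new;

	do {
		if (old & PI_ON)
			return 1;	/* interrupt already posted: don't block */
		/* keep SN, redirect notifications to the wakeup vector */
		new = (old & PI_SN) | (dest << 16) | (wakeup_vec << 8);
		/* on failure, compare_exchange refreshes 'old' and we retry */
	} while (!atomic_compare_exchange_weak(control, &old, new));

	return 0;
}

int main(void)
{
	_Atomic uint32_t control = 0;

	return pi_prepare_block(&control, 3, 0xf2);	/* 0: safe to block */
}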
/* * chfn.c -- change your finger information * (c) 1994 by salvatore valente <svalente@athena.mit.edu> * (c) 2012 by Cody Maloney <cmaloney@theoreticalchaos.com> * * this program is free software. you can redistribute it and * modify it under the terms of the gnu general public license. * there is no warranty. * * $Author: aebr $ * $Revision: 1.18 $ * $Date: 1998/06/11 22:30:11 $ * * Updated Thu Oct 12 09:19:26 1995 by faith@cs.unc.edu with security * patches from Zefram <A.Main@dcs.warwick.ac.uk> * * Hacked by Peter Breitenlohner, peb@mppmu.mpg.de, * to remove trailing empty fields. Oct 5, 96. * * 1999-02-22 Arkadiusz Miśkiewicz <misiek@pld.ORG.PL> * - added Native Language Support */ #include <ctype.h> #include <errno.h> #include <getopt.h> #include <pwd.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <unistd.h> #include "c.h" #include "env.h" #include "closestream.h" #include "islocal.h" #include "nls.h" #include "setpwnam.h" #include "strutils.h" #include "xalloc.h" #include "logindefs.h" #include "ch-common.h" #ifdef HAVE_LIBSELINUX # include <selinux/selinux.h> # include <selinux/av_permissions.h> # include "selinux_utils.h" #endif #ifdef HAVE_LIBUSER # include <libuser/user.h> # include "libuser.h" #elif CHFN_CHSH_PASSWORD # include "auth.h" #endif struct finfo { char *full_name; char *office; char *office_phone; char *home_phone; char *other; }; struct chfn_control { struct passwd *pw; char *username; /* "oldf" Contains the user's original finger information. * "newf" Contains the changed finger information, and contains * NULL in fields that haven't been changed. * In the end, "newf" is folded into "oldf". */ struct finfo oldf, newf; unsigned int allow_fullname:1, /* The login.defs restriction */ allow_room:1, /* see: man login.defs(5) */ allow_work:1, /* and look for CHFN_RESTRICT */ allow_home:1, /* keyword for these four. */ changed:1, /* is change requested */ interactive:1; /* whether to prompt for fields or not */ }; /* we do not accept gecos field sizes longer than MAX_FIELD_SIZE */ #define MAX_FIELD_SIZE 256 static void __attribute__((__noreturn__)) usage(FILE *fp) { fputs(USAGE_HEADER, fp); fprintf(fp, _(" %s [options] [<username>]\n"), program_invocation_short_name); fputs(USAGE_SEPARATOR, fp); fputs(_("Change your finger information.\n"), fp); fputs(USAGE_OPTIONS, fp); fputs(_(" -f, --full-name <full-name> real name\n"), fp); fputs(_(" -o, --office <office> office number\n"), fp); fputs(_(" -p, --office-phone <phone> office phone number\n"), fp); fputs(_(" -h, --home-phone <phone> home phone number\n"), fp); fputs(USAGE_SEPARATOR, fp); fputs(_(" -u, --help display this help and exit\n"), fp); fputs(_(" -v, --version output version information and exit\n"), fp); fprintf(fp, USAGE_MAN_TAIL("chfn(1)")); exit(fp == stderr ? EXIT_FAILURE : EXIT_SUCCESS); } /* * check_gecos_string () -- * check that the given gecos string is legal. if it's not legal, * output "msg" followed by a description of the problem, and return (-1). */ static int check_gecos_string(const char *msg, char *gecos) { const size_t len = strlen(gecos); if (MAX_FIELD_SIZE < len) { warnx(_("field %s is too long"), msg); return -1; } if (illegal_passwd_chars(gecos)) { warnx(_("%s: has illegal characters"), gecos); return -1; } return 0; } /* * parse_argv () -- * parse the command line arguments; exits on invalid options or * illegal field values.
*/ static void parse_argv(struct chfn_control *ctl, int argc, char **argv) { int index, c, status = 0; static const struct option long_options[] = { {"full-name", required_argument, 0, 'f'}, {"office", required_argument, 0, 'o'}, {"office-phone", required_argument, 0, 'p'}, {"home-phone", required_argument, 0, 'h'}, {"help", no_argument, 0, 'u'}, {"version", no_argument, 0, 'v'}, {NULL, no_argument, 0, '0'}, }; while ((c = getopt_long(argc, argv, "f:r:p:h:o:uv", long_options, &index)) != -1) { switch (c) { case 'f': if (!ctl->allow_fullname) errx(EXIT_FAILURE, _("login.defs forbids setting %s"), _("Name")); ctl->newf.full_name = optarg; status += check_gecos_string(_("Name"), optarg); break; case 'o': if (!ctl->allow_room) errx(EXIT_FAILURE, _("login.defs forbids setting %s"), _("Office")); ctl->newf.office = optarg; status += check_gecos_string(_("Office"), optarg); break; case 'p': if (!ctl->allow_work) errx(EXIT_FAILURE, _("login.defs forbids setting %s"), _("Office Phone")); ctl->newf.office_phone = optarg; status += check_gecos_string(_("Office Phone"), optarg); break; case 'h': if (!ctl->allow_home) errx(EXIT_FAILURE, _("login.defs forbids setting %s"), _("Home Phone")); ctl->newf.home_phone = optarg; status += check_gecos_string(_("Home Phone"), optarg); break; case 'v': printf(UTIL_LINUX_VERSION); exit(EXIT_SUCCESS); case 'u': usage(stdout); default: usage(stderr); } ctl->changed = 1; ctl->interactive = 0; } if (status != 0) exit(EXIT_FAILURE); /* done parsing arguments. check for a username. */ if (optind < argc) { if (optind + 1 < argc) usage(stderr); ctl->username = argv[optind]; } return; } /* * parse_passwd () -- * take a struct password and fill in the fields of the struct finfo. */ static void parse_passwd(struct chfn_control *ctl) { char *gecos; if (!ctl->pw) return; /* use pw_gecos - we take a copy since PAM destroys the original */ gecos = xstrdup(ctl->pw->pw_gecos); /* extract known fields */ ctl->oldf.full_name = strsep(&gecos, ","); ctl->oldf.office = strsep(&gecos, ","); ctl->oldf.office_phone = strsep(&gecos, ","); ctl->oldf.home_phone = strsep(&gecos, ","); /* extra fields contain site-specific information, and can * not be changed by this version of chfn. */ ctl->oldf.other = strsep(&gecos, ","); } /* * ask_new_field () -- * ask the user for a given field and check that the string is legal. 
*/ static char *ask_new_field(struct chfn_control *ctl, const char *question, char *def_val) { int len; char *ans; char buf[MAX_FIELD_SIZE + 2]; if (!def_val) def_val = ""; while (true) { printf("%s [%s]: ", question, def_val); __fpurge(stdin); if (fgets(buf, sizeof(buf), stdin) == NULL) errx(EXIT_FAILURE, _("Aborted.")); ans = buf; /* remove white spaces from string end */ ltrim_whitespace((unsigned char *) ans); len = rtrim_whitespace((unsigned char *) ans); if (len == 0) return xstrdup(def_val); if (!strcasecmp(ans, "none")) { ctl->changed = 1; return xstrdup(""); } if (check_gecos_string(question, ans) >= 0) break; } ctl->changed = 1; return xstrdup(ans); } /* * get_login_defs() * find /etc/login.defs CHFN_RESTRICT and save restrictions to run time */ static void get_login_defs(struct chfn_control *ctl) { const char *s; size_t i; int broken = 0; /* real root does not have restrictions */ if (geteuid() == getuid() && getuid() == 0) { ctl->allow_fullname = ctl->allow_room = ctl->allow_work = ctl->allow_home = 1; return; } s = getlogindefs_str("CHFN_RESTRICT", ""); if (!strcmp(s, "yes")) { ctl->allow_room = ctl->allow_work = ctl->allow_home = 1; return; } if (!strcmp(s, "no")) { ctl->allow_fullname = ctl->allow_room = ctl->allow_work = ctl->allow_home = 1; return; } for (i = 0; s[i]; i++) { switch (s[i]) { case 'f': ctl->allow_fullname = 1; break; case 'r': ctl->allow_room = 1; break; case 'w': ctl->allow_work = 1; break; case 'h': ctl->allow_home = 1; break; default: broken = 1; } } if (broken) warnx(_("%s: CHFN_RESTRICT has unexpected value: %s"), _PATH_LOGINDEFS, s); if (!ctl->allow_fullname && !ctl->allow_room && !ctl->allow_work && !ctl->allow_home) errx(EXIT_FAILURE, _("%s: CHFN_RESTRICT does not allow any changes"), _PATH_LOGINDEFS); return; } /* * ask_info () -- * prompt the user for the finger information and store it. */ static void ask_info(struct chfn_control *ctl) { if (ctl->allow_fullname) ctl->newf.full_name = ask_new_field(ctl, _("Name"), ctl->oldf.full_name); if (ctl->allow_room) ctl->newf.office = ask_new_field(ctl, _("Office"), ctl->oldf.office); if (ctl->allow_work) ctl->newf.office_phone = ask_new_field(ctl, _("Office Phone"), ctl->oldf.office_phone); if (ctl->allow_home) ctl->newf.home_phone = ask_new_field(ctl, _("Home Phone"), ctl->oldf.home_phone); putchar('\n'); } /* * find_field () -- * find field value in uninteractive mode; can be new, old, or blank */ static char *find_field(char *nf, char *of) { if (nf) return nf; if (of) return of; return xstrdup(""); } /* * add_missing () -- * add not supplied field values when in uninteractive mode */ static void add_missing(struct chfn_control *ctl) { ctl->newf.full_name = find_field(ctl->newf.full_name, ctl->oldf.full_name); ctl->newf.office = find_field(ctl->newf.office, ctl->oldf.office); ctl->newf.office_phone = find_field(ctl->newf.office_phone, ctl->oldf.office_phone); ctl->newf.home_phone = find_field(ctl->newf.home_phone, ctl->oldf.home_phone); ctl->newf.other = find_field(ctl->newf.other, ctl->oldf.other); printf("\n"); } /* * save_new_data () -- * save the given finger info in /etc/passwd. * return zero on success. 
*/ static int save_new_data(struct chfn_control *ctl) { char *gecos; int len; /* create the new gecos string */ len = xasprintf(&gecos, "%s,%s,%s,%s,%s", ctl->newf.full_name, ctl->newf.office, ctl->newf.office_phone, ctl->newf.home_phone, ctl->newf.other); /* remove trailing empty fields (but not subfields of ctl->newf.other) */ if (!ctl->newf.other) { while (len > 0 && gecos[len - 1] == ',') len--; gecos[len] = 0; } #ifdef HAVE_LIBUSER if (set_value_libuser("chfn", ctl->username, ctl->pw->pw_uid, LU_GECOS, gecos) < 0) { #else /* HAVE_LIBUSER */ /* write the new struct passwd to the passwd file. */ ctl->pw->pw_gecos = gecos; if (setpwnam(ctl->pw, ".chfn") < 0) { warn("setpwnam failed"); #endif printf(_ ("Finger information *NOT* changed. Try again later.\n")); return -1; } free(gecos); printf(_("Finger information changed.\n")); return 0; } int main(int argc, char **argv) { uid_t uid; struct chfn_control ctl = { .interactive = 1 }; sanitize_env(); setlocale(LC_ALL, ""); /* both for messages and for iscntrl() below */ bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); atexit(close_stdout); uid = getuid(); /* check /etc/login.defs CHFN_RESTRICT */ get_login_defs(&ctl); parse_argv(&ctl, argc, argv); if (!ctl.username) { ctl.pw = getpwuid(uid); if (!ctl.pw) errx(EXIT_FAILURE, _("you (user %d) don't exist."), uid); ctl.username = ctl.pw->pw_name; } else { ctl.pw = getpwnam(ctl.username); if (!ctl.pw) errx(EXIT_FAILURE, _("user \"%s\" does not exist."), ctl.username); } parse_passwd(&ctl); #ifndef HAVE_LIBUSER if (!(is_local(ctl.username))) errx(EXIT_FAILURE, _("can only change local entries")); #endif #ifdef HAVE_LIBSELINUX if (is_selinux_enabled() > 0) { if (uid == 0) { if (checkAccess(ctl.username, PASSWD__CHFN) != 0) { security_context_t user_context; if (getprevcon(&user_context) < 0) user_context = NULL; errx(EXIT_FAILURE, _("%s is not authorized to change " "the finger info of %s"), user_context ? : _("Unknown user context"), ctl.username); } } if (setupDefaultContext(_PATH_PASSWD)) errx(EXIT_FAILURE, _("can't set default context for %s"), _PATH_PASSWD); } #endif #ifdef HAVE_LIBUSER /* If we're setuid and not really root, disallow the password change. */ if (geteuid() != getuid() && uid != ctl.pw->pw_uid) { #else if (uid != 0 && uid != ctl.pw->pw_uid) { #endif errno = EACCES; err(EXIT_FAILURE, _("running UID doesn't match UID of user we're " "altering, change denied")); } printf(_("Changing finger information for %s.\n"), ctl.username); #if !defined(HAVE_LIBUSER) && defined(CHFN_CHSH_PASSWORD) if (!auth_pam("chfn", uid, ctl.username)) { return EXIT_FAILURE; } #endif if (ctl.interactive) ask_info(&ctl); add_missing(&ctl); if (!ctl.changed) { printf(_("Finger information not changed.\n")); return EXIT_SUCCESS; } return save_new_data(&ctl) == 0 ? EXIT_SUCCESS : EXIT_FAILURE; }
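/*
 * A small stand-alone sketch (not part of chfn.c) of the GECOS assembly
 * done by save_new_data() above: join the four changeable fields plus the
 * site-specific "other" field with commas, then strip trailing empty
 * fields when "other" is absent. It uses snprintf instead of util-linux's
 * xasprintf helper, and takes an empty "other" string where chfn would
 * have a NULL pointer.
 */
#include <stdio.h>
#include <string.h>

static void build_gecos(char *out, size_t outsz, const char *name,
			const char *office, const char *office_ph,
			const char *home_ph, const char *other)
{
	int len = snprintf(out, outsz, "%s,%s,%s,%s,%s",
			   name, office, office_ph, home_ph, other);

	if (len < 0 || (size_t)len >= outsz)
		return;			/* truncated; leave as-is in this sketch */

	/* trailing empty fields carry no information - drop them */
	if (!*other)
		while (len > 0 && out[len - 1] == ',')
			out[--len] = '\0';
}

int main(void)
{
	char gecos[256];

	build_gecos(gecos, sizeof(gecos), "Ada Lovelace", "B-42", "", "", "");
	puts(gecos);		/* prints "Ada Lovelace,B-42" */
	return 0;
}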
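/*
 * The reverse direction, mirroring parse_passwd() above: split a copy of
 * the GECOS string into its comma-separated fields with strsep(3), which
 * leaves NULL in *stringp once the input is exhausted - which is why
 * missing trailing fields show up as NULL pointers rather than empty
 * strings. The sample data is invented.
 */
#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *gecos = strdup("Ada Lovelace,B-42");	/* strsep modifies its input */
	char *p = gecos;
	const char *full_name = strsep(&p, ",");
	const char *office = strsep(&p, ",");
	const char *office_phone = strsep(&p, ",");	/* NULL: field missing */

	printf("name=%s office=%s phone=%s\n", full_name, office,
	       office_phone ? office_phone : "(unset)");
	free(gecos);
	return 0;
}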
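/*
 * The fs/cifs/connect.c demultiplex loop that follows reads a 4-byte
 * RFC 1002 header before each SMB: one session-packet type byte followed
 * by a big-endian length. A hedged user-space sketch of that framing;
 * note the kernel code does not mask the type byte out of the length but
 * instead relies on the type being zero for regular session data, and the
 * helper name here is invented.
 */
#include <stdint.h>

/* returns the session packet type; stores the payload length via *lenp */
static uint8_t rfc1002_parse(const uint8_t hdr[4], uint32_t *lenp)
{
	uint32_t word = ((uint32_t)hdr[0] << 24) | ((uint32_t)hdr[1] << 16) |
			((uint32_t)hdr[2] << 8) | hdr[3];

	*lenp = word & 0x00ffffff;	/* low 24 bits hold the length */
	return hdr[0];			/* high byte: session packet type */
}

int main(void)
{
	const uint8_t hdr[4] = { 0x00, 0x00, 0x01, 0x00 };	/* 256-byte SMB */
	uint32_t len;
	uint8_t type = rfc1002_parse(hdr, &len);

	return !(type == 0 && len == 256);
}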
/* * fs/cifs/connect.c * * Copyright (C) International Business Machines Corp., 2002,2009 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/net.h> #include <linux/string.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/ctype.h> #include <linux/utsname.h> #include <linux/mempool.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/kthread.h> #include <linux/pagevec.h> #include <linux/freezer.h> #include <linux/namei.h> #include <asm/uaccess.h> #include <asm/processor.h> #include <linux/inet.h> #include <net/ipv6.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "ntlmssp.h" #include "nterr.h" #include "rfc1002pdu.h" #include "cn_cifs.h" #include "fscache.h" #define CIFS_PORT 445 #define RFC1001_PORT 139 extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24); extern mempool_t *cifs_req_poolp; struct smb_vol { char *username; char *password; char *domainname; char *UNC; char *UNCip; char *iocharset; /* local code page for mapping to and from Unicode */ char source_rfc1001_name[16]; /* netbios name of client */ char target_rfc1001_name[16]; /* netbios name of server for Win9x/ME */ uid_t linux_uid; gid_t linux_gid; mode_t file_mode; mode_t dir_mode; unsigned secFlg; bool retry:1; bool intr:1; bool setuids:1; bool override_uid:1; bool override_gid:1; bool dynperm:1; bool noperm:1; bool no_psx_acl:1; /* set if posix acl support should be disabled */ bool cifs_acl:1; bool no_xattr:1; /* set if xattr (EA) support should be disabled*/ bool server_ino:1; /* use inode numbers from server ie UniqueId */ bool direct_io:1; bool remap:1; /* set to remap seven reserved chars in filenames */ bool posix_paths:1; /* unset to not ask for posix pathnames. 
*/ bool no_linux_ext:1; bool sfu_emul:1; bool nullauth:1; /* attempt to authenticate with null user */ bool nocase:1; /* request case insensitive filenames */ bool nobrl:1; /* disable sending byte range locks to srv */ bool mand_lock:1; /* send mandatory not posix byte range lock reqs */ bool seal:1; /* request transport encryption on share */ bool nodfs:1; /* Do not request DFS, even if available */ bool local_lease:1; /* check leases only on local system, not remote */ bool noblocksnd:1; bool noautotune:1; bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ unsigned int rsize; unsigned int wsize; bool sockopt_tcp_nodelay:1; unsigned short int port; char *prepath; struct nls_table *local_nls; }; static int ipv4_connect(struct TCP_Server_Info *server); static int ipv6_connect(struct TCP_Server_Info *server); /* * cifs tcp session reconnection * * mark tcp session as reconnecting so temporarily locked * mark all smb sessions as reconnecting for tcp session * reconnect tcp session * wake up waiters on reconnection? - (not needed currently) */ static int cifs_reconnect(struct TCP_Server_Info *server) { int rc = 0; struct list_head *tmp, *tmp2; struct cifsSesInfo *ses; struct cifsTconInfo *tcon; struct mid_q_entry *mid_entry; spin_lock(&GlobalMid_Lock); if (server->tcpStatus == CifsExiting) { /* the demux thread will exit normally next time through the loop */ spin_unlock(&GlobalMid_Lock); return rc; } else server->tcpStatus = CifsNeedReconnect; spin_unlock(&GlobalMid_Lock); server->maxBuf = 0; cFYI(1, "Reconnecting tcp session"); /* before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they are not used until reconnected */ read_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &server->smb_ses_list) { ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); ses->need_reconnect = true; ses->ipc_tid = 0; list_for_each(tmp2, &ses->tcon_list) { tcon = list_entry(tmp2, struct cifsTconInfo, tcon_list); tcon->need_reconnect = true; } } read_unlock(&cifs_tcp_ses_lock); /* do not want to be sending data on a socket we are freeing */ mutex_lock(&server->srv_mutex); if (server->ssocket) { cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state, server->ssocket->flags); kernel_sock_shutdown(server->ssocket, SHUT_WR); cFYI(1, "Post shutdown state: 0x%x Flags: 0x%lx", server->ssocket->state, server->ssocket->flags); sock_release(server->ssocket); server->ssocket = NULL; } spin_lock(&GlobalMid_Lock); list_for_each(tmp, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); if (mid_entry->midState == MID_REQUEST_SUBMITTED) { /* Mark other intransit requests as needing retry so we do not immediately mark the session bad again (ie after we reconnect below) as they timeout too */ mid_entry->midState = MID_RETRY_NEEDED; } } spin_unlock(&GlobalMid_Lock); mutex_unlock(&server->srv_mutex); while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) { try_to_freeze(); if (server->addr.sockAddr6.sin6_family == AF_INET6) rc = ipv6_connect(server); else rc = ipv4_connect(server); if (rc) { cFYI(1, "reconnect error %d", rc); msleep(3000); } else { atomic_inc(&tcpSesReconnectCount); spin_lock(&GlobalMid_Lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsGood; server->sequence_number = 0; spin_unlock(&GlobalMid_Lock); /* atomic_set(&server->inFlight,0);*/ wake_up(&server->response_q); } } return rc; } /* return codes: 0 not a transact2, or all data present >0 transact2 with that much data missing -EINVAL = 
invalid transact2 */ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) { struct smb_t2_rsp *pSMBt; int total_data_size; int data_in_this_rsp; int remaining; if (pSMB->Command != SMB_COM_TRANSACTION2) return 0; /* check for plausible wct, bcc and t2 data and parm sizes */ /* check for parm and data offset going beyond end of smb */ if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */ cFYI(1, "invalid transact2 word count"); return -EINVAL; } pSMBt = (struct smb_t2_rsp *)pSMB; total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount); data_in_this_rsp = le16_to_cpu(pSMBt->t2_rsp.DataCount); remaining = total_data_size - data_in_this_rsp; if (remaining == 0) return 0; else if (remaining < 0) { cFYI(1, "total data %d smaller than data in frame %d", total_data_size, data_in_this_rsp); return -EINVAL; } else { cFYI(1, "missing %d bytes from transact2, check next response", remaining); if (total_data_size > maxBufSize) { cERROR(1, "TotalDataSize %d is over maximum buffer %d", total_data_size, maxBufSize); return -EINVAL; } return remaining; } } static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) { struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond; struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB; int total_data_size; int total_in_buf; int remaining; int total_in_buf2; char *data_area_of_target; char *data_area_of_buf2; __u16 byte_count; total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount); if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) { cFYI(1, "total data size of primary and secondary t2 differ"); } total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount); remaining = total_data_size - total_in_buf; if (remaining < 0) return -EINVAL; if (remaining == 0) /* nothing to do, ignore */ return 0; total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount); if (remaining < total_in_buf2) { cFYI(1, "transact2 2nd response contains too much data"); } /* find end of first SMB data area */ data_area_of_target = (char *)&pSMBt->hdr.Protocol + le16_to_cpu(pSMBt->t2_rsp.DataOffset); /* validate target area */ data_area_of_buf2 = (char *) &pSMB2->hdr.Protocol + le16_to_cpu(pSMB2->t2_rsp.DataOffset); data_area_of_target += total_in_buf; /* copy second buffer into end of first buffer */ memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); total_in_buf += total_in_buf2; pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf); byte_count = le16_to_cpu(BCC_LE(pTargetSMB)); byte_count += total_in_buf2; BCC_LE(pTargetSMB) = cpu_to_le16(byte_count); byte_count = pTargetSMB->smb_buf_length; byte_count += total_in_buf2; /* BB also add check that we are not beyond maximum buffer size */ pTargetSMB->smb_buf_length = byte_count; if (remaining == total_in_buf2) { cFYI(1, "found the last secondary response"); return 0; /* we are done */ } else /* more responses to go */ return 1; } static int cifs_demultiplex_thread(struct TCP_Server_Info *server) { int length; unsigned int pdu_length, total_read; struct smb_hdr *smb_buffer = NULL; struct smb_hdr *bigbuf = NULL; struct smb_hdr *smallbuf = NULL; struct msghdr smb_msg; struct kvec iov; struct socket *csocket = server->ssocket; struct list_head *tmp; struct cifsSesInfo *ses; struct task_struct *task_to_wake = NULL; struct mid_q_entry *mid_entry; char temp; bool isLargeBuf = false; bool isMultiRsp; int reconnect; current->flags |= PF_MEMALLOC; cFYI(1, "Demultiplex PID: %d", task_pid_nr(current)); length = atomic_inc_return(&tcpSesAllocCount); if (length > 1) mempool_resize(cifs_req_poolp, 
	set_freezable();
	while (server->tcpStatus != CifsExiting) {
		if (try_to_freeze())
			continue;
		if (bigbuf == NULL) {
			bigbuf = cifs_buf_get();
			if (!bigbuf) {
				cERROR(1, "No memory for large SMB response");
				msleep(3000);
				/* retry will check if exiting */
				continue;
			}
		} else if (isLargeBuf) {
			/* we are reusing a dirty large buf, clear its start */
			memset(bigbuf, 0, sizeof(struct smb_hdr));
		}

		if (smallbuf == NULL) {
			smallbuf = cifs_small_buf_get();
			if (!smallbuf) {
				cERROR(1, "No memory for SMB response");
				msleep(1000);
				/* retry will check if exiting */
				continue;
			}
			/* beginning of smb buffer is cleared in our buf_get */
		} else /* if existing small buf clear beginning */
			memset(smallbuf, 0, sizeof(struct smb_hdr));

		isLargeBuf = false;
		isMultiRsp = false;
		smb_buffer = smallbuf;
		iov.iov_base = smb_buffer;
		iov.iov_len = 4;
		smb_msg.msg_control = NULL;
		smb_msg.msg_controllen = 0;
		pdu_length = 4; /* enough to get RFC1001 header */
incomplete_rcv:
		length =
		    kernel_recvmsg(csocket, &smb_msg, &iov, 1,
				   pdu_length, 0 /* BB other flags? */);

		if (server->tcpStatus == CifsExiting) {
			break;
		} else if (server->tcpStatus == CifsNeedReconnect) {
			cFYI(1, "Reconnect after server stopped responding");
			cifs_reconnect(server);
			cFYI(1, "call to reconnect done");
			csocket = server->ssocket;
			continue;
		} else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
			msleep(1); /* minimum sleep to prevent looping,
				allowing socket to clear and app threads to set
				tcpStatus CifsNeedReconnect if server hung */
			if (pdu_length < 4) {
				iov.iov_base = (4 - pdu_length) +
						(char *)smb_buffer;
				iov.iov_len = pdu_length;
				smb_msg.msg_control = NULL;
				smb_msg.msg_controllen = 0;
				goto incomplete_rcv;
			} else
				continue;
		} else if (length <= 0) {
			if (server->tcpStatus == CifsNew) {
				cFYI(1, "tcp session abend after SMBnegprot");
				/* some servers kill the TCP session rather
				   than returning an SMB negprot error, in
				   which case reconnecting here is not going
				   to help, and so simply return error to
				   mount */
				break;
			}
			if (!try_to_freeze() && (length == -EINTR)) {
				cFYI(1, "cifsd thread killed");
				break;
			}
			cFYI(1, "Reconnect after unexpected peek error %d",
				length);
			cifs_reconnect(server);
			csocket = server->ssocket;
			wake_up(&server->response_q);
			continue;
		} else if (length < pdu_length) {
			cFYI(1, "requested %d bytes but only got %d bytes",
				pdu_length, length);
			pdu_length -= length;
			msleep(1);
			goto incomplete_rcv;
		}

		/* The right amount was read from the socket - 4 bytes, */
		/* so we can now interpret the length field */

		/* the first byte, big endian, of the length field is
		   actually not part of the length but the type, with the
		   most common value, zero, meaning regular data */
		temp = *((char *) smb_buffer);

		/* Note that RFC 1001 length is big endian on the wire,
		   but we convert it here so it is always manipulated
		   as host byte order */
		pdu_length = be32_to_cpu((__force __be32)smb_buffer->smb_buf_length);
		smb_buffer->smb_buf_length = pdu_length;

		cFYI(1, "rfc1002 length 0x%x", pdu_length+4);

		if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
			continue;
		} else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
			cFYI(1, "Good RFC 1002 session rsp");
			continue;
		} else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
			/* we get this from Windows 98 instead of
			   an error on SMB negprot response */
			cFYI(1, "Negative RFC1002 Session Response Error 0x%x",
				pdu_length);
			if (server->tcpStatus == CifsNew) {
				/* if nack on negprot (rather than ret of smb
				   negprot error) reconnecting not going to
				   help, ret error to mount */
				break;
			} else {
				/* give server a second to clean up before
				   reconnect attempt */
				msleep(1000);
				/* always try 445 first on reconnect since we
				   get NACK on some if we ever connected to
				   port 139 (the NACK is since we do not begin
				   with RFC1001 session initialize frame) */
				server->addr.sockAddr.sin_port =
					htons(CIFS_PORT);
				cifs_reconnect(server);
				csocket = server->ssocket;
				wake_up(&server->response_q);
				continue;
			}
		} else if (temp != (char) 0) {
			cERROR(1, "Unknown RFC 1002 frame");
			cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
				      length);
			cifs_reconnect(server);
			csocket = server->ssocket;
			continue;
		}

		/* else we have an SMB response */
		if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
		    (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
			cERROR(1, "Invalid size SMB length %d pdu_length %d",
					length, pdu_length+4);
			cifs_reconnect(server);
			csocket = server->ssocket;
			wake_up(&server->response_q);
			continue;
		}

		/* else length ok */
		reconnect = 0;

		if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
			isLargeBuf = true;
			memcpy(bigbuf, smallbuf, 4);
			smb_buffer = bigbuf;
		}
		length = 0;
		iov.iov_base = 4 + (char *)smb_buffer;
		iov.iov_len = pdu_length;
		for (total_read = 0; total_read < pdu_length;
		     total_read += length) {
			length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
						pdu_length - total_read, 0);
			if ((server->tcpStatus == CifsExiting) ||
			    (length == -EINTR)) {
				/* then will exit */
				reconnect = 2;
				break;
			} else if (server->tcpStatus == CifsNeedReconnect) {
				cifs_reconnect(server);
				csocket = server->ssocket;
				/* Reconnect wakes up rspns q */
				/* Now we will reread sock */
				reconnect = 1;
				break;
			} else if ((length == -ERESTARTSYS) ||
				   (length == -EAGAIN)) {
				msleep(1); /* minimum sleep to prevent
					      looping, allowing socket to clear
					      and app threads to set tcpStatus
					      CifsNeedReconnect if server
					      hung */
				length = 0;
				continue;
			} else if (length <= 0) {
				cERROR(1, "Received no data, expecting %d",
					      pdu_length - total_read);
				cifs_reconnect(server);
				csocket = server->ssocket;
				reconnect = 1;
				break;
			}
		}
		if (reconnect == 2)
			break;
		else if (reconnect == 1)
			continue;

		length += 4; /* account for rfc1002 hdr */

		dump_smb(smb_buffer, length);
		if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
			cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
			continue;
		}

		task_to_wake = NULL;
		spin_lock(&GlobalMid_Lock);
		list_for_each(tmp, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);

			if ((mid_entry->mid == smb_buffer->Mid) &&
			    (mid_entry->midState == MID_REQUEST_SUBMITTED) &&
			    (mid_entry->command == smb_buffer->Command)) {
				if (check2ndT2(smb_buffer, server->maxBuf) > 0) {
					/* We have a multipart transact2 resp */
					isMultiRsp = true;
					if (mid_entry->resp_buf) {
						/* merge response - fix up 1st*/
						if (coalesce_t2(smb_buffer,
							mid_entry->resp_buf)) {
							mid_entry->multiRsp =
								true;
							break;
						} else {
							/* all parts received */
							mid_entry->multiEnd =
								true;
							goto multi_t2_fnd;
						}
					} else {
						if (!isLargeBuf) {
							cERROR(1, "1st trans2 resp needs bigbuf");
							/* BB maybe we can fix this up, switch
							   to already allocated large buffer? */
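							/* (worked example,
							   not from the
							   original: with
							   TotalDataCount 5000
							   and a first response
							   whose DataCount is
							   1460, check2ndT2()
							   above returned 3540,
							   and coalesce_t2()
							   keeps appending
							   secondary data areas
							   until the DataCounts
							   sum to 5000) */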
						} else {
							/* Have first buffer */
							mid_entry->resp_buf =
								smb_buffer;
							mid_entry->largeBuf =
								true;
							bigbuf = NULL;
						}
					}
					break;
				}
				mid_entry->resp_buf = smb_buffer;
				mid_entry->largeBuf = isLargeBuf;
multi_t2_fnd:
				task_to_wake = mid_entry->tsk;
				mid_entry->midState = MID_RESPONSE_RECEIVED;
#ifdef CONFIG_CIFS_STATS2
				mid_entry->when_received = jiffies;
#endif
				/* so we do not time out requests to server
				   which is still responding (since server
				   could be busy but not dead) */
				server->lstrp = jiffies;
				break;
			}
		}
		spin_unlock(&GlobalMid_Lock);
		if (task_to_wake) {
			/* Was previous buf put in mpx struct for multi-rsp? */
			if (!isMultiRsp) {
				/* smb buffer will be freed by user thread */
				if (isLargeBuf)
					bigbuf = NULL;
				else
					smallbuf = NULL;
			}
			wake_up_process(task_to_wake);
		} else if (!is_valid_oplock_break(smb_buffer, server) &&
			   !isMultiRsp) {
			cERROR(1, "No task to wake, unknown frame received! "
				   "NumMids %d", midCount.counter);
			cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
				      sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
			cifs_dump_detail(smb_buffer);
			cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
		}
	} /* end while !EXITING */

	/* take it off the list, if it's not already */
	write_lock(&cifs_tcp_ses_lock);
	list_del_init(&server->tcp_ses_list);
	write_unlock(&cifs_tcp_ses_lock);

	spin_lock(&GlobalMid_Lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&GlobalMid_Lock);
	wake_up_all(&server->response_q);

	/* check if we have blocked requests that need to free */
	/* Note that cifs_max_pending is normally 50, but
	   can be set at module install time to as little as two */
	spin_lock(&GlobalMid_Lock);
	if (atomic_read(&server->inFlight) >= cifs_max_pending)
		atomic_set(&server->inFlight, cifs_max_pending - 1);
	/* We do not want to set the max_pending too low or we
	   could end up with the counter going negative */
	spin_unlock(&GlobalMid_Lock);
	/* Although there should not be any requests blocked on
	   this queue, it cannot hurt to be paranoid and try to wake up
	   requests that may have been blocked when more than 50 at a time
	   were on the wire to the same server - they now will see the session
	   is in exit state and get out of SendReceive. */
	wake_up_all(&server->request_q);
	/* give those requests time to exit */
	msleep(125);

	if (server->ssocket) {
		sock_release(csocket);
		server->ssocket = NULL;
	}
	/* buffer usually freed in free_mid - need to free it here on exit */
	cifs_buf_release(bigbuf);
	if (smallbuf) /* no sense logging a debug message if NULL */
		cifs_small_buf_release(smallbuf);
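	/* (illustrative note, not in the original source: in the loop above a
	   matched response moves its mid_q_entry from MID_REQUEST_SUBMITTED
	   to MID_RESPONSE_RECEIVED before the caller is woken, so the
	   teardown below only needs to wake mids that never made that
	   transition) */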
	/*
	 * BB: we shouldn't have to do any of this. It shouldn't be
	 * possible to exit from the thread with active SMB sessions
	 */
	read_lock(&cifs_tcp_ses_lock);
	if (list_empty(&server->pending_mid_q)) {
		/* loop through server session structures attached to this
		   and mark them dead */
		list_for_each(tmp, &server->smb_ses_list) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 smb_ses_list);
			ses->status = CifsExiting;
			ses->server = NULL;
		}
		read_unlock(&cifs_tcp_ses_lock);
	} else {
		/* although we cannot zero the server struct pointer yet,
		   since there are active requests which may depend on them,
		   mark the corresponding SMB sessions as exiting too */
		list_for_each(tmp, &server->smb_ses_list) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 smb_ses_list);
			ses->status = CifsExiting;
		}

		spin_lock(&GlobalMid_Lock);
		list_for_each(tmp, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
				cFYI(1, "Clearing Mid 0x%x - waking up ",
					 mid_entry->mid);
				task_to_wake = mid_entry->tsk;
				if (task_to_wake)
					wake_up_process(task_to_wake);
			}
		}
		spin_unlock(&GlobalMid_Lock);
		read_unlock(&cifs_tcp_ses_lock);
		/* 1/8th of sec is more than enough time for them to exit */
		msleep(125);
	}

	if (!list_empty(&server->pending_mid_q)) {
		/* mpx threads have not exited yet give them
		   at least the smb send timeout time for long ops */
		/* due to delays on oplock break requests, we need
		   to wait at least 45 seconds before giving up
		   on a request getting a response and going ahead
		   and killing cifsd */
		cFYI(1, "Wait for exit from demultiplex thread");
		msleep(46000);
		/* if threads still have not exited they are probably never
		   coming home, not much else we can do but free the memory */
	}

	/* last chance to mark ses pointers invalid
	   if there are any pointing to this (e.g. if a
	   crazy root user tried to kill cifsd
	   kernel thread explicitly this might happen) */
	/* BB: This shouldn't be necessary, see above */
	read_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &server->smb_ses_list) {
		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
		ses->server = NULL;
	}
	read_unlock(&cifs_tcp_ses_lock);

	kfree(server->hostname);
	task_to_wake = xchg(&server->tsk, NULL);
	kfree(server);

	length = atomic_dec_return(&tcpSesAllocCount);
	if (length > 0)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
				GFP_KERNEL);

	/* if server->tsk was NULL then wait for a signal before exiting */
	if (!task_to_wake) {
		set_current_state(TASK_INTERRUPTIBLE);
		while (!signal_pending(current)) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		set_current_state(TASK_RUNNING);
	}

	module_put_and_exit(0);
}

/* extract the host portion of the UNC string */
static char *
extract_hostname(const char *unc)
{
	const char *src;
	char *dst, *delim;
	unsigned int len;

	/* skip double chars at beginning of string */
	/* BB: check validity of these bytes?
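	   (illustrative example, not from the original: given the UNC
	   \\srv1\public, src ends up pointing at "srv1\public", delim at the
	   backslash before "public", and the returned buffer holds "srv1")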
*/ src = unc + 2; /* delimiter between hostname and sharename is always '\\' now */ delim = strchr(src, '\\'); if (!delim) return ERR_PTR(-EINVAL); len = delim - src; dst = kmalloc((len + 1), GFP_KERNEL); if (dst == NULL) return ERR_PTR(-ENOMEM); memcpy(dst, src, len); dst[len] = '\0'; return dst; } static int cifs_parse_mount_options(char *options, const char *devname, struct smb_vol *vol) { char *value; char *data; unsigned int temp_len, i, j; char separator[2]; short int override_uid = -1; short int override_gid = -1; bool uid_specified = false; bool gid_specified = false; separator[0] = ','; separator[1] = 0; if (Local_System_Name[0] != 0) memcpy(vol->source_rfc1001_name, Local_System_Name, 15); else { char *nodename = utsname()->nodename; int n = strnlen(nodename, 15); memset(vol->source_rfc1001_name, 0x20, 15); for (i = 0; i < n; i++) { /* does not have to be perfect mapping since field is informational, only used for servers that do not support port 445 and it can be overridden at mount time */ vol->source_rfc1001_name[i] = toupper(nodename[i]); } } vol->source_rfc1001_name[15] = 0; /* null target name indicates to use *SMBSERVR default called name if we end up sending RFC1001 session initialize */ vol->target_rfc1001_name[0] = 0; vol->linux_uid = current_uid(); /* use current_euid() instead? */ vol->linux_gid = current_gid(); /* default to only allowing write access to owner of the mount */ vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR; /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ /* default is always to request posix paths. */ vol->posix_paths = 1; /* default to using server inode numbers where available */ vol->server_ino = 1; if (!options) return 1; if (strncmp(options, "sep=", 4) == 0) { if (options[4] != 0) { separator[0] = options[4]; options += 5; } else { cFYI(1, "Null separator not allowed"); } } while ((data = strsep(&options, separator)) != NULL) { if (!*data) continue; if ((value = strchr(data, '=')) != NULL) *value++ = '\0'; /* Have to parse this before we parse for "user" */ if (strnicmp(data, "user_xattr", 10) == 0) { vol->no_xattr = 0; } else if (strnicmp(data, "nouser_xattr", 12) == 0) { vol->no_xattr = 1; } else if (strnicmp(data, "user", 4) == 0) { if (!value) { printk(KERN_WARNING "CIFS: invalid or missing username\n"); return 1; /* needs_arg; */ } else if (!*value) { /* null user, ie anonymous, authentication */ vol->nullauth = 1; } if (strnlen(value, 200) < 200) { vol->username = value; } else { printk(KERN_WARNING "CIFS: username too long\n"); return 1; } } else if (strnicmp(data, "pass", 4) == 0) { if (!value) { vol->password = NULL; continue; } else if (value[0] == 0) { /* check if string begins with double comma since that would mean the password really does start with a comma, and would not indicate an empty string */ if (value[1] != separator[0]) { vol->password = NULL; continue; } } temp_len = strlen(value); /* removed password length check, NTLM passwords can be arbitrarily long */ /* if comma in password, the string will be prematurely null terminated. Commas in password are specified across the cifs mount interface by a double comma ie ,, and a comma used as in other cases ie ',' as a parameter delimiter/separator is single and due to the strsep above is temporarily zeroed. 
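	   (worked example, not part of the original comment: for a real
	   password "x,y" the user passes pass=x,,y; strsep zeroes the first
	   comma, the check below sees value[temp_len] == 0 followed by the
	   second comma, restores the separator, and the condensing loop then
	   copies "x,,y" out as "x,y")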
*/ /* NB: password legally can have multiple commas and the only illegal character in a password is null */ if ((value[temp_len] == 0) && (value[temp_len+1] == separator[0])) { /* reinsert comma */ value[temp_len] = separator[0]; temp_len += 2; /* move after second comma */ while (value[temp_len] != 0) { if (value[temp_len] == separator[0]) { if (value[temp_len+1] == separator[0]) { /* skip second comma */ temp_len++; } else { /* single comma indicating start of next parm */ break; } } temp_len++; } if (value[temp_len] == 0) { options = NULL; } else { value[temp_len] = 0; /* point option to start of next parm */ options = value + temp_len + 1; } /* go from value to value + temp_len condensing double commas to singles. Note that this ends up allocating a few bytes too many, which is ok */ vol->password = kzalloc(temp_len, GFP_KERNEL); if (vol->password == NULL) { printk(KERN_WARNING "CIFS: no memory " "for password\n"); return 1; } for (i = 0, j = 0; i < temp_len; i++, j++) { vol->password[j] = value[i]; if (value[i] == separator[0] && value[i+1] == separator[0]) { /* skip second comma */ i++; } } vol->password[j] = 0; } else { vol->password = kzalloc(temp_len+1, GFP_KERNEL); if (vol->password == NULL) { printk(KERN_WARNING "CIFS: no memory " "for password\n"); return 1; } strcpy(vol->password, value); } } else if (!strnicmp(data, "ip", 2) || !strnicmp(data, "addr", 4)) { if (!value || !*value) { vol->UNCip = NULL; } else if (strnlen(value, INET6_ADDRSTRLEN) < INET6_ADDRSTRLEN) { vol->UNCip = value; } else { printk(KERN_WARNING "CIFS: ip address " "too long\n"); return 1; } } else if (strnicmp(data, "sec", 3) == 0) { if (!value || !*value) { cERROR(1, "no security value specified"); continue; } else if (strnicmp(value, "krb5i", 5) == 0) { vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN; } else if (strnicmp(value, "krb5p", 5) == 0) { /* vol->secFlg |= CIFSSEC_MUST_SEAL | CIFSSEC_MAY_KRB5; */ cERROR(1, "Krb5 cifs privacy not supported"); return 1; } else if (strnicmp(value, "krb5", 4) == 0) { vol->secFlg |= CIFSSEC_MAY_KRB5; #ifdef CONFIG_CIFS_EXPERIMENTAL } else if (strnicmp(value, "ntlmsspi", 8) == 0) { vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN; } else if (strnicmp(value, "ntlmssp", 7) == 0) { vol->secFlg |= CIFSSEC_MAY_NTLMSSP; #endif } else if (strnicmp(value, "ntlmv2i", 7) == 0) { vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN; } else if (strnicmp(value, "ntlmv2", 6) == 0) { vol->secFlg |= CIFSSEC_MAY_NTLMV2; } else if (strnicmp(value, "ntlmi", 5) == 0) { vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN; } else if (strnicmp(value, "ntlm", 4) == 0) { /* ntlm is default so can be turned off too */ vol->secFlg |= CIFSSEC_MAY_NTLM; } else if (strnicmp(value, "nontlm", 6) == 0) { /* BB is there a better way to do this? 
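	   (illustrative note, not part of the original: "nontlm" is in effect
	   a request to prefer NTLMv2 over NTLM, so the line below sets the
	   same CIFSSEC_MAY_NTLMV2 flag that an explicit sec=ntlmv2 sets)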
*/ vol->secFlg |= CIFSSEC_MAY_NTLMV2; #ifdef CONFIG_CIFS_WEAK_PW_HASH } else if (strnicmp(value, "lanman", 6) == 0) { vol->secFlg |= CIFSSEC_MAY_LANMAN; #endif } else if (strnicmp(value, "none", 4) == 0) { vol->nullauth = 1; } else { cERROR(1, "bad security option: %s", value); return 1; } } else if ((strnicmp(data, "unc", 3) == 0) || (strnicmp(data, "target", 6) == 0) || (strnicmp(data, "path", 4) == 0)) { if (!value || !*value) { printk(KERN_WARNING "CIFS: invalid path to " "network resource\n"); return 1; /* needs_arg; */ } if ((temp_len = strnlen(value, 300)) < 300) { vol->UNC = kmalloc(temp_len+1, GFP_KERNEL); if (vol->UNC == NULL) return 1; strcpy(vol->UNC, value); if (strncmp(vol->UNC, "//", 2) == 0) { vol->UNC[0] = '\\'; vol->UNC[1] = '\\'; } else if (strncmp(vol->UNC, "\\\\", 2) != 0) { printk(KERN_WARNING "CIFS: UNC Path does not begin " "with // or \\\\ \n"); return 1; } } else { printk(KERN_WARNING "CIFS: UNC name too long\n"); return 1; } } else if ((strnicmp(data, "domain", 3) == 0) || (strnicmp(data, "workgroup", 5) == 0)) { if (!value || !*value) { printk(KERN_WARNING "CIFS: invalid domain name\n"); return 1; /* needs_arg; */ } /* BB are there cases in which a comma can be valid in a domain name and need special handling? */ if (strnlen(value, 256) < 256) { vol->domainname = value; cFYI(1, "Domain name set"); } else { printk(KERN_WARNING "CIFS: domain name too " "long\n"); return 1; } } else if (strnicmp(data, "prefixpath", 10) == 0) { if (!value || !*value) { printk(KERN_WARNING "CIFS: invalid path prefix\n"); return 1; /* needs_argument */ } if ((temp_len = strnlen(value, 1024)) < 1024) { if (value[0] != '/') temp_len++; /* missing leading slash */ vol->prepath = kmalloc(temp_len+1, GFP_KERNEL); if (vol->prepath == NULL) return 1; if (value[0] != '/') { vol->prepath[0] = '/'; strcpy(vol->prepath+1, value); } else strcpy(vol->prepath, value); cFYI(1, "prefix path %s", vol->prepath); } else { printk(KERN_WARNING "CIFS: prefix too long\n"); return 1; } } else if (strnicmp(data, "iocharset", 9) == 0) { if (!value || !*value) { printk(KERN_WARNING "CIFS: invalid iocharset " "specified\n"); return 1; /* needs_arg; */ } if (strnlen(value, 65) < 65) { if (strnicmp(value, "default", 7)) vol->iocharset = value; /* if iocharset not set then load_nls_default is used by caller */ cFYI(1, "iocharset set to %s", value); } else { printk(KERN_WARNING "CIFS: iocharset name " "too long.\n"); return 1; } } else if (!strnicmp(data, "uid", 3) && value && *value) { vol->linux_uid = simple_strtoul(value, &value, 0); uid_specified = true; } else if (!strnicmp(data, "forceuid", 8)) { override_uid = 1; } else if (!strnicmp(data, "noforceuid", 10)) { override_uid = 0; } else if (!strnicmp(data, "gid", 3) && value && *value) { vol->linux_gid = simple_strtoul(value, &value, 0); gid_specified = true; } else if (!strnicmp(data, "forcegid", 8)) { override_gid = 1; } else if (!strnicmp(data, "noforcegid", 10)) { override_gid = 0; } else if (strnicmp(data, "file_mode", 4) == 0) { if (value && *value) { vol->file_mode = simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "dir_mode", 4) == 0) { if (value && *value) { vol->dir_mode = simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "dirmode", 4) == 0) { if (value && *value) { vol->dir_mode = simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "port", 4) == 0) { if (value && *value) { vol->port = simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "rsize", 5) == 0) { if (value && *value) { vol->rsize = 
simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "wsize", 5) == 0) { if (value && *value) { vol->wsize = simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "sockopt", 5) == 0) { if (!value || !*value) { cERROR(1, "no socket option specified"); continue; } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) { vol->sockopt_tcp_nodelay = 1; } } else if (strnicmp(data, "netbiosname", 4) == 0) { if (!value || !*value || (*value == ' ')) { cFYI(1, "invalid (empty) netbiosname"); } else { memset(vol->source_rfc1001_name, 0x20, 15); for (i = 0; i < 15; i++) { /* BB are there cases in which a comma can be valid in this workstation netbios name (and need special handling)? */ /* We do not uppercase netbiosname for user */ if (value[i] == 0) break; else vol->source_rfc1001_name[i] = value[i]; } /* The string has 16th byte zero still from set at top of the function */ if ((i == 15) && (value[i] != 0)) printk(KERN_WARNING "CIFS: netbiosname" " longer than 15 truncated.\n"); } } else if (strnicmp(data, "servern", 7) == 0) { /* servernetbiosname specified override *SMBSERVER */ if (!value || !*value || (*value == ' ')) { cFYI(1, "empty server netbiosname specified"); } else { /* last byte, type, is 0x20 for servr type */ memset(vol->target_rfc1001_name, 0x20, 16); for (i = 0; i < 15; i++) { /* BB are there cases in which a comma can be valid in this workstation netbios name (and need special handling)? */ /* user or mount helper must uppercase the netbiosname */ if (value[i] == 0) break; else vol->target_rfc1001_name[i] = value[i]; } /* The string has 16th byte zero still from set at top of the function */ if ((i == 15) && (value[i] != 0)) printk(KERN_WARNING "CIFS: server net" "biosname longer than 15 truncated.\n"); } } else if (strnicmp(data, "credentials", 4) == 0) { /* ignore */ } else if (strnicmp(data, "version", 3) == 0) { /* ignore */ } else if (strnicmp(data, "guest", 5) == 0) { /* ignore */ } else if (strnicmp(data, "rw", 2) == 0) { /* ignore */ } else if (strnicmp(data, "ro", 2) == 0) { /* ignore */ } else if (strnicmp(data, "noblocksend", 11) == 0) { vol->noblocksnd = 1; } else if (strnicmp(data, "noautotune", 10) == 0) { vol->noautotune = 1; } else if ((strnicmp(data, "suid", 4) == 0) || (strnicmp(data, "nosuid", 6) == 0) || (strnicmp(data, "exec", 4) == 0) || (strnicmp(data, "noexec", 6) == 0) || (strnicmp(data, "nodev", 5) == 0) || (strnicmp(data, "noauto", 6) == 0) || (strnicmp(data, "dev", 3) == 0)) { /* The mount tool or mount.cifs helper (if present) uses these opts to set flags, and the flags are read by the kernel vfs layer before we get here (ie before read super) so there is no point trying to parse these options again and set anything and it is ok to just ignore them */ continue; } else if (strnicmp(data, "hard", 4) == 0) { vol->retry = 1; } else if (strnicmp(data, "soft", 4) == 0) { vol->retry = 0; } else if (strnicmp(data, "perm", 4) == 0) { vol->noperm = 0; } else if (strnicmp(data, "noperm", 6) == 0) { vol->noperm = 1; } else if (strnicmp(data, "mapchars", 8) == 0) { vol->remap = 1; } else if (strnicmp(data, "nomapchars", 10) == 0) { vol->remap = 0; } else if (strnicmp(data, "sfu", 3) == 0) { vol->sfu_emul = 1; } else if (strnicmp(data, "nosfu", 5) == 0) { vol->sfu_emul = 0; } else if (strnicmp(data, "nodfs", 5) == 0) { vol->nodfs = 1; } else if (strnicmp(data, "posixpaths", 10) == 0) { vol->posix_paths = 1; } else if (strnicmp(data, "noposixpaths", 12) == 0) { vol->posix_paths = 0; } else if (strnicmp(data, "nounix", 6) == 0) { vol->no_linux_ext = 1; } 
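		/* (illustrative usage, not from the original source: a mount
		   command exercising several of the options parsed here could
		   look like
		     mount -t cifs //server/share /mnt -o user=jdoe,sec=ntlmv2,nounix,noserverino
		   with each comma-separated token landing in one strnicmp
		   branch of this chain) */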
else if (strnicmp(data, "nolinux", 7) == 0) { vol->no_linux_ext = 1; } else if ((strnicmp(data, "nocase", 6) == 0) || (strnicmp(data, "ignorecase", 10) == 0)) { vol->nocase = 1; } else if (strnicmp(data, "brl", 3) == 0) { vol->nobrl = 0; } else if ((strnicmp(data, "nobrl", 5) == 0) || (strnicmp(data, "nolock", 6) == 0)) { vol->nobrl = 1; /* turn off mandatory locking in mode if remote locking is turned off since the local vfs will do advisory */ if (vol->file_mode == (S_IALLUGO & ~(S_ISUID | S_IXGRP))) vol->file_mode = S_IALLUGO; } else if (strnicmp(data, "forcemandatorylock", 9) == 0) { /* will take the shorter form "forcemand" as well */ /* This mount option will force use of mandatory (DOS/Windows style) byte range locks, instead of using posix advisory byte range locks, even if the Unix extensions are available and posix locks would be supported otherwise. If Unix extensions are not negotiated this has no effect since mandatory locks would be used (mandatory locks is all that those those servers support) */ vol->mand_lock = 1; } else if (strnicmp(data, "setuids", 7) == 0) { vol->setuids = 1; } else if (strnicmp(data, "nosetuids", 9) == 0) { vol->setuids = 0; } else if (strnicmp(data, "dynperm", 7) == 0) { vol->dynperm = true; } else if (strnicmp(data, "nodynperm", 9) == 0) { vol->dynperm = false; } else if (strnicmp(data, "nohard", 6) == 0) { vol->retry = 0; } else if (strnicmp(data, "nosoft", 6) == 0) { vol->retry = 1; } else if (strnicmp(data, "nointr", 6) == 0) { vol->intr = 0; } else if (strnicmp(data, "intr", 4) == 0) { vol->intr = 1; } else if (strnicmp(data, "nostrictsync", 12) == 0) { vol->nostrictsync = 1; } else if (strnicmp(data, "strictsync", 10) == 0) { vol->nostrictsync = 0; } else if (strnicmp(data, "serverino", 7) == 0) { vol->server_ino = 1; } else if (strnicmp(data, "noserverino", 9) == 0) { vol->server_ino = 0; } else if (strnicmp(data, "cifsacl", 7) == 0) { vol->cifs_acl = 1; } else if (strnicmp(data, "nocifsacl", 9) == 0) { vol->cifs_acl = 0; } else if (strnicmp(data, "acl", 3) == 0) { vol->no_psx_acl = 0; } else if (strnicmp(data, "noacl", 5) == 0) { vol->no_psx_acl = 1; #ifdef CONFIG_CIFS_EXPERIMENTAL } else if (strnicmp(data, "locallease", 6) == 0) { vol->local_lease = 1; #endif } else if (strnicmp(data, "sign", 4) == 0) { vol->secFlg |= CIFSSEC_MUST_SIGN; } else if (strnicmp(data, "seal", 4) == 0) { /* we do not do the following in secFlags because seal is a per tree connection (mount) not a per socket or per-smb connection option in the protocol */ /* vol->secFlg |= CIFSSEC_MUST_SEAL; */ vol->seal = 1; } else if (strnicmp(data, "direct", 6) == 0) { vol->direct_io = 1; } else if (strnicmp(data, "forcedirectio", 13) == 0) { vol->direct_io = 1; } else if (strnicmp(data, "noac", 4) == 0) { printk(KERN_WARNING "CIFS: Mount option noac not " "supported. 
Instead set " "/proc/fs/cifs/LookupCacheEnabled to 0\n"); } else printk(KERN_WARNING "CIFS: Unknown mount option %s\n", data); } if (vol->UNC == NULL) { if (devname == NULL) { printk(KERN_WARNING "CIFS: Missing UNC name for mount " "target\n"); return 1; } if ((temp_len = strnlen(devname, 300)) < 300) { vol->UNC = kmalloc(temp_len+1, GFP_KERNEL); if (vol->UNC == NULL) return 1; strcpy(vol->UNC, devname); if (strncmp(vol->UNC, "//", 2) == 0) { vol->UNC[0] = '\\'; vol->UNC[1] = '\\'; } else if (strncmp(vol->UNC, "\\\\", 2) != 0) { printk(KERN_WARNING "CIFS: UNC Path does not " "begin with // or \\\\ \n"); return 1; } value = strpbrk(vol->UNC+2, "/\\"); if (value) *value = '\\'; } else { printk(KERN_WARNING "CIFS: UNC name too long\n"); return 1; } } if (vol->UNCip == NULL) vol->UNCip = &vol->UNC[2]; if (uid_specified) vol->override_uid = override_uid; else if (override_uid == 1) printk(KERN_NOTICE "CIFS: ignoring forceuid mount option " "specified with no uid= option.\n"); if (gid_specified) vol->override_gid = override_gid; else if (override_gid == 1) printk(KERN_NOTICE "CIFS: ignoring forcegid mount option " "specified with no gid= option.\n"); return 0; } static bool match_address(struct TCP_Server_Info *server, struct sockaddr *addr) { struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; switch (addr->sa_family) { case AF_INET: if (addr4->sin_addr.s_addr != server->addr.sockAddr.sin_addr.s_addr) return false; if (addr4->sin_port && addr4->sin_port != server->addr.sockAddr.sin_port) return false; break; case AF_INET6: if (!ipv6_addr_equal(&addr6->sin6_addr, &server->addr.sockAddr6.sin6_addr)) return false; if (addr6->sin6_scope_id != server->addr.sockAddr6.sin6_scope_id) return false; if (addr6->sin6_port && addr6->sin6_port != server->addr.sockAddr6.sin6_port) return false; break; } return true; } static bool match_security(struct TCP_Server_Info *server, struct smb_vol *vol) { unsigned int secFlags; if (vol->secFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) secFlags = vol->secFlg; else secFlags = global_secflags | vol->secFlg; switch (server->secType) { case LANMAN: if (!(secFlags & (CIFSSEC_MAY_LANMAN|CIFSSEC_MAY_PLNTXT))) return false; break; case NTLMv2: if (!(secFlags & CIFSSEC_MAY_NTLMV2)) return false; break; case NTLM: if (!(secFlags & CIFSSEC_MAY_NTLM)) return false; break; case Kerberos: if (!(secFlags & CIFSSEC_MAY_KRB5)) return false; break; case RawNTLMSSP: if (!(secFlags & CIFSSEC_MAY_NTLMSSP)) return false; break; default: /* shouldn't happen */ return false; } /* now check if signing mode is acceptible */ if ((secFlags & CIFSSEC_MAY_SIGN) == 0 && (server->secMode & SECMODE_SIGN_REQUIRED)) return false; else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) && (server->secMode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0) return false; return true; } static struct TCP_Server_Info * cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) { struct TCP_Server_Info *server; write_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { /* * the demux thread can exit on its own while still in CifsNew * so don't accept any sockets in that state. Since the * tcpStatus never changes back to CifsNew it's safe to check * for this without a lock. 
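		 *
		 * (illustrative note, not in the original: on a match the
		 * code below bumps srv_count while still holding
		 * cifs_tcp_ses_lock, so the caller returns owning a reference
		 * that only cifs_put_tcp_session() drops)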
*/ if (server->tcpStatus == CifsNew) continue; if (!match_address(server, addr)) continue; if (!match_security(server, vol)) continue; ++server->srv_count; write_unlock(&cifs_tcp_ses_lock); cFYI(1, "Existing tcp session with server found"); return server; } write_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_tcp_session(struct TCP_Server_Info *server) { struct task_struct *task; write_lock(&cifs_tcp_ses_lock); if (--server->srv_count > 0) { write_unlock(&cifs_tcp_ses_lock); return; } list_del_init(&server->tcp_ses_list); write_unlock(&cifs_tcp_ses_lock); spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); cifs_fscache_release_client_cookie(server); task = xchg(&server->tsk, NULL); if (task) force_sig(SIGKILL, task); } static struct TCP_Server_Info * cifs_get_tcp_session(struct smb_vol *volume_info) { struct TCP_Server_Info *tcp_ses = NULL; struct sockaddr_storage addr; struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr; struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr; int rc; memset(&addr, 0, sizeof(struct sockaddr_storage)); cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip); if (volume_info->UNCip && volume_info->UNC) { rc = cifs_fill_sockaddr((struct sockaddr *)&addr, volume_info->UNCip, volume_info->port); if (!rc) { /* we failed translating address */ rc = -EINVAL; goto out_err; } } else if (volume_info->UNCip) { /* BB using ip addr as tcp_ses name to connect to the DFS root below */ cERROR(1, "Connecting to DFS root not implemented yet"); rc = -EINVAL; goto out_err; } else /* which tcp_sess DFS root would we conect to */ { cERROR(1, "CIFS mount error: No UNC path (e.g. -o " "unc=//192.168.1.100/public) specified"); rc = -EINVAL; goto out_err; } /* see if we already have a matching tcp_ses */ tcp_ses = cifs_find_tcp_session((struct sockaddr *)&addr, volume_info); if (tcp_ses) return tcp_ses; tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL); if (!tcp_ses) { rc = -ENOMEM; goto out_err; } tcp_ses->hostname = extract_hostname(volume_info->UNC); if (IS_ERR(tcp_ses->hostname)) { rc = PTR_ERR(tcp_ses->hostname); goto out_err; } tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; atomic_set(&tcp_ses->inFlight, 0); init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); INIT_LIST_HEAD(&tcp_ses->pending_mid_q); mutex_init(&tcp_ses->srv_mutex); memcpy(tcp_ses->workstation_RFC1001_name, volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); memcpy(tcp_ses->server_RFC1001_name, volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); tcp_ses->sequence_number = 0; INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); /* * at this point we are the only ones with the pointer * to the struct since the kernel thread not created yet * no need to spinlock this init of tcpStatus or srv_count */ tcp_ses->tcpStatus = CifsNew; ++tcp_ses->srv_count; if (addr.ss_family == AF_INET6) { cFYI(1, "attempting ipv6 connect"); /* BB should we allow ipv6 on port 139? */ /* other OS never observed in Wild doing 139 with v6 */ memcpy(&tcp_ses->addr.sockAddr6, sin_server6, sizeof(struct sockaddr_in6)); rc = ipv6_connect(tcp_ses); } else { memcpy(&tcp_ses->addr.sockAddr, sin_server, sizeof(struct sockaddr_in)); rc = ipv4_connect(tcp_ses); } if (rc < 0) { cERROR(1, "Error connecting to socket. 
Aborting operation"); goto out_err; } /* * since we're in a cifs function already, we know that * this will succeed. No need for try_module_get(). */ __module_get(THIS_MODULE); tcp_ses->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, tcp_ses, "cifsd"); if (IS_ERR(tcp_ses->tsk)) { rc = PTR_ERR(tcp_ses->tsk); cERROR(1, "error %d create cifsd thread", rc); module_put(THIS_MODULE); goto out_err; } /* thread spawned, put it on the list */ write_lock(&cifs_tcp_ses_lock); list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); write_unlock(&cifs_tcp_ses_lock); cifs_fscache_get_client_cookie(tcp_ses); return tcp_ses; out_err: if (tcp_ses) { if (!IS_ERR(tcp_ses->hostname)) kfree(tcp_ses->hostname); if (tcp_ses->ssocket) sock_release(tcp_ses->ssocket); kfree(tcp_ses); } return ERR_PTR(rc); } static struct cifsSesInfo * cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) { struct cifsSesInfo *ses; write_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { switch (server->secType) { case Kerberos: if (vol->linux_uid != ses->linux_uid) continue; break; default: /* anything else takes username/password */ if (strncmp(ses->userName, vol->username, MAX_USERNAME_SIZE)) continue; if (strlen(vol->username) != 0 && strncmp(ses->password, vol->password, MAX_PASSWORD_SIZE)) continue; } ++ses->ses_count; write_unlock(&cifs_tcp_ses_lock); return ses; } write_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_smb_ses(struct cifsSesInfo *ses) { int xid; struct TCP_Server_Info *server = ses->server; cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count); write_lock(&cifs_tcp_ses_lock); if (--ses->ses_count > 0) { write_unlock(&cifs_tcp_ses_lock); return; } list_del_init(&ses->smb_ses_list); write_unlock(&cifs_tcp_ses_lock); if (ses->status == CifsGood) { xid = GetXid(); CIFSSMBLogoff(xid, ses); _FreeXid(xid); } sesInfoFree(ses); cifs_put_tcp_session(server); } static struct cifsSesInfo * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) { int rc = -ENOMEM, xid; struct cifsSesInfo *ses; xid = GetXid(); ses = cifs_find_smb_ses(server, volume_info); if (ses) { cFYI(1, "Existing smb sess found (status=%d)", ses->status); /* existing SMB ses has a server reference already */ cifs_put_tcp_session(server); mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our ses reference */ cifs_put_smb_ses(ses); FreeXid(xid); return ERR_PTR(rc); } if (ses->need_reconnect) { cFYI(1, "Session needs reconnect"); rc = cifs_setup_session(xid, ses, volume_info->local_nls); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our reference */ cifs_put_smb_ses(ses); FreeXid(xid); return ERR_PTR(rc); } } mutex_unlock(&ses->session_mutex); FreeXid(xid); return ses; } cFYI(1, "Existing smb sess not found"); ses = sesInfoAlloc(); if (ses == NULL) goto get_ses_fail; /* new SMB session uses our server ref */ ses->server = server; if (server->addr.sockAddr6.sin6_family == AF_INET6) sprintf(ses->serverName, "%pI6", &server->addr.sockAddr6.sin6_addr); else sprintf(ses->serverName, "%pI4", &server->addr.sockAddr.sin_addr.s_addr); if (volume_info->username) strncpy(ses->userName, volume_info->username, MAX_USERNAME_SIZE); /* volume_info->password freed at unmount */ if (volume_info->password) { ses->password = kstrdup(volume_info->password, GFP_KERNEL); if (!ses->password) goto get_ses_fail; } if (volume_info->domainname) { int len = 
strlen(volume_info->domainname); ses->domainName = kmalloc(len + 1, GFP_KERNEL); if (ses->domainName) strcpy(ses->domainName, volume_info->domainname); } ses->linux_uid = volume_info->linux_uid; ses->overrideSecFlg = volume_info->secFlg; mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (!rc) rc = cifs_setup_session(xid, ses, volume_info->local_nls); mutex_unlock(&ses->session_mutex); if (rc) goto get_ses_fail; /* success, put it on the list */ write_lock(&cifs_tcp_ses_lock); list_add(&ses->smb_ses_list, &server->smb_ses_list); write_unlock(&cifs_tcp_ses_lock); FreeXid(xid); return ses; get_ses_fail: sesInfoFree(ses); FreeXid(xid); return ERR_PTR(rc); } static struct cifsTconInfo * cifs_find_tcon(struct cifsSesInfo *ses, const char *unc) { struct list_head *tmp; struct cifsTconInfo *tcon; write_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &ses->tcon_list) { tcon = list_entry(tmp, struct cifsTconInfo, tcon_list); if (tcon->tidStatus == CifsExiting) continue; if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE)) continue; ++tcon->tc_count; write_unlock(&cifs_tcp_ses_lock); return tcon; } write_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_tcon(struct cifsTconInfo *tcon) { int xid; struct cifsSesInfo *ses = tcon->ses; cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count); write_lock(&cifs_tcp_ses_lock); if (--tcon->tc_count > 0) { write_unlock(&cifs_tcp_ses_lock); return; } list_del_init(&tcon->tcon_list); write_unlock(&cifs_tcp_ses_lock); xid = GetXid(); CIFSSMBTDis(xid, tcon); _FreeXid(xid); tconInfoFree(tcon); cifs_put_smb_ses(ses); } static struct cifsTconInfo * cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info) { int rc, xid; struct cifsTconInfo *tcon; tcon = cifs_find_tcon(ses, volume_info->UNC); if (tcon) { cFYI(1, "Found match on UNC path"); /* existing tcon already has a reference */ cifs_put_smb_ses(ses); if (tcon->seal != volume_info->seal) cERROR(1, "transport encryption setting " "conflicts with existing tid"); return tcon; } tcon = tconInfoAlloc(); if (tcon == NULL) { rc = -ENOMEM; goto out_fail; } tcon->ses = ses; if (volume_info->password) { tcon->password = kstrdup(volume_info->password, GFP_KERNEL); if (!tcon->password) { rc = -ENOMEM; goto out_fail; } } if (strchr(volume_info->UNC + 3, '\\') == NULL && strchr(volume_info->UNC + 3, '/') == NULL) { cERROR(1, "Missing share name"); rc = -ENODEV; goto out_fail; } /* BB Do we need to wrap session_mutex around * this TCon call and Unix SetFS as * we do on SessSetup and reconnect? 
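	 * (illustrative note, not part of the original comment: the sequence
	 * below is CIFSTCon() sending SMB_COM_TREE_CONNECT_ANDX, with the
	 * Unix SetFS step done later by reset_cifs_unix_caps() from
	 * cifs_mount(), neither currently under session_mutex)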
*/ xid = GetXid(); rc = CIFSTCon(xid, ses, volume_info->UNC, tcon, volume_info->local_nls); FreeXid(xid); cFYI(1, "CIFS Tcon rc = %d", rc); if (rc) goto out_fail; if (volume_info->nodfs) { tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; cFYI(1, "DFS disabled (%d)", tcon->Flags); } tcon->seal = volume_info->seal; /* we can have only one retry value for a connection to a share so for resources mounted more than once to the same server share the last value passed in for the retry flag is used */ tcon->retry = volume_info->retry; tcon->nocase = volume_info->nocase; tcon->local_lease = volume_info->local_lease; write_lock(&cifs_tcp_ses_lock); list_add(&tcon->tcon_list, &ses->tcon_list); write_unlock(&cifs_tcp_ses_lock); return tcon; out_fail: tconInfoFree(tcon); return ERR_PTR(rc); } int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path, const struct nls_table *nls_codepage, unsigned int *pnum_referrals, struct dfs_info3_param **preferrals, int remap) { char *temp_unc; int rc = 0; *pnum_referrals = 0; *preferrals = NULL; if (pSesInfo->ipc_tid == 0) { temp_unc = kmalloc(2 /* for slashes */ + strnlen(pSesInfo->serverName, SERVER_NAME_LEN_WITH_NULL * 2) + 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL); if (temp_unc == NULL) return -ENOMEM; temp_unc[0] = '\\'; temp_unc[1] = '\\'; strcpy(temp_unc + 2, pSesInfo->serverName); strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$"); rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage); cFYI(1, "CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid); kfree(temp_unc); } if (rc == 0) rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, preferrals, pnum_referrals, nls_codepage, remap); /* BB map targetUNCs to dfs_info3 structures, here or in CIFSGetDFSRefer BB */ return rc; } #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key cifs_key[2]; static struct lock_class_key cifs_slock_key[2]; static inline void cifs_reclassify_socket4(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(sock_owned_by_user(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS", &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]); } static inline void cifs_reclassify_socket6(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(sock_owned_by_user(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS", &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]); } #else static inline void cifs_reclassify_socket4(struct socket *sock) { } static inline void cifs_reclassify_socket6(struct socket *sock) { } #endif /* See RFC1001 section 14 on representation of Netbios names */ static void rfc1002mangle(char *target, char *source, unsigned int length) { unsigned int i, j; for (i = 0, j = 0; i < (length); i++) { /* mask a nibble at a time and encode */ target[j] = 'A' + (0x0F & (source[i] >> 4)); target[j+1] = 'A' + (0x0F & source[i]); j += 2; } } static int ipv4_connect(struct TCP_Server_Info *server) { int rc = 0; int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; if (socket == NULL) { rc = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &socket); if (rc < 0) { cERROR(1, "Error %d creating socket", rc); return rc; } /* BB other socket options to set KEEPALIVE, NODELAY? 
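	   (a minimal sketch, not in the original code and assuming the
	   kernel_setsockopt() interface used for TCP_NODELAY further down;
	   keepalive could presumably be enabled the same way:

		int one = 1;
		kernel_setsockopt(socket, SOL_SOCKET, SO_KEEPALIVE,
				  (char *)&one, sizeof(one));
	   )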
*/ cFYI(1, "Socket created"); server->ssocket = socket; socket->sk->sk_allocation = GFP_NOFS; cifs_reclassify_socket4(socket); } /* user overrode default port */ if (server->addr.sockAddr.sin_port) { rc = socket->ops->connect(socket, (struct sockaddr *) &server->addr.sockAddr, sizeof(struct sockaddr_in), 0); if (rc >= 0) connected = true; } if (!connected) { /* save original port so we can retry user specified port later if fall back ports fail this time */ orig_port = server->addr.sockAddr.sin_port; /* do not retry on the same port we just failed on */ if (server->addr.sockAddr.sin_port != htons(CIFS_PORT)) { server->addr.sockAddr.sin_port = htons(CIFS_PORT); rc = socket->ops->connect(socket, (struct sockaddr *) &server->addr.sockAddr, sizeof(struct sockaddr_in), 0); if (rc >= 0) connected = true; } } if (!connected) { server->addr.sockAddr.sin_port = htons(RFC1001_PORT); rc = socket->ops->connect(socket, (struct sockaddr *) &server->addr.sockAddr, sizeof(struct sockaddr_in), 0); if (rc >= 0) connected = true; } /* give up here - unless we want to retry on different protocol families some day */ if (!connected) { if (orig_port) server->addr.sockAddr.sin_port = orig_port; cFYI(1, "Error %d connecting to server via ipv4", rc); sock_release(socket); server->ssocket = NULL; return rc; } /* * Eventually check for other socket options to change from * the default. sock_setsockopt not used because it expects * user space buffer */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; /* make the bufsizes depend on wsize/rsize and max requests */ if (server->noautotune) { if (socket->sk->sk_sndbuf < (200 * 1024)) socket->sk->sk_sndbuf = 200 * 1024; if (socket->sk->sk_rcvbuf < (140 * 1024)) socket->sk->sk_rcvbuf = 140 * 1024; } if (server->tcp_nodelay) { val = 1; rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, (char *)&val, sizeof(val)); if (rc) cFYI(1, "set TCP_NODELAY socket option error %d", rc); } cFYI(1, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx", socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); /* send RFC1001 sessinit */ if (server->addr.sockAddr.sin_port == htons(RFC1001_PORT)) { /* some servers require RFC1001 sessinit before sending negprot - BB check reconnection in case where second sessinit is sent but no second negprot */ struct rfc1002_session_packet *ses_init_buf; struct smb_hdr *smb_buf; ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet), GFP_KERNEL); if (ses_init_buf) { ses_init_buf->trailer.session_req.called_len = 32; if (server->server_RFC1001_name && server->server_RFC1001_name[0] != 0) rfc1002mangle(ses_init_buf->trailer. session_req.called_name, server->server_RFC1001_name, RFC1001_NAME_LEN_WITH_NULL); else rfc1002mangle(ses_init_buf->trailer. session_req.called_name, DEFAULT_CIFS_CALLED_NAME, RFC1001_NAME_LEN_WITH_NULL); ses_init_buf->trailer.session_req.calling_len = 32; /* calling name ends in null (byte 16) from old smb convention. */ if (server->workstation_RFC1001_name && server->workstation_RFC1001_name[0] != 0) rfc1002mangle(ses_init_buf->trailer. session_req.calling_name, server->workstation_RFC1001_name, RFC1001_NAME_LEN_WITH_NULL); else rfc1002mangle(ses_init_buf->trailer. 
session_req.calling_name, "LINUX_CIFS_CLNT", RFC1001_NAME_LEN_WITH_NULL); ses_init_buf->trailer.session_req.scope1 = 0; ses_init_buf->trailer.session_req.scope2 = 0; smb_buf = (struct smb_hdr *)ses_init_buf; /* sizeof RFC1002_SESSION_REQUEST with no scope */ smb_buf->smb_buf_length = 0x81000044; rc = smb_send(server, smb_buf, 0x44); kfree(ses_init_buf); msleep(1); /* RFC1001 layer in at least one server requires very short break before negprot presumably because not expecting negprot to follow so fast. This is a simple solution that works without complicating the code and causes no significant slowing down on mount for everyone else */ } /* else the negprot may still work without this even though malloc failed */ } return rc; } static int ipv6_connect(struct TCP_Server_Info *server) { int rc = 0; int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; if (socket == NULL) { rc = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &socket); if (rc < 0) { cERROR(1, "Error %d creating ipv6 socket", rc); socket = NULL; return rc; } /* BB other socket options to set KEEPALIVE, NODELAY? */ cFYI(1, "ipv6 Socket created"); server->ssocket = socket; socket->sk->sk_allocation = GFP_NOFS; cifs_reclassify_socket6(socket); } /* user overrode default port */ if (server->addr.sockAddr6.sin6_port) { rc = socket->ops->connect(socket, (struct sockaddr *) &server->addr.sockAddr6, sizeof(struct sockaddr_in6), 0); if (rc >= 0) connected = true; } if (!connected) { /* save original port so we can retry user specified port later if fall back ports fail this time */ orig_port = server->addr.sockAddr6.sin6_port; /* do not retry on the same port we just failed on */ if (server->addr.sockAddr6.sin6_port != htons(CIFS_PORT)) { server->addr.sockAddr6.sin6_port = htons(CIFS_PORT); rc = socket->ops->connect(socket, (struct sockaddr *) &server->addr.sockAddr6, sizeof(struct sockaddr_in6), 0); if (rc >= 0) connected = true; } } if (!connected) { server->addr.sockAddr6.sin6_port = htons(RFC1001_PORT); rc = socket->ops->connect(socket, (struct sockaddr *) &server->addr.sockAddr6, sizeof(struct sockaddr_in6), 0); if (rc >= 0) connected = true; } /* give up here - unless we want to retry on different protocol families some day */ if (!connected) { if (orig_port) server->addr.sockAddr6.sin6_port = orig_port; cFYI(1, "Error %d connecting to server via ipv6", rc); sock_release(socket); server->ssocket = NULL; return rc; } /* * Eventually check for other socket options to change from * the default. sock_setsockopt not used because it expects * user space buffer */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; if (server->tcp_nodelay) { val = 1; rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, (char *)&val, sizeof(val)); if (rc) cFYI(1, "set TCP_NODELAY socket option error %d", rc); } server->ssocket = socket; return rc; } void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, struct super_block *sb, struct smb_vol *vol_info) { /* if we are reconnecting then should we check to see if * any requested capabilities changed locally e.g. via * remount but we can not do much about it here * if they have (even if we could detect it by the following) * Perhaps we could add a backpointer to array of sb from tcon * or if we change to make all sb to same share the same * sb as NFS - then we only have one backpointer to sb. * What if we wanted to mount the server share twice once with * and once without posixacls or posix paths? 
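	 *
	 * (worked example, not in the original: on reconnect vol_info is
	 * NULL, so if the saved capability word lacked
	 * CIFS_UNIX_POSIX_PATHNAMES_CAP the code below clears that bit from
	 * the freshly negotiated word before the cap &= CIFS_UNIX_CAP_MASK
	 * step, keeping mount behavior stable across the reconnect)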
*/ __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); if (vol_info && vol_info->no_linux_ext) { tcon->fsUnixInfo.Capability = 0; tcon->unix_ext = 0; /* Unix Extensions disabled */ cFYI(1, "Linux protocol extensions disabled"); return; } else if (vol_info) tcon->unix_ext = 1; /* Unix Extensions supported */ if (tcon->unix_ext == 0) { cFYI(1, "Unix extensions disabled so not set on reconnect"); return; } if (!CIFSSMBQFSUnixInfo(xid, tcon)) { __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); /* check for reconnect case in which we do not want to change the mount behavior if we can avoid it */ if (vol_info == NULL) { /* turn off POSIX ACL and PATHNAMES if not set originally at mount time */ if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cERROR(1, "POSIXPATH support change"); cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { cERROR(1, "possible reconnect error"); cERROR(1, "server disabled POSIX path support"); } } cap &= CIFS_UNIX_CAP_MASK; if (vol_info && vol_info->no_psx_acl) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { cFYI(1, "negotiated posix acl support"); if (sb) sb->s_flags |= MS_POSIXACL; } if (vol_info && vol_info->posix_paths == 0) cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { cFYI(1, "negotiate posix pathnames"); if (sb) CIFS_SB(sb)->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; } /* We might be setting the path sep back to a different form if we are reconnecting and the server switched its posix path capability for this share */ if (sb && (CIFS_SB(sb)->prepathlen > 0)) CIFS_SB(sb)->prepath[0] = CIFS_DIR_SEP(CIFS_SB(sb)); if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { CIFS_SB(sb)->rsize = 127 * 1024; cFYI(DBG2, "larger reads not supported by srv"); } } cFYI(1, "Negotiate caps 0x%x", (int)cap); #ifdef CONFIG_CIFS_DEBUG2 if (cap & CIFS_UNIX_FCNTL_CAP) cFYI(1, "FCNTL cap"); if (cap & CIFS_UNIX_EXTATTR_CAP) cFYI(1, "EXTATTR cap"); if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cFYI(1, "POSIX path cap"); if (cap & CIFS_UNIX_XATTR_CAP) cFYI(1, "XATTR cap"); if (cap & CIFS_UNIX_POSIX_ACL_CAP) cFYI(1, "POSIX ACL cap"); if (cap & CIFS_UNIX_LARGE_READ_CAP) cFYI(1, "very large read cap"); if (cap & CIFS_UNIX_LARGE_WRITE_CAP) cFYI(1, "very large write cap"); #endif /* CIFS_DEBUG2 */ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { if (vol_info == NULL) { cFYI(1, "resetting capabilities failed"); } else cERROR(1, "Negotiating Unix capabilities " "with the server failed. 
Consider " "mounting with the Unix Extensions\n" "disabled, if problems are found, " "by specifying the nounix mount " "option."); } } } static void convert_delimiter(char *path, char delim) { int i; char old_delim; if (path == NULL) return; if (delim == '/') old_delim = '\\'; else old_delim = '/'; for (i = 0; path[i] != '\0'; i++) { if (path[i] == old_delim) path[i] = delim; } } static void setup_cifs_sb(struct smb_vol *pvolume_info, struct cifs_sb_info *cifs_sb) { if (pvolume_info->rsize > CIFSMaxBufSize) { cERROR(1, "rsize %d too large, using MaxBufSize", pvolume_info->rsize); cifs_sb->rsize = CIFSMaxBufSize; } else if ((pvolume_info->rsize) && (pvolume_info->rsize <= CIFSMaxBufSize)) cifs_sb->rsize = pvolume_info->rsize; else /* default */ cifs_sb->rsize = CIFSMaxBufSize; if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) { cERROR(1, "wsize %d too large, using 4096 instead", pvolume_info->wsize); cifs_sb->wsize = 4096; } else if (pvolume_info->wsize) cifs_sb->wsize = pvolume_info->wsize; else cifs_sb->wsize = min_t(const int, PAGEVEC_SIZE * PAGE_CACHE_SIZE, 127*1024); /* old default of CIFSMaxBufSize was too small now that SMB Write2 can send multiple pages in kvec. RFC1001 does not describe what happens when frame bigger than 128K is sent so use that as max in conjunction with 52K kvec constraint on arch with 4K page size */ if (cifs_sb->rsize < 2048) { cifs_sb->rsize = 2048; /* Windows ME may prefer this */ cFYI(1, "readsize set to minimum: 2048"); } /* calculate prepath */ cifs_sb->prepath = pvolume_info->prepath; if (cifs_sb->prepath) { cifs_sb->prepathlen = strlen(cifs_sb->prepath); /* we can not convert the / to \ in the path separators in the prefixpath yet because we do not know (until reset_cifs_unix_caps is called later) whether POSIX PATH CAP is available. 
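		   (illustrative example, not in the original comment: a
		   prefixpath of /sub/dir is stored here unchanged and only
		   becomes \sub\dir once convert_delimiter() runs after the
		   POSIX path capability is known)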
We normalize the / to \ after reset_cifs_unix_caps is called */ pvolume_info->prepath = NULL; } else cifs_sb->prepathlen = 0; cifs_sb->mnt_uid = pvolume_info->linux_uid; cifs_sb->mnt_gid = pvolume_info->linux_gid; cifs_sb->mnt_file_mode = pvolume_info->file_mode; cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; cFYI(1, "file mode: 0x%x dir mode: 0x%x", cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); if (pvolume_info->noperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; if (pvolume_info->setuids) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID; if (pvolume_info->server_ino) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM; if (pvolume_info->remap) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR; if (pvolume_info->no_xattr) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR; if (pvolume_info->sfu_emul) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; if (pvolume_info->nobrl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; if (pvolume_info->nostrictsync) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; if (pvolume_info->mand_lock) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL; if (pvolume_info->cifs_acl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; if (pvolume_info->override_uid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; if (pvolume_info->override_gid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; if (pvolume_info->dynperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; if (pvolume_info->direct_io) { cFYI(1, "mounting share using direct i/o"); cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; } if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) cERROR(1, "mount option dynperm ignored if cifsacl " "mount option supported"); } static int is_path_accessible(int xid, struct cifsTconInfo *tcon, struct cifs_sb_info *cifs_sb, const char *full_path) { int rc; FILE_ALL_INFO *pfile_info; pfile_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (pfile_info == NULL) return -ENOMEM; rc = CIFSSMBQPathInfo(xid, tcon, full_path, pfile_info, 0 /* not legacy */, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); kfree(pfile_info); return rc; } static void cleanup_volume_info(struct smb_vol **pvolume_info) { struct smb_vol *volume_info; if (!pvolume_info || !*pvolume_info) return; volume_info = *pvolume_info; kzfree(volume_info->password); kfree(volume_info->UNC); kfree(volume_info->prepath); kfree(volume_info); *pvolume_info = NULL; return; } #ifdef CONFIG_CIFS_DFS_UPCALL /* build_path_to_root returns full path to root when * we do not have an exiting connection (tcon) */ static char * build_unc_path_to_root(const struct smb_vol *volume_info, const struct cifs_sb_info *cifs_sb) { char *full_path; int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1); full_path = kmalloc(unc_len + cifs_sb->prepathlen + 1, GFP_KERNEL); if (full_path == NULL) return ERR_PTR(-ENOMEM); strncpy(full_path, volume_info->UNC, unc_len); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { int i; for (i = 0; i < unc_len; i++) { if (full_path[i] == '\\') full_path[i] = '/'; } } if (cifs_sb->prepathlen) strncpy(full_path + unc_len, cifs_sb->prepath, cifs_sb->prepathlen); full_path[unc_len + cifs_sb->prepathlen] = 0; /* add trailing null */ return full_path; } #endif int cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, char *mount_data_global, const char *devname) { int rc; int xid; struct smb_vol *volume_info; struct cifsSesInfo *pSesInfo; struct cifsTconInfo *tcon; struct TCP_Server_Info *srvTcp; char *full_path; char *mount_data = mount_data_global; #ifdef 
CONFIG_CIFS_DFS_UPCALL struct dfs_info3_param *referrals = NULL; unsigned int num_referrals = 0; int referral_walks_count = 0; try_mount_again: #endif rc = 0; tcon = NULL; pSesInfo = NULL; srvTcp = NULL; full_path = NULL; xid = GetXid(); volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); if (!volume_info) { rc = -ENOMEM; goto out; } if (cifs_parse_mount_options(mount_data, devname, volume_info)) { rc = -EINVAL; goto out; } if (volume_info->nullauth) { cFYI(1, "null user"); volume_info->username = ""; } else if (volume_info->username) { /* BB fixme parse for domain name here */ cFYI(1, "Username: %s", volume_info->username); } else { cifserror("No username specified"); /* In userspace mount helper we can get user name from alternate locations such as env variables and files on disk */ rc = -EINVAL; goto out; } /* this is needed for ASCII cp to Unicode converts */ if (volume_info->iocharset == NULL) { /* load_nls_default cannot return null */ volume_info->local_nls = load_nls_default(); } else { volume_info->local_nls = load_nls(volume_info->iocharset); if (volume_info->local_nls == NULL) { cERROR(1, "CIFS mount error: iocharset %s not found", volume_info->iocharset); rc = -ELIBACC; goto out; } } cifs_sb->local_nls = volume_info->local_nls; /* get a reference to a tcp session */ srvTcp = cifs_get_tcp_session(volume_info); if (IS_ERR(srvTcp)) { rc = PTR_ERR(srvTcp); goto out; } /* get a reference to a SMB session */ pSesInfo = cifs_get_smb_ses(srvTcp, volume_info); if (IS_ERR(pSesInfo)) { rc = PTR_ERR(pSesInfo); pSesInfo = NULL; goto mount_fail_check; } setup_cifs_sb(volume_info, cifs_sb); if (pSesInfo->capabilities & CAP_LARGE_FILES) sb->s_maxbytes = MAX_LFS_FILESIZE; else sb->s_maxbytes = MAX_NON_LFS; /* BB FIXME fix time_gran to be larger for LANMAN sessions */ sb->s_time_gran = 100; /* search for existing tcon to this server share */ tcon = cifs_get_tcon(pSesInfo, volume_info); if (IS_ERR(tcon)) { rc = PTR_ERR(tcon); tcon = NULL; goto remote_path_check; } cifs_sb->tcon = tcon; /* do not care if following two calls succeed - informational */ if (!tcon->ipc) { CIFSSMBQFSDeviceInfo(xid, tcon); CIFSSMBQFSAttributeInfo(xid, tcon); } /* tell server which Unix caps we support */ if (tcon->ses->capabilities & CAP_UNIX) /* reset of caps checks mount to see if unix extensions disabled for just this mount */ reset_cifs_unix_caps(xid, tcon, sb, volume_info); else tcon->unix_ext = 0; /* server does not support them */ /* convert forward to back slashes in prepath here if needed */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { cifs_sb->rsize = 1024 * 127; cFYI(DBG2, "no very large read support, rsize now 127K"); } if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) cifs_sb->wsize = min(cifs_sb->wsize, (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); if (!(tcon->ses->capabilities & CAP_LARGE_READ_X)) cifs_sb->rsize = min(cifs_sb->rsize, (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); remote_path_check: /* check if a whole path (including prepath) is not remote */ if (!rc && cifs_sb->prepathlen && tcon) { /* build_path_to_root works only when we have a valid tcon */ full_path = cifs_build_path_to_root(cifs_sb); if (full_path == NULL) { rc = -ENOMEM; goto mount_fail_check; } rc = is_path_accessible(xid, tcon, cifs_sb, full_path); if (rc != -EREMOTE) { kfree(full_path); goto mount_fail_check; } kfree(full_path); } /* get referral if needed */ if (rc == 
-EREMOTE) { #ifdef CONFIG_CIFS_DFS_UPCALL if (referral_walks_count > MAX_NESTED_LINKS) { /* * BB: when we implement proper loop detection, * we will remove this check. But now we need it * to prevent an indefinite loop if 'DFS tree' is * misconfigured (i.e. has loops). */ rc = -ELOOP; goto mount_fail_check; } /* convert forward to back slashes in prepath here if needed */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); full_path = build_unc_path_to_root(volume_info, cifs_sb); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto mount_fail_check; } cFYI(1, "Getting referral for: %s", full_path); rc = get_dfs_path(xid, pSesInfo , full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (!rc && num_referrals > 0) { char *fake_devname = NULL; if (mount_data != mount_data_global) kfree(mount_data); mount_data = cifs_compose_mount_options( cifs_sb->mountdata, full_path + 1, referrals, &fake_devname); free_dfs_info_array(referrals, num_referrals); kfree(fake_devname); kfree(full_path); if (IS_ERR(mount_data)) { rc = PTR_ERR(mount_data); mount_data = NULL; goto mount_fail_check; } if (tcon) cifs_put_tcon(tcon); else if (pSesInfo) cifs_put_smb_ses(pSesInfo); cleanup_volume_info(&volume_info); referral_walks_count++; FreeXid(xid); goto try_mount_again; } #else /* No DFS support, return error on mount */ rc = -EOPNOTSUPP; #endif } mount_fail_check: /* on error free sesinfo and tcon struct if needed */ if (rc) { if (mount_data != mount_data_global) kfree(mount_data); /* If find_unc succeeded then rc == 0 so we can not end */ /* up accidently freeing someone elses tcon struct */ if (tcon) cifs_put_tcon(tcon); else if (pSesInfo) cifs_put_smb_ses(pSesInfo); else cifs_put_tcp_session(srvTcp); goto out; } /* volume_info->password is freed above when existing session found (in which case it is not needed anymore) but when new sesion is created the password ptr is put in the new session structure (in which case the password will be freed at unmount time) */ out: /* zero out password before freeing */ cleanup_volume_info(&volume_info); FreeXid(xid); return rc; } int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, const char *tree, struct cifsTconInfo *tcon, const struct nls_table *nls_codepage) { struct smb_hdr *smb_buffer; struct smb_hdr *smb_buffer_response; TCONX_REQ *pSMB; TCONX_RSP *pSMBr; unsigned char *bcc_ptr; int rc = 0; int length, bytes_left; __u16 count; if (ses == NULL) return -EIO; smb_buffer = cifs_buf_get(); if (smb_buffer == NULL) return -ENOMEM; smb_buffer_response = smb_buffer; header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, NULL /*no tid */ , 4 /*wct */ ); smb_buffer->Mid = GetNextMid(ses->server); smb_buffer->Uid = ses->Suid; pSMB = (TCONX_REQ *) smb_buffer; pSMBr = (TCONX_RSP *) smb_buffer_response; pSMB->AndXCommand = 0xFF; pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); bcc_ptr = &pSMB->Password[0]; if ((ses->server->secMode) & SECMODE_USER) { pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ *bcc_ptr = 0; /* password is null byte */ bcc_ptr++; /* skip password */ /* already aligned so no need to do it below */ } else { pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); /* BB FIXME add code to fail this if NTLMv2 or Kerberos specified as required (when that support is added to the vfs in the future) as only NTLM or the much weaker LANMAN (which we do not send by default) is accepted by Samba (not sure whether other servers allow 
NTLMv2 password here) */ #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((global_secflags & CIFSSEC_MAY_LANMAN) && (ses->server->secType == LANMAN)) calc_lanman_hash(tcon->password, ses->server->cryptKey, ses->server->secMode & SECMODE_PW_ENCRYPT ? true : false, bcc_ptr); else #endif /* CIFS_WEAK_PW_HASH */ SMBNTencrypt(tcon->password, ses->server->cryptKey, bcc_ptr); bcc_ptr += CIFS_SESS_KEY_SIZE; if (ses->capabilities & CAP_UNICODE) { /* must align unicode strings */ *bcc_ptr = 0; /* null byte password */ bcc_ptr++; } } if (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_STATUS32) { smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; } if (ses->capabilities & CAP_DFS) { smb_buffer->Flags2 |= SMBFLG2_DFS; } if (ses->capabilities & CAP_UNICODE) { smb_buffer->Flags2 |= SMBFLG2_UNICODE; length = cifs_strtoUCS((__le16 *) bcc_ptr, tree, 6 /* max utf8 char length in bytes */ * (/* server len*/ + 256 /* share len */), nls_codepage); bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ bcc_ptr += 2; /* skip trailing null */ } else { /* ASCII */ strcpy(bcc_ptr, tree); bcc_ptr += strlen(tree) + 1; } strcpy(bcc_ptr, "?????"); bcc_ptr += strlen("?????"); bcc_ptr += 1; count = bcc_ptr - &pSMB->Password[0]; pSMB->hdr.smb_buf_length += count; pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, CIFS_STD_OP); /* above now done in SendReceive */ if ((rc == 0) && (tcon != NULL)) { bool is_unicode; tcon->tidStatus = CifsGood; tcon->need_reconnect = false; tcon->tid = smb_buffer_response->Tid; bcc_ptr = pByteArea(smb_buffer_response); bytes_left = BCC(smb_buffer_response); length = strnlen(bcc_ptr, bytes_left - 2); if (smb_buffer->Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; /* skip service field (NB: this field is always ASCII) */ if (length == 3) { if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && (bcc_ptr[2] == 'C')) { cFYI(1, "IPC connection"); tcon->ipc = 1; } } else if (length == 2) { if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { /* the most common case */ cFYI(1, "disk share connection"); } } bcc_ptr += length + 1; bytes_left -= (length + 1); strncpy(tcon->treeName, tree, MAX_TREE_SIZE); /* mostly informational -- no need to fail on error here */ kfree(tcon->nativeFileSystem); tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr, bytes_left, is_unicode, nls_codepage); cFYI(1, "nativeFileSystem=%s", tcon->nativeFileSystem); if ((smb_buffer_response->WordCount == 3) || (smb_buffer_response->WordCount == 7)) /* field is in same location */ tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); else tcon->Flags = 0; cFYI(1, "Tcon flags: 0x%x ", tcon->Flags); } else if ((rc == 0) && tcon == NULL) { /* all we need to save for IPC$ connection */ ses->ipc_tid = smb_buffer_response->Tid; } cifs_buf_release(smb_buffer); return rc; } int cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) { int rc = 0; char *tmp; if (cifs_sb->tcon) cifs_put_tcon(cifs_sb->tcon); cifs_sb->tcon = NULL; tmp = cifs_sb->prepath; cifs_sb->prepathlen = 0; cifs_sb->prepath = NULL; kfree(tmp); return rc; } int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses) { int rc = 0; struct TCP_Server_Info *server = ses->server; /* only send once per connect */ if (server->maxBuf != 0) return 0; rc = CIFSSMBNegotiate(xid, ses); if (rc == -EAGAIN) { /* retry only once on 1st time connection */ rc = CIFSSMBNegotiate(xid, ses); if (rc == -EAGAIN) rc = 
-EHOSTDOWN; } if (rc == 0) { spin_lock(&GlobalMid_Lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsGood; else rc = -EHOSTDOWN; spin_unlock(&GlobalMid_Lock); } return rc; } int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, struct nls_table *nls_info) { int rc = 0; struct TCP_Server_Info *server = ses->server; ses->flags = 0; ses->capabilities = server->capabilities; if (linuxExtEnabled == 0) ses->capabilities &= (~CAP_UNIX); cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d", server->secMode, server->capabilities, server->timeAdj); rc = CIFS_SessSetup(xid, ses, nls_info); if (rc) { cERROR(1, "Send error in SessSetup = %d", rc); } else { cFYI(1, "CIFS Session Established successfully"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); } return rc; }
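/*
 * Illustrative sketch only (the helper name is hypothetical, not part of
 * the original file): the intended calling order for the two routines
 * above is negotiate once per TCP connection, then set up the session.
 */
static int example_setup_ses(unsigned int xid, struct cifsSesInfo *ses,
			     struct nls_table *nls_info)
{
	int rc;

	rc = cifs_negotiate_protocol(xid, ses);	/* no-op after first success */
	if (rc)
		return rc;
	/* marks ses->status CifsGood on success */
	return cifs_setup_session(xid, ses, nls_info);
}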
./CrossVul/dataset_final_sorted/CWE-264/c/good_3454_1
crossvul-cpp_data_good_5616_0
/* * linux/fs/namespace.c * * (C) Copyright Al Viro 2000, 2001 * Released under GPL v2. * * Based on code from fs/super.c, copyright Linus Torvalds and others. * Heavily rewritten. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/capability.h> #include <linux/mnt_namespace.h> #include <linux/user_namespace.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/idr.h> #include <linux/acct.h> /* acct_auto_close_mnt */ #include <linux/ramfs.h> /* init_rootfs */ #include <linux/fs_struct.h> /* get_fs_root et.al. */ #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ #include <linux/uaccess.h> #include <linux/proc_fs.h> #include "pnode.h" #include "internal.h" #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) #define HASH_SIZE (1UL << HASH_SHIFT) static int event; static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_group_ida); static DEFINE_SPINLOCK(mnt_id_lock); static int mnt_id_start = 0; static int mnt_group_start = 1; static struct list_head *mount_hashtable __read_mostly; static struct kmem_cache *mnt_cache __read_mostly; static struct rw_semaphore namespace_sem; /* /sys/fs */ struct kobject *fs_kobj; EXPORT_SYMBOL_GPL(fs_kobj); /* * vfsmount lock may be taken for read to prevent changes to the * vfsmount hash, ie. during mountpoint lookups or walking back * up the tree. * * It should be taken for write in all cases where the vfsmount * tree or hash is modified or when a vfsmount structure is modified. */ DEFINE_BRLOCK(vfsmount_lock); static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) { unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); tmp += ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> HASH_SHIFT); return tmp & (HASH_SIZE - 1); } #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) /* * allocation is serialized by namespace_sem, but we need the spinlock to * serialize with freeing. 
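 *
 * For illustration, that is why the body below is split the way it is:
 * ida_pre_get() may allocate with GFP_KERNEL and so runs outside the
 * lock; only the non-sleeping ida_get_new_above() runs under
 * mnt_id_lock, and an -EAGAIN result loops back to preallocate again.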
*/ static int mnt_alloc_id(struct mount *mnt) { int res; retry: ida_pre_get(&mnt_id_ida, GFP_KERNEL); spin_lock(&mnt_id_lock); res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); if (!res) mnt_id_start = mnt->mnt_id + 1; spin_unlock(&mnt_id_lock); if (res == -EAGAIN) goto retry; return res; } static void mnt_free_id(struct mount *mnt) { int id = mnt->mnt_id; spin_lock(&mnt_id_lock); ida_remove(&mnt_id_ida, id); if (mnt_id_start > id) mnt_id_start = id; spin_unlock(&mnt_id_lock); } /* * Allocate a new peer group ID * * mnt_group_ida is protected by namespace_sem */ static int mnt_alloc_group_id(struct mount *mnt) { int res; if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) return -ENOMEM; res = ida_get_new_above(&mnt_group_ida, mnt_group_start, &mnt->mnt_group_id); if (!res) mnt_group_start = mnt->mnt_group_id + 1; return res; } /* * Release a peer group ID */ void mnt_release_group_id(struct mount *mnt) { int id = mnt->mnt_group_id; ida_remove(&mnt_group_ida, id); if (mnt_group_start > id) mnt_group_start = id; mnt->mnt_group_id = 0; } /* * vfsmount lock must be held for read */ static inline void mnt_add_count(struct mount *mnt, int n) { #ifdef CONFIG_SMP this_cpu_add(mnt->mnt_pcp->mnt_count, n); #else preempt_disable(); mnt->mnt_count += n; preempt_enable(); #endif } /* * vfsmount lock must be held for write */ unsigned int mnt_get_count(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; } return count; #else return mnt->mnt_count; #endif } static struct mount *alloc_vfsmnt(const char *name) { struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); if (mnt) { int err; err = mnt_alloc_id(mnt); if (err) goto out_free_cache; if (name) { mnt->mnt_devname = kstrdup(name, GFP_KERNEL); if (!mnt->mnt_devname) goto out_free_id; } #ifdef CONFIG_SMP mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); if (!mnt->mnt_pcp) goto out_free_devname; this_cpu_add(mnt->mnt_pcp->mnt_count, 1); #else mnt->mnt_count = 1; mnt->mnt_writers = 0; #endif INIT_LIST_HEAD(&mnt->mnt_hash); INIT_LIST_HEAD(&mnt->mnt_child); INIT_LIST_HEAD(&mnt->mnt_mounts); INIT_LIST_HEAD(&mnt->mnt_list); INIT_LIST_HEAD(&mnt->mnt_expire); INIT_LIST_HEAD(&mnt->mnt_share); INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif } return mnt; #ifdef CONFIG_SMP out_free_devname: kfree(mnt->mnt_devname); #endif out_free_id: mnt_free_id(mnt); out_free_cache: kmem_cache_free(mnt_cache, mnt); return NULL; } /* * Most r/o checks on a fs are for operations that take * discrete amounts of time, like a write() or unlink(). * We must keep track of when those operations start * (for permission checks) and when they end, so that * we can determine when writes are able to occur to * a filesystem. */ /* * __mnt_is_readonly: check whether a mount is read-only * @mnt: the mount to check for its write status * * This shouldn't be used directly ouside of the VFS. * It does not guarantee that the filesystem will stay * r/w, just that it is right *now*. This can not and * should not be used in place of IS_RDONLY(inode). * mnt_want/drop_write() will _keep_ the filesystem * r/w. 
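 *
 * A minimal illustrative pairing (sketch of a hypothetical caller):
 *
 *	err = mnt_want_write(mnt);
 *	if (err)
 *		return err;
 *	... modify the filesystem ...
 *	mnt_drop_write(mnt);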
*/ int __mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_flags & MNT_READONLY) return 1; if (mnt->mnt_sb->s_flags & MS_RDONLY) return 1; return 0; } EXPORT_SYMBOL_GPL(__mnt_is_readonly); static inline void mnt_inc_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_inc(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers++; #endif } static inline void mnt_dec_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_dec(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers--; #endif } static unsigned int mnt_get_writers(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; } return count; #else return mnt->mnt_writers; #endif } static int mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_sb->s_readonly_remount) return 1; /* Order wrt setting s_flags/s_readonly_remount in do_remount() */ smp_rmb(); return __mnt_is_readonly(mnt); } /* * Most r/o & frozen checks on a fs are for operations that take discrete * amounts of time, like a write() or unlink(). We must keep track of when * those operations start (for permission checks) and when they end, so that we * can determine when writes are able to occur to a filesystem. */ /** * __mnt_want_write - get write access to a mount without freeze protection * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mnt it read-write) before * returning success. This operation does not protect against filesystem being * frozen. When the write operation is finished, __mnt_drop_write() must be * called. This is effectively a refcount. */ int __mnt_want_write(struct vfsmount *m) { struct mount *mnt = real_mount(m); int ret = 0; preempt_disable(); mnt_inc_writers(mnt); /* * The store to mnt_inc_writers must be visible before we pass * MNT_WRITE_HOLD loop below, so that the slowpath can see our * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) cpu_relax(); /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until * MNT_WRITE_HOLD is cleared. */ smp_rmb(); if (mnt_is_readonly(m)) { mnt_dec_writers(mnt); ret = -EROFS; } preempt_enable(); return ret; } /** * mnt_want_write - get write access to a mount * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mount is read-write, filesystem * is not frozen) before returning success. When the write operation is * finished, mnt_drop_write() must be called. This is effectively a refcount. */ int mnt_want_write(struct vfsmount *m) { int ret; sb_start_write(m->mnt_sb); ret = __mnt_want_write(m); if (ret) sb_end_write(m->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write); /** * mnt_clone_write - get write access to a mount * @mnt: the mount on which to take a write * * This is effectively like mnt_want_write, except * it must only be used to take an extra write reference * on a mountpoint that we already know has a write reference * on it. This allows some optimisation. * * After finished, mnt_drop_write must be called as usual to * drop the reference. 
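 *
 * For illustration (this is what __mnt_want_write_file() below relies
 * on): a file already open for write holds a write reference on its
 * mount, so mnt_clone_write(file->f_path.mnt) may take the extra
 * reference without the MNT_WRITE_HOLD handshake of __mnt_want_write().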
*/ int mnt_clone_write(struct vfsmount *mnt) { /* superblock may be r/o */ if (__mnt_is_readonly(mnt)) return -EROFS; preempt_disable(); mnt_inc_writers(real_mount(mnt)); preempt_enable(); return 0; } EXPORT_SYMBOL_GPL(mnt_clone_write); /** * __mnt_want_write_file - get write access to a file's mount * @file: the file who's mount on which to take a write * * This is like __mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ int __mnt_want_write_file(struct file *file) { struct inode *inode = file_inode(file); if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode)) return __mnt_want_write(file->f_path.mnt); else return mnt_clone_write(file->f_path.mnt); } /** * mnt_want_write_file - get write access to a file's mount * @file: the file who's mount on which to take a write * * This is like mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ int mnt_want_write_file(struct file *file) { int ret; sb_start_write(file->f_path.mnt->mnt_sb); ret = __mnt_want_write_file(file); if (ret) sb_end_write(file->f_path.mnt->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write_file); /** * __mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done * performing writes to it. Must be matched with * __mnt_want_write() call above. */ void __mnt_drop_write(struct vfsmount *mnt) { preempt_disable(); mnt_dec_writers(real_mount(mnt)); preempt_enable(); } /** * mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done performing writes to it and * also allows filesystem to be frozen again. Must be matched with * mnt_want_write() call above. */ void mnt_drop_write(struct vfsmount *mnt) { __mnt_drop_write(mnt); sb_end_write(mnt->mnt_sb); } EXPORT_SYMBOL_GPL(mnt_drop_write); void __mnt_drop_write_file(struct file *file) { __mnt_drop_write(file->f_path.mnt); } void mnt_drop_write_file(struct file *file) { mnt_drop_write(file->f_path.mnt); } EXPORT_SYMBOL(mnt_drop_write_file); static int mnt_make_readonly(struct mount *mnt) { int ret = 0; br_write_lock(&vfsmount_lock); mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; /* * After storing MNT_WRITE_HOLD, we'll read the counters. This store * should be visible before we do. */ smp_mb(); /* * With writers on hold, if this value is zero, then there are * definitely no active writers (although held writers may subsequently * increment the count, they'll have to wait, and decrement it after * seeing MNT_READONLY). * * It is OK to have counter incremented on one CPU and decremented on * another: the sum will add up correctly. The danger would be when we * sum up each counter, if we read a counter before it is incremented, * but then read another CPU's count which it has been subsequently * decremented from -- we would see more decrements than we should. * MNT_WRITE_HOLD protects against this scenario, because * mnt_want_write first increments count, then smp_mb, then spins on * MNT_WRITE_HOLD, so it can't be decremented by another CPU while * we're counting up here. */ if (mnt_get_writers(mnt) > 0) ret = -EBUSY; else mnt->mnt.mnt_flags |= MNT_READONLY; /* * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers * that become unheld will see MNT_READONLY. 
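 *
 * An illustrative interleaving of the handshake (sketch):
 *
 *	mnt_make_readonly()			__mnt_want_write()
 *	set MNT_WRITE_HOLD			mnt_inc_writers()
 *	smp_mb()				smp_mb()
 *	read writer count			spin while MNT_WRITE_HOLD
 *	set MNT_READONLY iff count == 0		smp_rmb(), recheck read-only
 *
 * Either this side sees the increment and fails with -EBUSY, or the
 * writer waits, then observes MNT_READONLY and backs out with -EROFS.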
*/ smp_wmb(); mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; br_write_unlock(&vfsmount_lock); return ret; } static void __mnt_unmake_readonly(struct mount *mnt) { br_write_lock(&vfsmount_lock); mnt->mnt.mnt_flags &= ~MNT_READONLY; br_write_unlock(&vfsmount_lock); } int sb_prepare_remount_readonly(struct super_block *sb) { struct mount *mnt; int err = 0; /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */ if (atomic_long_read(&sb->s_remove_count)) return -EBUSY; br_write_lock(&vfsmount_lock); list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; smp_mb(); if (mnt_get_writers(mnt) > 0) { err = -EBUSY; break; } } } if (!err && atomic_long_read(&sb->s_remove_count)) err = -EBUSY; if (!err) { sb->s_readonly_remount = 1; smp_wmb(); } list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; } br_write_unlock(&vfsmount_lock); return err; } static void free_vfsmnt(struct mount *mnt) { kfree(mnt->mnt_devname); mnt_free_id(mnt); #ifdef CONFIG_SMP free_percpu(mnt->mnt_pcp); #endif kmem_cache_free(mnt_cache, mnt); } /* * find the first or last mount at @dentry on vfsmount @mnt depending on * @dir. If @dir is set return the first mount else return the last mount. * vfsmount_lock must be held for read or write. */ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, int dir) { struct list_head *head = mount_hashtable + hash(mnt, dentry); struct list_head *tmp = head; struct mount *p, *found = NULL; for (;;) { tmp = dir ? tmp->next : tmp->prev; p = NULL; if (tmp == head) break; p = list_entry(tmp, struct mount, mnt_hash); if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) { found = p; break; } } return found; } /* * lookup_mnt - Return the first child mount mounted at path * * "First" means first mounted chronologically. If you create the * following mounts: * * mount /dev/sda1 /mnt * mount /dev/sda2 /mnt * mount /dev/sda3 /mnt * * Then lookup_mnt() on the base /mnt dentry in the root mount will * return successively the root dentry and vfsmount of /dev/sda1, then * /dev/sda2, then /dev/sda3, then NULL. * * lookup_mnt takes a reference to the found vfsmount. */ struct vfsmount *lookup_mnt(struct path *path) { struct mount *child_mnt; br_read_lock(&vfsmount_lock); child_mnt = __lookup_mnt(path->mnt, path->dentry, 1); if (child_mnt) { mnt_add_count(child_mnt, 1); br_read_unlock(&vfsmount_lock); return &child_mnt->mnt; } else { br_read_unlock(&vfsmount_lock); return NULL; } } static inline int check_mnt(struct mount *mnt) { return mnt->mnt_ns == current->nsproxy->mnt_ns; } /* * vfsmount lock must be held for write */ static void touch_mnt_namespace(struct mnt_namespace *ns) { if (ns) { ns->event = ++event; wake_up_interruptible(&ns->poll); } } /* * vfsmount lock must be held for write */ static void __touch_mnt_namespace(struct mnt_namespace *ns) { if (ns && ns->event != event) { ns->event = event; wake_up_interruptible(&ns->poll); } } /* * Clear dentry's mounted state if it has no remaining mounts. * vfsmount_lock must be held for write. 
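 *
 * Example: when the last of several mounts stacked on one directory is
 * detached, the hash scan below finds no remaining entry whose
 * mnt_mountpoint is this dentry, so DCACHE_MOUNTED is cleared and
 * d_mountpoint() on the dentry reports false again.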
*/ static void dentry_reset_mounted(struct dentry *dentry) { unsigned u; for (u = 0; u < HASH_SIZE; u++) { struct mount *p; list_for_each_entry(p, &mount_hashtable[u], mnt_hash) { if (p->mnt_mountpoint == dentry) return; } } spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_MOUNTED; spin_unlock(&dentry->d_lock); } /* * vfsmount lock must be held for write */ static void detach_mnt(struct mount *mnt, struct path *old_path) { old_path->dentry = mnt->mnt_mountpoint; old_path->mnt = &mnt->mnt_parent->mnt; mnt->mnt_parent = mnt; mnt->mnt_mountpoint = mnt->mnt.mnt_root; list_del_init(&mnt->mnt_child); list_del_init(&mnt->mnt_hash); dentry_reset_mounted(old_path->dentry); } /* * vfsmount lock must be held for write */ void mnt_set_mountpoint(struct mount *mnt, struct dentry *dentry, struct mount *child_mnt) { mnt_add_count(mnt, 1); /* essentially, that's mntget */ child_mnt->mnt_mountpoint = dget(dentry); child_mnt->mnt_parent = mnt; spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_MOUNTED; spin_unlock(&dentry->d_lock); } /* * vfsmount lock must be held for write */ static void attach_mnt(struct mount *mnt, struct path *path) { mnt_set_mountpoint(real_mount(path->mnt), path->dentry, mnt); list_add_tail(&mnt->mnt_hash, mount_hashtable + hash(path->mnt, path->dentry)); list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts); } /* * vfsmount lock must be held for write */ static void commit_tree(struct mount *mnt) { struct mount *parent = mnt->mnt_parent; struct mount *m; LIST_HEAD(head); struct mnt_namespace *n = parent->mnt_ns; BUG_ON(parent == mnt); list_add_tail(&head, &mnt->mnt_list); list_for_each_entry(m, &head, mnt_list) m->mnt_ns = n; list_splice(&head, n->list.prev); list_add_tail(&mnt->mnt_hash, mount_hashtable + hash(&parent->mnt, mnt->mnt_mountpoint)); list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); touch_mnt_namespace(n); } static struct mount *next_mnt(struct mount *p, struct mount *root) { struct list_head *next = p->mnt_mounts.next; if (next == &p->mnt_mounts) { while (1) { if (p == root) return NULL; next = p->mnt_child.next; if (next != &p->mnt_parent->mnt_mounts) break; p = p->mnt_parent; } } return list_entry(next, struct mount, mnt_child); } static struct mount *skip_mnt_tree(struct mount *p) { struct list_head *prev = p->mnt_mounts.prev; while (prev != &p->mnt_mounts) { p = list_entry(prev, struct mount, mnt_child); prev = p->mnt_mounts.prev; } return p; } struct vfsmount * vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data) { struct mount *mnt; struct dentry *root; if (!type) return ERR_PTR(-ENODEV); mnt = alloc_vfsmnt(name); if (!mnt) return ERR_PTR(-ENOMEM); if (flags & MS_KERNMOUNT) mnt->mnt.mnt_flags = MNT_INTERNAL; root = mount_fs(type, flags, name, data); if (IS_ERR(root)) { free_vfsmnt(mnt); return ERR_CAST(root); } mnt->mnt.mnt_root = root; mnt->mnt.mnt_sb = root->d_sb; mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; br_write_lock(&vfsmount_lock); list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts); br_write_unlock(&vfsmount_lock); return &mnt->mnt; } EXPORT_SYMBOL_GPL(vfs_kern_mount); static struct mount *clone_mnt(struct mount *old, struct dentry *root, int flag) { struct super_block *sb = old->mnt.mnt_sb; struct mount *mnt; int err; mnt = alloc_vfsmnt(old->mnt_devname); if (!mnt) return ERR_PTR(-ENOMEM); if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE)) mnt->mnt_group_id = 0; /* not a peer of original */ else mnt->mnt_group_id = old->mnt_group_id; if ((flag & CL_MAKE_SHARED) && 
!mnt->mnt_group_id) { err = mnt_alloc_group_id(mnt); if (err) goto out_free; } mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; /* Don't allow unprivileged users to change mount flags */ if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY)) mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; atomic_inc(&sb->s_active); mnt->mnt.mnt_sb = sb; mnt->mnt.mnt_root = dget(root); mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; br_write_lock(&vfsmount_lock); list_add_tail(&mnt->mnt_instance, &sb->s_mounts); br_write_unlock(&vfsmount_lock); if ((flag & CL_SLAVE) || ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) { list_add(&mnt->mnt_slave, &old->mnt_slave_list); mnt->mnt_master = old; CLEAR_MNT_SHARED(mnt); } else if (!(flag & CL_PRIVATE)) { if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) list_add(&mnt->mnt_share, &old->mnt_share); if (IS_MNT_SLAVE(old)) list_add(&mnt->mnt_slave, &old->mnt_slave); mnt->mnt_master = old->mnt_master; } if (flag & CL_MAKE_SHARED) set_mnt_shared(mnt); /* stick the duplicate mount on the same expiry list * as the original if that was on one */ if (flag & CL_EXPIRE) { if (!list_empty(&old->mnt_expire)) list_add(&mnt->mnt_expire, &old->mnt_expire); } return mnt; out_free: free_vfsmnt(mnt); return ERR_PTR(err); } static inline void mntfree(struct mount *mnt) { struct vfsmount *m = &mnt->mnt; struct super_block *sb = m->mnt_sb; /* * This probably indicates that somebody messed * up a mnt_want/drop_write() pair. If this * happens, the filesystem was probably unable * to make r/w->r/o transitions. */ /* * The locking used to deal with mnt_count decrement provides barriers, * so mnt_get_writers() below is safe. */ WARN_ON(mnt_get_writers(mnt)); fsnotify_vfsmount_delete(m); dput(m->mnt_root); free_vfsmnt(mnt); deactivate_super(sb); } static void mntput_no_expire(struct mount *mnt) { put_again: #ifdef CONFIG_SMP br_read_lock(&vfsmount_lock); if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ mnt_add_count(mnt, -1); br_read_unlock(&vfsmount_lock); return; } br_read_unlock(&vfsmount_lock); br_write_lock(&vfsmount_lock); mnt_add_count(mnt, -1); if (mnt_get_count(mnt)) { br_write_unlock(&vfsmount_lock); return; } #else mnt_add_count(mnt, -1); if (likely(mnt_get_count(mnt))) return; br_write_lock(&vfsmount_lock); #endif if (unlikely(mnt->mnt_pinned)) { mnt_add_count(mnt, mnt->mnt_pinned + 1); mnt->mnt_pinned = 0; br_write_unlock(&vfsmount_lock); acct_auto_close_mnt(&mnt->mnt); goto put_again; } list_del(&mnt->mnt_instance); br_write_unlock(&vfsmount_lock); mntfree(mnt); } void mntput(struct vfsmount *mnt) { if (mnt) { struct mount *m = real_mount(mnt); /* avoid cacheline pingpong, hope gcc doesn't get "smart" */ if (unlikely(m->mnt_expiry_mark)) m->mnt_expiry_mark = 0; mntput_no_expire(m); } } EXPORT_SYMBOL(mntput); struct vfsmount *mntget(struct vfsmount *mnt) { if (mnt) mnt_add_count(real_mount(mnt), 1); return mnt; } EXPORT_SYMBOL(mntget); void mnt_pin(struct vfsmount *mnt) { br_write_lock(&vfsmount_lock); real_mount(mnt)->mnt_pinned++; br_write_unlock(&vfsmount_lock); } EXPORT_SYMBOL(mnt_pin); void mnt_unpin(struct vfsmount *m) { struct mount *mnt = real_mount(m); br_write_lock(&vfsmount_lock); if (mnt->mnt_pinned) { mnt_add_count(mnt, 1); mnt->mnt_pinned--; } br_write_unlock(&vfsmount_lock); } EXPORT_SYMBOL(mnt_unpin); static inline void mangle(struct seq_file *m, const char *s) { seq_escape(m, s, " \t\n\\"); } /* * Simple .show_options callback for filesystems which don't want to * implement more complex mount option showing. 
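 *
 * For illustration: a filesystem that stashed its option string at
 * mount time with save_mount_options(sb, data) gets, via this callback,
 * a leading comma plus that saved string (with " \t\n\\" escaped by
 * mangle()) appended to its lines in /proc/mounts.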
* * See also save_mount_options(). */ int generic_show_options(struct seq_file *m, struct dentry *root) { const char *options; rcu_read_lock(); options = rcu_dereference(root->d_sb->s_options); if (options != NULL && options[0]) { seq_putc(m, ','); mangle(m, options); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(generic_show_options); /* * If filesystem uses generic_show_options(), this function should be * called from the fill_super() callback. * * The .remount_fs callback usually needs to be handled in a special * way, to make sure, that previous options are not overwritten if the * remount fails. * * Also note, that if the filesystem's .remount_fs function doesn't * reset all options to their default value, but changes only newly * given options, then the displayed options will not reflect reality * any more. */ void save_mount_options(struct super_block *sb, char *options) { BUG_ON(sb->s_options); rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL)); } EXPORT_SYMBOL(save_mount_options); void replace_mount_options(struct super_block *sb, char *options) { char *old = sb->s_options; rcu_assign_pointer(sb->s_options, options); if (old) { synchronize_rcu(); kfree(old); } } EXPORT_SYMBOL(replace_mount_options); #ifdef CONFIG_PROC_FS /* iterator; we want it to have access to namespace_sem, thus here... */ static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); down_read(&namespace_sem); return seq_list_start(&p->ns->list, *pos); } static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); return seq_list_next(v, &p->ns->list, pos); } static void m_stop(struct seq_file *m, void *v) { up_read(&namespace_sem); } static int m_show(struct seq_file *m, void *v) { struct proc_mounts *p = proc_mounts(m); struct mount *r = list_entry(v, struct mount, mnt_list); return p->show(m, &r->mnt); } const struct seq_operations mounts_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = m_show, }; #endif /* CONFIG_PROC_FS */ /** * may_umount_tree - check if a mount tree is busy * @mnt: root of mount tree * * This is called to check if a tree of mounts has any * open files, pwds, chroots or sub mounts that are * busy. */ int may_umount_tree(struct vfsmount *m) { struct mount *mnt = real_mount(m); int actual_refs = 0; int minimum_refs = 0; struct mount *p; BUG_ON(!m); /* write lock needed for mnt_get_count */ br_write_lock(&vfsmount_lock); for (p = mnt; p; p = next_mnt(p, mnt)) { actual_refs += mnt_get_count(p); minimum_refs += 2; } br_write_unlock(&vfsmount_lock); if (actual_refs > minimum_refs) return 0; return 1; } EXPORT_SYMBOL(may_umount_tree); /** * may_umount - check if a mount point is busy * @mnt: root of mount * * This is called to check if a mount point has any * open files, pwds, chroots or sub mounts. If the * mount has sub mounts this will return busy * regardless of whether the sub mounts are busy. * * Doesn't take quota and stuff into account. IOW, in some cases it will * give false negatives. The main reason why it's here is that we need * a non-destructive way to look for easily umountable filesystems. 
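 *
 * Sketch of the intended use by a hypothetical caller:
 *
 *	if (may_umount(mnt))
 *		... worth attempting the unmount ...
 *
 * bearing in mind the false negatives mentioned above.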
*/ int may_umount(struct vfsmount *mnt) { int ret = 1; down_read(&namespace_sem); br_write_lock(&vfsmount_lock); if (propagate_mount_busy(real_mount(mnt), 2)) ret = 0; br_write_unlock(&vfsmount_lock); up_read(&namespace_sem); return ret; } EXPORT_SYMBOL(may_umount); void release_mounts(struct list_head *head) { struct mount *mnt; while (!list_empty(head)) { mnt = list_first_entry(head, struct mount, mnt_hash); list_del_init(&mnt->mnt_hash); if (mnt_has_parent(mnt)) { struct dentry *dentry; struct mount *m; br_write_lock(&vfsmount_lock); dentry = mnt->mnt_mountpoint; m = mnt->mnt_parent; mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; m->mnt_ghosts--; br_write_unlock(&vfsmount_lock); dput(dentry); mntput(&m->mnt); } mntput(&mnt->mnt); } } /* * vfsmount lock must be held for write * namespace_sem must be held for write */ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill) { LIST_HEAD(tmp_list); struct mount *p; for (p = mnt; p; p = next_mnt(p, mnt)) list_move(&p->mnt_hash, &tmp_list); if (propagate) propagate_umount(&tmp_list); list_for_each_entry(p, &tmp_list, mnt_hash) { list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; list_del_init(&p->mnt_child); if (mnt_has_parent(p)) { p->mnt_parent->mnt_ghosts++; dentry_reset_mounted(p->mnt_mountpoint); } change_mnt_propagation(p, MS_PRIVATE); } list_splice(&tmp_list, kill); } static void shrink_submounts(struct mount *mnt, struct list_head *umounts); static int do_umount(struct mount *mnt, int flags) { struct super_block *sb = mnt->mnt.mnt_sb; int retval; LIST_HEAD(umount_list); retval = security_sb_umount(&mnt->mnt, flags); if (retval) return retval; /* * Allow userspace to request a mountpoint be expired rather than * unmounting unconditionally. Unmount only happens if: * (1) the mark is already set (the mark is cleared by mntput()) * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] */ if (flags & MNT_EXPIRE) { if (&mnt->mnt == current->fs->root.mnt || flags & (MNT_FORCE | MNT_DETACH)) return -EINVAL; /* * probably don't strictly need the lock here if we examined * all race cases, but it's a slowpath. */ br_write_lock(&vfsmount_lock); if (mnt_get_count(mnt) != 2) { br_write_unlock(&vfsmount_lock); return -EBUSY; } br_write_unlock(&vfsmount_lock); if (!xchg(&mnt->mnt_expiry_mark, 1)) return -EAGAIN; } /* * If we may have to abort operations to get out of this * mount, and they will themselves hold resources we must * allow the fs to do things. In the Unix tradition of * 'Gee thats tricky lets do it in userspace' the umount_begin * might fail to complete on the first run through as other tasks * must return, and the like. Thats for the mount program to worry * about for the moment. */ if (flags & MNT_FORCE && sb->s_op->umount_begin) { sb->s_op->umount_begin(sb); } /* * No sense to grab the lock for this test, but test itself looks * somewhat bogus. Suggestions for better replacement? * Ho-hum... In principle, we might treat that as umount + switch * to rootfs. GC would eventually take care of the old vfsmount. * Actually it makes sense, especially if rootfs would contain a * /reboot - static binary that would close all descriptors and * call reboot(9). Then init(8) could umount root and exec /reboot. */ if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { /* * Special case for "unmounting" root ... * we just try to remount it readonly. 
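 *
 * In other words, a plain "umount /" without MNT_DETACH is handled like
 * "mount -o remount,ro /", via the do_remount_sb(sb, MS_RDONLY, NULL, 0)
 * call just below.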
*/ down_write(&sb->s_umount); if (!(sb->s_flags & MS_RDONLY)) retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); up_write(&sb->s_umount); return retval; } down_write(&namespace_sem); br_write_lock(&vfsmount_lock); event++; if (!(flags & MNT_DETACH)) shrink_submounts(mnt, &umount_list); retval = -EBUSY; if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) { if (!list_empty(&mnt->mnt_list)) umount_tree(mnt, 1, &umount_list); retval = 0; } br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); return retval; } /* * Is the caller allowed to modify his namespace? */ static inline bool may_mount(void) { return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); } /* * Now umount can handle mount points as well as block devices. * This is important for filesystems which use unnamed block devices. * * We now support a flag for forced unmount like the other 'big iron' * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD */ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) { struct path path; struct mount *mnt; int retval; int lookup_flags = 0; if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) return -EINVAL; if (!may_mount()) return -EPERM; if (!(flags & UMOUNT_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; retval = user_path_at(AT_FDCWD, name, lookup_flags, &path); if (retval) goto out; mnt = real_mount(path.mnt); retval = -EINVAL; if (path.dentry != path.mnt->mnt_root) goto dput_and_out; if (!check_mnt(mnt)) goto dput_and_out; retval = do_umount(mnt, flags); dput_and_out: /* we mustn't call path_put() as that would clear mnt_expiry_mark */ dput(path.dentry); mntput_no_expire(mnt); out: return retval; } #ifdef __ARCH_WANT_SYS_OLDUMOUNT /* * The 2.0 compatible umount. No flags. */ SYSCALL_DEFINE1(oldumount, char __user *, name) { return sys_umount(name, 0); } #endif static bool mnt_ns_loop(struct path *path) { /* Could bind mounting the mount namespace inode cause a * mount namespace loop? 
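 *
 * Example of what is being prevented: bind-mounting the caller's own
 * /proc/self/ns/mnt file inside that same namespace would make the
 * namespace pin itself. The sequence-number test below only admits
 * mount-namespace files whose namespace was created strictly later
 * than the caller's, which rules out such self-references.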
*/ struct inode *inode = path->dentry->d_inode; struct proc_inode *ei; struct mnt_namespace *mnt_ns; if (!proc_ns_inode(inode)) return false; ei = PROC_I(inode); if (ei->ns_ops != &mntns_operations) return false; mnt_ns = ei->ns; return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; } struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, int flag) { struct mount *res, *p, *q, *r; struct path path; if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt)) return ERR_PTR(-EINVAL); res = q = clone_mnt(mnt, dentry, flag); if (IS_ERR(q)) return q; q->mnt_mountpoint = mnt->mnt_mountpoint; p = mnt; list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { struct mount *s; if (!is_subdir(r->mnt_mountpoint, dentry)) continue; for (s = r; s; s = next_mnt(s, r)) { if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) { s = skip_mnt_tree(s); continue; } while (p != s->mnt_parent) { p = p->mnt_parent; q = q->mnt_parent; } p = s; path.mnt = &q->mnt; path.dentry = p->mnt_mountpoint; q = clone_mnt(p, p->mnt.mnt_root, flag); if (IS_ERR(q)) goto out; br_write_lock(&vfsmount_lock); list_add_tail(&q->mnt_list, &res->mnt_list); attach_mnt(q, &path); br_write_unlock(&vfsmount_lock); } } return res; out: if (res) { LIST_HEAD(umount_list); br_write_lock(&vfsmount_lock); umount_tree(res, 0, &umount_list); br_write_unlock(&vfsmount_lock); release_mounts(&umount_list); } return q; } /* Caller should check returned pointer for errors */ struct vfsmount *collect_mounts(struct path *path) { struct mount *tree; down_write(&namespace_sem); tree = copy_tree(real_mount(path->mnt), path->dentry, CL_COPY_ALL | CL_PRIVATE); up_write(&namespace_sem); if (IS_ERR(tree)) return NULL; return &tree->mnt; } void drop_collected_mounts(struct vfsmount *mnt) { LIST_HEAD(umount_list); down_write(&namespace_sem); br_write_lock(&vfsmount_lock); umount_tree(real_mount(mnt), 0, &umount_list); br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); } int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, struct vfsmount *root) { struct mount *mnt; int res = f(root, arg); if (res) return res; list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { res = f(&mnt->mnt, arg); if (res) return res; } return 0; } static void cleanup_group_ids(struct mount *mnt, struct mount *end) { struct mount *p; for (p = mnt; p != end; p = next_mnt(p, mnt)) { if (p->mnt_group_id && !IS_MNT_SHARED(p)) mnt_release_group_id(p); } } static int invent_group_ids(struct mount *mnt, bool recurse) { struct mount *p; for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { int err = mnt_alloc_group_id(p); if (err) { cleanup_group_ids(mnt, p); return err; } } } return 0; } /* * @source_mnt : mount tree to be attached * @nd : place the mount tree @source_mnt is attached * @parent_nd : if non-null, detach the source_mnt from its parent and * store the parent mount and mountpoint dentry. * (done when source_mnt is moved) * * NOTE: in the table below explains the semantics when a source mount * of a given type is attached to a destination mount of a given type. 
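 *
 * Reading one cell as a concrete example: with both /src and /dst
 * shared,
 *
 *	mount --bind /src /dst
 *
 * clones /src's mount and, per the 'shared (++)' entry of the bind
 * table below, propagates the clone to every peer of /dst while adding
 * it to the source's peer group (the paths are hypothetical).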
* --------------------------------------------------------------------------- * | BIND MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | | * | v | | | | | * |************************************************************************** * | shared | shared (++) | shared (+) | shared(+++)| invalid | * | | | | | | * |non-shared| shared (+) | private | slave (*) | invalid | * *************************************************************************** * A bind operation clones the source mount and mounts the clone on the * destination mount. * * (++) the cloned mount is propagated to all the mounts in the propagation * tree of the destination mount and the cloned mount is added to * the peer group of the source mount. * (+) the cloned mount is created under the destination mount and is marked * as shared. The cloned mount is added to the peer group of the source * mount. * (+++) the mount is propagated to all the mounts in the propagation tree * of the destination mount and the cloned mount is made slave * of the same master as that of the source mount. The cloned mount * is marked as 'shared and slave'. * (*) the cloned mount is made a slave of the same master as that of the * source mount. * * --------------------------------------------------------------------------- * | MOVE MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | | * | v | | | | | * |************************************************************************** * | shared | shared (+) | shared (+) | shared(+++) | invalid | * | | | | | | * |non-shared| shared (+*) | private | slave (*) | unbindable | * *************************************************************************** * * (+) the mount is moved to the destination. And is then propagated to * all the mounts in the propagation tree of the destination mount. * (+*) the mount is moved to the destination. * (+++) the mount is moved to the destination and is then propagated to * all the mounts belonging to the destination mount's propagation tree. * the mount is marked as 'shared and slave'. * (*) the mount continues to be a slave at the new location. * * if the source mount is a tree, the operations explained above is * applied to each mount in the tree. * Must be called without spinlocks held, since this function can sleep * in allocations. 
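 * (Concretely: the invent_group_ids() call at the top of the function
 * allocates peer group IDs with GFP_KERNEL and may sleep, hence the
 * no-spinlocks rule stated above.)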
*/ static int attach_recursive_mnt(struct mount *source_mnt, struct path *path, struct path *parent_path) { LIST_HEAD(tree_list); struct mount *dest_mnt = real_mount(path->mnt); struct dentry *dest_dentry = path->dentry; struct mount *child, *p; int err; if (IS_MNT_SHARED(dest_mnt)) { err = invent_group_ids(source_mnt, true); if (err) goto out; } err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list); if (err) goto out_cleanup_ids; br_write_lock(&vfsmount_lock); if (IS_MNT_SHARED(dest_mnt)) { for (p = source_mnt; p; p = next_mnt(p, source_mnt)) set_mnt_shared(p); } if (parent_path) { detach_mnt(source_mnt, parent_path); attach_mnt(source_mnt, path); touch_mnt_namespace(source_mnt->mnt_ns); } else { mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt); commit_tree(source_mnt); } list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { list_del_init(&child->mnt_hash); commit_tree(child); } br_write_unlock(&vfsmount_lock); return 0; out_cleanup_ids: if (IS_MNT_SHARED(dest_mnt)) cleanup_group_ids(source_mnt, NULL); out: return err; } static int lock_mount(struct path *path) { struct vfsmount *mnt; retry: mutex_lock(&path->dentry->d_inode->i_mutex); if (unlikely(cant_mount(path->dentry))) { mutex_unlock(&path->dentry->d_inode->i_mutex); return -ENOENT; } down_write(&namespace_sem); mnt = lookup_mnt(path); if (likely(!mnt)) return 0; up_write(&namespace_sem); mutex_unlock(&path->dentry->d_inode->i_mutex); path_put(path); path->mnt = mnt; path->dentry = dget(mnt->mnt_root); goto retry; } static void unlock_mount(struct path *path) { up_write(&namespace_sem); mutex_unlock(&path->dentry->d_inode->i_mutex); } static int graft_tree(struct mount *mnt, struct path *path) { if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER) return -EINVAL; if (S_ISDIR(path->dentry->d_inode->i_mode) != S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode)) return -ENOTDIR; if (d_unlinked(path->dentry)) return -ENOENT; return attach_recursive_mnt(mnt, path, NULL); } /* * Sanity check the flags to change_mnt_propagation. */ static int flags_to_propagation_type(int flags) { int type = flags & ~(MS_REC | MS_SILENT); /* Fail if any non-propagation flags are set */ if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) return 0; /* Only one propagation flag should be set */ if (!is_power_of_2(type)) return 0; return type; } /* * recursively change the type of the mountpoint. */ static int do_change_type(struct path *path, int flag) { struct mount *m; struct mount *mnt = real_mount(path->mnt); int recurse = flag & MS_REC; int type; int err = 0; if (path->dentry != path->mnt->mnt_root) return -EINVAL; type = flags_to_propagation_type(flag); if (!type) return -EINVAL; down_write(&namespace_sem); if (type == MS_SHARED) { err = invent_group_ids(mnt, recurse); if (err) goto out_unlock; } br_write_lock(&vfsmount_lock); for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) change_mnt_propagation(m, type); br_write_unlock(&vfsmount_lock); out_unlock: up_write(&namespace_sem); return err; } /* * do loopback mount. 
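 *
 * For example, "mount --bind /a /b" arrives here with old_name "/a" and
 * recurse == 0, cloning a single mount via clone_mnt(); "mount --rbind"
 * sets recurse and duplicates the whole tree under /a via copy_tree().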
*/ static int do_loopback(struct path *path, const char *old_name, int recurse) { LIST_HEAD(umount_list); struct path old_path; struct mount *mnt = NULL, *old; int err; if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); if (err) return err; err = -EINVAL; if (mnt_ns_loop(&old_path)) goto out; err = lock_mount(path); if (err) goto out; old = real_mount(old_path.mnt); err = -EINVAL; if (IS_MNT_UNBINDABLE(old)) goto out2; if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old)) goto out2; if (recurse) mnt = copy_tree(old, old_path.dentry, 0); else mnt = clone_mnt(old, old_path.dentry, 0); if (IS_ERR(mnt)) { err = PTR_ERR(mnt); goto out; } err = graft_tree(mnt, path); if (err) { br_write_lock(&vfsmount_lock); umount_tree(mnt, 0, &umount_list); br_write_unlock(&vfsmount_lock); } out2: unlock_mount(path); release_mounts(&umount_list); out: path_put(&old_path); return err; } static int change_mount_flags(struct vfsmount *mnt, int ms_flags) { int error = 0; int readonly_request = 0; if (ms_flags & MS_RDONLY) readonly_request = 1; if (readonly_request == __mnt_is_readonly(mnt)) return 0; if (mnt->mnt_flags & MNT_LOCK_READONLY) return -EPERM; if (readonly_request) error = mnt_make_readonly(real_mount(mnt)); else __mnt_unmake_readonly(real_mount(mnt)); return error; } /* * change filesystem flags. dir should be a physical root of filesystem. * If you've mounted a non-root directory somewhere and want to do remount * on it - tough luck. */ static int do_remount(struct path *path, int flags, int mnt_flags, void *data) { int err; struct super_block *sb = path->mnt->mnt_sb; struct mount *mnt = real_mount(path->mnt); if (!check_mnt(mnt)) return -EINVAL; if (path->dentry != path->mnt->mnt_root) return -EINVAL; err = security_sb_remount(sb, data); if (err) return err; down_write(&sb->s_umount); if (flags & MS_BIND) err = change_mount_flags(path->mnt, flags); else if (!capable(CAP_SYS_ADMIN)) err = -EPERM; else err = do_remount_sb(sb, flags, data, 0); if (!err) { br_write_lock(&vfsmount_lock); mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK; mnt->mnt.mnt_flags = mnt_flags; br_write_unlock(&vfsmount_lock); } up_write(&sb->s_umount); if (!err) { br_write_lock(&vfsmount_lock); touch_mnt_namespace(mnt->mnt_ns); br_write_unlock(&vfsmount_lock); } return err; } static inline int tree_contains_unbindable(struct mount *mnt) { struct mount *p; for (p = mnt; p; p = next_mnt(p, mnt)) { if (IS_MNT_UNBINDABLE(p)) return 1; } return 0; } static int do_move_mount(struct path *path, const char *old_name) { struct path old_path, parent_path; struct mount *p; struct mount *old; int err; if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); if (err) return err; err = lock_mount(path); if (err < 0) goto out; old = real_mount(old_path.mnt); p = real_mount(path->mnt); err = -EINVAL; if (!check_mnt(p) || !check_mnt(old)) goto out1; if (d_unlinked(path->dentry)) goto out1; err = -EINVAL; if (old_path.dentry != old_path.mnt->mnt_root) goto out1; if (!mnt_has_parent(old)) goto out1; if (S_ISDIR(path->dentry->d_inode->i_mode) != S_ISDIR(old_path.dentry->d_inode->i_mode)) goto out1; /* * Don't move a mount residing in a shared parent. */ if (IS_MNT_SHARED(old->mnt_parent)) goto out1; /* * Don't move a mount tree containing unbindable mounts to a destination * mount which is shared. 
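 *
 * Example: after "mount --make-unbindable /a/b", moving /a onto a
 * shared destination must fail, since propagation would have to
 * replicate the unbindable child to the destination's peers; the
 * tree_contains_unbindable() walk below catches exactly this case.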
*/ if (IS_MNT_SHARED(p) && tree_contains_unbindable(old)) goto out1; err = -ELOOP; for (; mnt_has_parent(p); p = p->mnt_parent) if (p == old) goto out1; err = attach_recursive_mnt(old, path, &parent_path); if (err) goto out1; /* if the mount is moved, it should no longer be expire * automatically */ list_del_init(&old->mnt_expire); out1: unlock_mount(path); out: if (!err) path_put(&parent_path); path_put(&old_path); return err; } static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype) { int err; const char *subtype = strchr(fstype, '.'); if (subtype) { subtype++; err = -EINVAL; if (!subtype[0]) goto err; } else subtype = ""; mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL); err = -ENOMEM; if (!mnt->mnt_sb->s_subtype) goto err; return mnt; err: mntput(mnt); return ERR_PTR(err); } /* * add a mount into a namespace's mount tree */ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags) { int err; mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL); err = lock_mount(path); if (err) return err; err = -EINVAL; if (unlikely(!check_mnt(real_mount(path->mnt)))) { /* that's acceptable only for automounts done in private ns */ if (!(mnt_flags & MNT_SHRINKABLE)) goto unlock; /* ... and for those we'd better have mountpoint still alive */ if (!real_mount(path->mnt)->mnt_ns) goto unlock; } /* Refuse the same filesystem on the same mount point */ err = -EBUSY; if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path->mnt->mnt_root == path->dentry) goto unlock; err = -EINVAL; if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode)) goto unlock; newmnt->mnt.mnt_flags = mnt_flags; err = graft_tree(newmnt, path); unlock: unlock_mount(path); return err; } /* * create a new mount for userspace and request it to be added into the * namespace's tree */ static int do_new_mount(struct path *path, const char *fstype, int flags, int mnt_flags, const char *name, void *data) { struct file_system_type *type; struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; struct vfsmount *mnt; int err; if (!fstype) return -EINVAL; type = get_fs_type(fstype); if (!type) return -ENODEV; if (user_ns != &init_user_ns) { if (!(type->fs_flags & FS_USERNS_MOUNT)) { put_filesystem(type); return -EPERM; } /* Only in special cases allow devices from mounts * created outside the initial user namespace. 
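 * That is: unless the filesystem type sets FS_USERNS_DEV_MOUNT, a mount
 * created from a non-initial user namespace has MS_NODEV and MNT_NODEV
 * forced on just below, so device nodes on it are never honoured.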
*/ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) { flags |= MS_NODEV; mnt_flags |= MNT_NODEV; } } mnt = vfs_kern_mount(type, flags, name, data); if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && !mnt->mnt_sb->s_subtype) mnt = fs_set_subtype(mnt, fstype); put_filesystem(type); if (IS_ERR(mnt)) return PTR_ERR(mnt); err = do_add_mount(real_mount(mnt), path, mnt_flags); if (err) mntput(mnt); return err; } int finish_automount(struct vfsmount *m, struct path *path) { struct mount *mnt = real_mount(m); int err; /* The new mount record should have at least 2 refs to prevent it being * expired before we get a chance to add it */ BUG_ON(mnt_get_count(mnt) < 2); if (m->mnt_sb == path->mnt->mnt_sb && m->mnt_root == path->dentry) { err = -ELOOP; goto fail; } err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); if (!err) return 0; fail: /* remove m from any expiration list it may be on */ if (!list_empty(&mnt->mnt_expire)) { down_write(&namespace_sem); br_write_lock(&vfsmount_lock); list_del_init(&mnt->mnt_expire); br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); } mntput(m); mntput(m); return err; } /** * mnt_set_expiry - Put a mount on an expiration list * @mnt: The mount to list. * @expiry_list: The list to add the mount to. */ void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) { down_write(&namespace_sem); br_write_lock(&vfsmount_lock); list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); } EXPORT_SYMBOL(mnt_set_expiry); /* * process a list of expirable mountpoints with the intent of discarding any * mountpoints that aren't in use and haven't been touched since last we came * here */ void mark_mounts_for_expiry(struct list_head *mounts) { struct mount *mnt, *next; LIST_HEAD(graveyard); LIST_HEAD(umounts); if (list_empty(mounts)) return; down_write(&namespace_sem); br_write_lock(&vfsmount_lock); /* extract from the expiration list every vfsmount that matches the * following criteria: * - only referenced by its parent vfsmount * - still marked for expiry (marked on the last call here; marks are * cleared by mntput()) */ list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { if (!xchg(&mnt->mnt_expiry_mark, 1) || propagate_mount_busy(mnt, 1)) continue; list_move(&mnt->mnt_expire, &graveyard); } while (!list_empty(&graveyard)) { mnt = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(mnt->mnt_ns); umount_tree(mnt, 1, &umounts); } br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umounts); } EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); /* * Ripoff of 'select_parent()' * * search the list of submounts for a given mountpoint, and move any * shrinkable submounts to the 'graveyard' list. */ static int select_submounts(struct mount *parent, struct list_head *graveyard) { struct mount *this_parent = parent; struct list_head *next; int found = 0; repeat: next = this_parent->mnt_mounts.next; resume: while (next != &this_parent->mnt_mounts) { struct list_head *tmp = next; struct mount *mnt = list_entry(tmp, struct mount, mnt_child); next = tmp->next; if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) continue; /* * Descend a level if the d_mounts list is non-empty. */ if (!list_empty(&mnt->mnt_mounts)) { this_parent = mnt; goto repeat; } if (!propagate_mount_busy(mnt, 1)) { list_move_tail(&mnt->mnt_expire, graveyard); found++; } } /* * All done at this level ... 
ascend and resume the search */ if (this_parent != parent) { next = this_parent->mnt_child.next; this_parent = this_parent->mnt_parent; goto resume; } return found; } /* * process a list of expirable mountpoints with the intent of discarding any * submounts of a specific parent mountpoint * * vfsmount_lock must be held for write */ static void shrink_submounts(struct mount *mnt, struct list_head *umounts) { LIST_HEAD(graveyard); struct mount *m; /* extract submounts of 'mountpoint' from the expiration list */ while (select_submounts(mnt, &graveyard)) { while (!list_empty(&graveyard)) { m = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(m->mnt_ns); umount_tree(m, 1, umounts); } } } /* * Some copy_from_user() implementations do not return the exact number of * bytes remaining to copy on a fault. But copy_mount_options() requires that. * Note that this function differs from copy_from_user() in that it will oops * on bad values of `to', rather than returning a short copy. */ static long exact_copy_from_user(void *to, const void __user * from, unsigned long n) { char *t = to; const char __user *f = from; char c; if (!access_ok(VERIFY_READ, from, n)) return n; while (n) { if (__get_user(c, f)) { memset(t, 0, n); break; } *t++ = c; f++; n--; } return n; } int copy_mount_options(const void __user * data, unsigned long *where) { int i; unsigned long page; unsigned long size; *where = 0; if (!data) return 0; if (!(page = __get_free_page(GFP_KERNEL))) return -ENOMEM; /* We only care that *some* data at the address the user * gave us is valid. Just in case, we'll zero * the remainder of the page. */ /* copy_from_user cannot cross TASK_SIZE ! */ size = TASK_SIZE - (unsigned long)data; if (size > PAGE_SIZE) size = PAGE_SIZE; i = size - exact_copy_from_user((void *)page, data, size); if (!i) { free_page(page); return -EFAULT; } if (i != PAGE_SIZE) memset((char *)page + i, 0, PAGE_SIZE - i); *where = page; return 0; } int copy_mount_string(const void __user *data, char **where) { char *tmp; if (!data) { *where = NULL; return 0; } tmp = strndup_user(data, PAGE_SIZE); if (IS_ERR(tmp)) return PTR_ERR(tmp); *where = tmp; return 0; } /* * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to * be given to the mount() call (ie: read-only, no-dev, no-suid etc). * * data is a (void *) that can point to any structure up to * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent * information (or be NULL). * * Pre-0.97 versions of mount() didn't have a flags word. * When the flags word was introduced its top half was required * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. * Therefore, if this magic number is present, it carries no information * and must be discarded. */ long do_mount(const char *dev_name, const char *dir_name, const char *type_page, unsigned long flags, void *data_page) { struct path path; int retval = 0; int mnt_flags = 0; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) flags &= ~MS_MGC_MSK; /* Basic sanity checks */ if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE)) return -EINVAL; if (data_page) ((char *)data_page)[PAGE_SIZE - 1] = 0; /* ... 
and get the mountpoint */ retval = kern_path(dir_name, LOOKUP_FOLLOW, &path); if (retval) return retval; retval = security_sb_mount(dev_name, &path, type_page, flags, data_page); if (retval) goto dput_out; if (!may_mount()) { retval = -EPERM; goto dput_out; } /* Default to relatime unless overridden */ if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) mnt_flags |= MNT_NOSUID; if (flags & MS_NODEV) mnt_flags |= MNT_NODEV; if (flags & MS_NOEXEC) mnt_flags |= MNT_NOEXEC; if (flags & MS_NOATIME) mnt_flags |= MNT_NOATIME; if (flags & MS_NODIRATIME) mnt_flags |= MNT_NODIRATIME; if (flags & MS_STRICTATIME) mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); else if (flags & MS_BIND) retval = do_loopback(&path, dev_name, flags & MS_REC); else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) retval = do_change_type(&path, flags); else if (flags & MS_MOVE) retval = do_move_mount(&path, dev_name); else retval = do_new_mount(&path, type_page, flags, mnt_flags, dev_name, data_page); dput_out: path_put(&path); return retval; } static void free_mnt_ns(struct mnt_namespace *ns) { proc_free_inum(ns->proc_inum); put_user_ns(ns->user_ns); kfree(ns); } /* * Assign a sequence number so we can detect when we attempt to bind * mount a reference to an older mount namespace into the current * mount namespace, preventing reference counting loops. A 64bit * number incrementing even at 10Ghz would take roughly 58 years to wrap, * and namespaces are created far more slowly than that, so we can ignore * the possibility. */ static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1); static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) { struct mnt_namespace *new_ns; int ret; new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL); if (!new_ns) return ERR_PTR(-ENOMEM); ret = proc_alloc_inum(&new_ns->proc_inum); if (ret) { kfree(new_ns); return ERR_PTR(ret); } new_ns->seq = atomic64_add_return(1, &mnt_ns_seq); atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); init_waitqueue_head(&new_ns->poll); new_ns->event = 0; new_ns->user_ns = get_user_ns(user_ns); return new_ns; } /* * Allocate a new namespace structure and populate it with contents * copied from the namespace of the passed in task structure. */ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, struct user_namespace *user_ns, struct fs_struct *fs) { struct mnt_namespace *new_ns; struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; struct mount *p, *q; struct mount *old = mnt_ns->root; struct mount *new; int copy_flags; new_ns = alloc_mnt_ns(user_ns); if (IS_ERR(new_ns)) return new_ns; down_write(&namespace_sem); /* First pass: copy the tree topology */ copy_flags = CL_COPY_ALL | CL_EXPIRE; if (user_ns != mnt_ns->user_ns) copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { up_write(&namespace_sem); free_mnt_ns(new_ns); return ERR_CAST(new); } new_ns->root = new; br_write_lock(&vfsmount_lock); list_add_tail(&new_ns->list, &new->mnt_list); br_write_unlock(&vfsmount_lock); /* * Second pass: switch the tsk->fs->* elements and mark new vfsmounts * as belonging to new namespace.
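 * Both trees are then walked in lockstep with next_mnt(); this works because copy_tree() preserves the topology and traversal order of the source tree.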
We have already acquired a private * fs_struct, so tsk->fs->lock is not needed. */ p = old; q = new; while (p) { q->mnt_ns = new_ns; if (fs) { if (&p->mnt == fs->root.mnt) { fs->root.mnt = mntget(&q->mnt); rootmnt = &p->mnt; } if (&p->mnt == fs->pwd.mnt) { fs->pwd.mnt = mntget(&q->mnt); pwdmnt = &p->mnt; } } p = next_mnt(p, old); q = next_mnt(q, new); } up_write(&namespace_sem); if (rootmnt) mntput(rootmnt); if (pwdmnt) mntput(pwdmnt); return new_ns; } struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; BUG_ON(!ns); get_mnt_ns(ns); if (!(flags & CLONE_NEWNS)) return ns; new_ns = dup_mnt_ns(ns, user_ns, new_fs); put_mnt_ns(ns); return new_ns; } /** * create_mnt_ns - creates a private namespace and adds a root filesystem * @mnt: pointer to the new root filesystem mountpoint */ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m) { struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns); if (!IS_ERR(new_ns)) { struct mount *mnt = real_mount(m); mnt->mnt_ns = new_ns; new_ns->root = mnt; list_add(&new_ns->list, &mnt->mnt_list); } else { mntput(m); } return new_ns; } struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) { struct mnt_namespace *ns; struct super_block *s; struct path path; int err; ns = create_mnt_ns(mnt); if (IS_ERR(ns)) return ERR_CAST(ns); err = vfs_path_lookup(mnt->mnt_root, mnt, name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); put_mnt_ns(ns); if (err) return ERR_PTR(err); /* trade a vfsmount reference for active sb one */ s = path.mnt->mnt_sb; atomic_inc(&s->s_active); mntput(path.mnt); /* lock the sucker */ down_write(&s->s_umount); /* ... and return the root of (sub)tree on it */ return path.dentry; } EXPORT_SYMBOL(mount_subtree); SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, char __user *, type, unsigned long, flags, void __user *, data) { int ret; char *kernel_type; struct filename *kernel_dir; char *kernel_dev; unsigned long data_page; ret = copy_mount_string(type, &kernel_type); if (ret < 0) goto out_type; kernel_dir = getname(dir_name); if (IS_ERR(kernel_dir)) { ret = PTR_ERR(kernel_dir); goto out_dir; } ret = copy_mount_string(dev_name, &kernel_dev); if (ret < 0) goto out_dev; ret = copy_mount_options(data, &data_page); if (ret < 0) goto out_data; ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags, (void *) data_page); free_page(data_page); out_data: kfree(kernel_dev); out_dev: putname(kernel_dir); out_dir: kfree(kernel_type); out_type: return ret; } /* * Return true if path is reachable from root * * namespace_sem or vfsmount_lock is held */ bool is_path_reachable(struct mount *mnt, struct dentry *dentry, const struct path *root) { while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { dentry = mnt->mnt_mountpoint; mnt = mnt->mnt_parent; } return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); } int path_is_under(struct path *path1, struct path *path2) { int res; br_read_lock(&vfsmount_lock); res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); br_read_unlock(&vfsmount_lock); return res; } EXPORT_SYMBOL(path_is_under); /* * pivot_root Semantics: * Moves the root file system of the current process to the directory put_old, * makes new_root as the new root file system of the current process, and sets * root/cwd of all processes which had them on the current root to new_root. 
* * Restrictions: * The new_root and put_old must be directories, and must not be on the * same file system as the current process root. The put_old must be * underneath new_root, i.e. adding a non-zero number of /.. to the string * pointed to by put_old must yield the same directory as new_root. No other * file system may be mounted on put_old. After all, new_root is a mountpoint. * * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives * in this situation. * * Notes: * - we don't move root/cwd if they are not at the root (reason: if something * cared enough to change them, it's probably wrong to force them elsewhere) * - it's okay to pick a root that isn't the root of a file system, e.g. * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root * first. */ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, const char __user *, put_old) { struct path new, old, parent_path, root_parent, root; struct mount *new_mnt, *root_mnt; int error; if (!may_mount()) return -EPERM; error = user_path_dir(new_root, &new); if (error) goto out0; error = user_path_dir(put_old, &old); if (error) goto out1; error = security_sb_pivotroot(&old, &new); if (error) goto out2; get_fs_root(current->fs, &root); error = lock_mount(&old); if (error) goto out3; error = -EINVAL; new_mnt = real_mount(new.mnt); root_mnt = real_mount(root.mnt); if (IS_MNT_SHARED(real_mount(old.mnt)) || IS_MNT_SHARED(new_mnt->mnt_parent) || IS_MNT_SHARED(root_mnt->mnt_parent)) goto out4; if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) goto out4; error = -ENOENT; if (d_unlinked(new.dentry)) goto out4; if (d_unlinked(old.dentry)) goto out4; error = -EBUSY; if (new.mnt == root.mnt || old.mnt == root.mnt) goto out4; /* loop, on the same file system */ error = -EINVAL; if (root.mnt->mnt_root != root.dentry) goto out4; /* not a mountpoint */ if (!mnt_has_parent(root_mnt)) goto out4; /* not attached */ if (new.mnt->mnt_root != new.dentry) goto out4; /* not a mountpoint */ if (!mnt_has_parent(new_mnt)) goto out4; /* not attached */ /* make sure we can reach put_old from new_root */ if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new)) goto out4; br_write_lock(&vfsmount_lock); detach_mnt(new_mnt, &parent_path); detach_mnt(root_mnt, &root_parent); /* mount old root on put_old */ attach_mnt(root_mnt, &old); /* mount new_root on / */ attach_mnt(new_mnt, &root_parent); touch_mnt_namespace(current->nsproxy->mnt_ns); br_write_unlock(&vfsmount_lock); chroot_fs_refs(&root, &new); error = 0; out4: unlock_mount(&old); if (!error) { path_put(&root_parent); path_put(&parent_path); } out3: path_put(&root); out2: path_put(&old); out1: path_put(&new); out0: return error; } static void __init init_mount_tree(void) { struct vfsmount *mnt; struct mnt_namespace *ns; struct path root; struct file_system_type *type; type = get_fs_type("rootfs"); if (!type) panic("Can't find rootfs type"); mnt = vfs_kern_mount(type, 0, "rootfs", NULL); put_filesystem(type); if (IS_ERR(mnt)) panic("Can't create rootfs"); ns = create_mnt_ns(mnt); if (IS_ERR(ns)) panic("Can't allocate initial namespace"); init_task.nsproxy->mnt_ns = ns; get_mnt_ns(ns); root.mnt = mnt; root.dentry = mnt->mnt_root; set_fs_pwd(current->fs, &root); set_fs_root(current->fs, &root); } void __init mnt_init(void) { unsigned u; int err; init_rwsem(&namespace_sem); mnt_cache = kmem_cache_create("mnt_cache", 
sizeof(struct mount), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); if (!mount_hashtable) panic("Failed to allocate mount hash table\n"); printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE); for (u = 0; u < HASH_SIZE; u++) INIT_LIST_HEAD(&mount_hashtable[u]); br_lock_init(&vfsmount_lock); err = sysfs_init(); if (err) printk(KERN_WARNING "%s: sysfs_init error: %d\n", __func__, err); fs_kobj = kobject_create_and_add("fs", NULL); if (!fs_kobj) printk(KERN_WARNING "%s: kobj create error\n", __func__); init_rootfs(); init_mount_tree(); } void put_mnt_ns(struct mnt_namespace *ns) { LIST_HEAD(umount_list); if (!atomic_dec_and_test(&ns->count)) return; down_write(&namespace_sem); br_write_lock(&vfsmount_lock); umount_tree(ns->root, 0, &umount_list); br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); free_mnt_ns(ns); } struct vfsmount *kern_mount_data(struct file_system_type *type, void *data) { struct vfsmount *mnt; mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data); if (!IS_ERR(mnt)) { /* * it is a longterm mount, don't release mnt until * we unmount before file sys is unregistered */ real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; } return mnt; } EXPORT_SYMBOL_GPL(kern_mount_data); void kern_unmount(struct vfsmount *mnt) { /* release long term mount so mount point can be released */ if (!IS_ERR_OR_NULL(mnt)) { br_write_lock(&vfsmount_lock); real_mount(mnt)->mnt_ns = NULL; br_write_unlock(&vfsmount_lock); mntput(mnt); } } EXPORT_SYMBOL(kern_unmount); bool our_mnt(struct vfsmount *mnt) { return check_mnt(real_mount(mnt)); } bool current_chrooted(void) { /* Does the current process have a non-standard root */ struct path ns_root; struct path fs_root; bool chrooted; /* Find the namespace root */ ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt; ns_root.dentry = ns_root.mnt->mnt_root; path_get(&ns_root); while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) ; get_fs_root(current->fs, &fs_root); chrooted = !path_equal(&fs_root, &ns_root); path_put(&fs_root); path_put(&ns_root); return chrooted; } static void *mntns_get(struct task_struct *task) { struct mnt_namespace *ns = NULL; struct nsproxy *nsproxy; rcu_read_lock(); nsproxy = task_nsproxy(task); if (nsproxy) { ns = nsproxy->mnt_ns; get_mnt_ns(ns); } rcu_read_unlock(); return ns; } static void mntns_put(void *ns) { put_mnt_ns(ns); } static int mntns_install(struct nsproxy *nsproxy, void *ns) { struct fs_struct *fs = current->fs; struct mnt_namespace *mnt_ns = ns; struct path root; if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) || !nsown_capable(CAP_SYS_CHROOT) || !nsown_capable(CAP_SYS_ADMIN)) return -EPERM; if (fs->users != 1) return -EINVAL; get_mnt_ns(mnt_ns); put_mnt_ns(nsproxy->mnt_ns); nsproxy->mnt_ns = mnt_ns; /* Find the root */ root.mnt = &mnt_ns->root->mnt; root.dentry = mnt_ns->root->mnt.mnt_root; path_get(&root); while(d_mountpoint(root.dentry) && follow_down_one(&root)) ; /* Update the pwd and root */ set_fs_pwd(fs, &root); set_fs_root(fs, &root); path_put(&root); return 0; } static unsigned int mntns_inum(void *ns) { struct mnt_namespace *mnt_ns = ns; return mnt_ns->proc_inum; } const struct proc_ns_operations mntns_operations = { .name = "mnt", .type = CLONE_NEWNS, .get = mntns_get, .put = mntns_put, .install = mntns_install, .inum = mntns_inum, };
./CrossVul/dataset_final_sorted/CWE-264/c/good_5616_0
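The loopback and move paths above are exactly what userspace reaches through mount(2) with MS_BIND and MS_MOVE. A minimal sketch, assuming hypothetical scratch directories /mnt/src, /mnt/dst and /mnt/moved and CAP_SYS_ADMIN in the caller's namespaces:

/* bindmove.c - drive do_loopback() and do_move_mount() from userspace */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mount.h>

int main(void)
{
	/* MS_BIND|MS_REC takes the copy_tree() branch of do_loopback() */
	if (mount("/mnt/src", "/mnt/dst", NULL, MS_BIND | MS_REC, NULL) == -1) {
		perror("bind mount");
		return EXIT_FAILURE;
	}
	/* MS_MOVE routes to do_move_mount(); per the checks above it fails
	 * with EINVAL when, e.g., the source lives in a shared parent */
	if (mount("/mnt/dst", "/mnt/moved", NULL, MS_MOVE, NULL) == -1) {
		perror("move mount");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}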
crossvul-cpp_data_good_5861_2
/* * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using * ARM NEON instructions. * * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This file is based on sha1_generic.c and sha1_ssse3_glue.c: * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> * Copyright (c) Mathias Krause <minipli@googlemail.com> * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/cryptohash.h> #include <linux/types.h> #include <crypto/sha.h> #include <asm/byteorder.h> #include <asm/neon.h> #include <asm/simd.h> #include <asm/crypto/sha1.h> asmlinkage void sha1_transform_neon(void *state_h, const char *data, unsigned int rounds); static int sha1_neon_init(struct shash_desc *desc) { struct sha1_state *sctx = shash_desc_ctx(desc); *sctx = (struct sha1_state){ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, }; return 0; } static int __sha1_neon_update(struct shash_desc *desc, const u8 *data, unsigned int len, unsigned int partial) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int done = 0; sctx->count += len; if (partial) { done = SHA1_BLOCK_SIZE - partial; memcpy(sctx->buffer + partial, data, done); sha1_transform_neon(sctx->state, sctx->buffer, 1); } if (len - done >= SHA1_BLOCK_SIZE) { const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; sha1_transform_neon(sctx->state, data + done, rounds); done += rounds * SHA1_BLOCK_SIZE; } memcpy(sctx->buffer, data + done, len - done); return 0; } static int sha1_neon_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; int res; /* Handle the fast case right here */ if (partial + len < SHA1_BLOCK_SIZE) { sctx->count += len; memcpy(sctx->buffer + partial, data, len); return 0; } if (!may_use_simd()) { res = sha1_update_arm(desc, data, len); } else { kernel_neon_begin(); res = __sha1_neon_update(desc, data, len, partial); kernel_neon_end(); } return res; } /* Add padding and return the message digest. */ static int sha1_neon_final(struct shash_desc *desc, u8 *out) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; __be32 *dst = (__be32 *)out; __be64 bits; static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, }; bits = cpu_to_be64(sctx->count << 3); /* Pad out to 56 mod 64 and append length */ index = sctx->count % SHA1_BLOCK_SIZE; padlen = (index < 56) ? 
(56 - index) : ((SHA1_BLOCK_SIZE+56) - index); if (!may_use_simd()) { sha1_update_arm(desc, padding, padlen); sha1_update_arm(desc, (const u8 *)&bits, sizeof(bits)); } else { kernel_neon_begin(); /* We need to fill a whole block for __sha1_neon_update() */ if (padlen <= 56) { sctx->count += padlen; memcpy(sctx->buffer + index, padding, padlen); } else { __sha1_neon_update(desc, padding, padlen, index); } __sha1_neon_update(desc, (const u8 *)&bits, sizeof(bits), 56); kernel_neon_end(); } /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_be32(sctx->state[i]); /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int sha1_neon_export(struct shash_desc *desc, void *out) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int sha1_neon_import(struct shash_desc *desc, const void *in) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_neon_init, .update = sha1_neon_update, .final = sha1_neon_final, .export = sha1_neon_export, .import = sha1_neon_import, .descsize = sizeof(struct sha1_state), .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "sha1-neon", .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init sha1_neon_mod_init(void) { if (!cpu_has_neon()) return -ENODEV; return crypto_register_shash(&alg); } static void __exit sha1_neon_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(sha1_neon_mod_init); module_exit(sha1_neon_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated"); MODULE_ALIAS_CRYPTO("sha1");
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_2
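The padlen expression split across the two record lines above is standard Merkle-Damgard finalization: pad the message to 56 mod 64 bytes so the 8-byte big-endian bit count lands exactly at the end of the last block. A self-contained sketch of that arithmetic:

/* padlen.c - the SHA-1 finalization padding computation in isolation */
#include <stdio.h>

#define SHA1_BLOCK_SIZE 64

static unsigned int pad_len(unsigned long long count)
{
	unsigned int index = count % SHA1_BLOCK_SIZE;

	/* same expression as sha1_neon_final() above */
	return (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE + 56) - index);
}

int main(void)
{
	unsigned long long counts[] = { 0, 1, 55, 56, 63, 64, 119 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned long long c = counts[i];
		unsigned int p = pad_len(c);

		/* message + padding + 8-byte length is always a whole
		 * number of 64-byte blocks, so the last column is 0 */
		printf("count=%3llu padlen=%2u total%%64=%llu\n",
		       c, p, (c + p + 8) % SHA1_BLOCK_SIZE);
	}
	return 0;
}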
crossvul-cpp_data_bad_1455_0
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "uv.h" #include "internal.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <errno.h> #include <sys/types.h> #include <sys/wait.h> #include <unistd.h> #include <fcntl.h> #include <poll.h> #if defined(__APPLE__) && !TARGET_OS_IPHONE # include <crt_externs.h> # define environ (*_NSGetEnviron()) #else extern char **environ; #endif static QUEUE* uv__process_queue(uv_loop_t* loop, int pid) { assert(pid > 0); return loop->process_handles + pid % ARRAY_SIZE(loop->process_handles); } static void uv__chld(uv_signal_t* handle, int signum) { uv_process_t* process; uv_loop_t* loop; int exit_status; int term_signal; unsigned int i; int status; pid_t pid; QUEUE pending; QUEUE* h; QUEUE* q; assert(signum == SIGCHLD); QUEUE_INIT(&pending); loop = handle->loop; for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++) { h = loop->process_handles + i; q = QUEUE_HEAD(h); while (q != h) { process = QUEUE_DATA(q, uv_process_t, queue); q = QUEUE_NEXT(q); do pid = waitpid(process->pid, &status, WNOHANG); while (pid == -1 && errno == EINTR); if (pid == 0) continue; if (pid == -1) { if (errno != ECHILD) abort(); continue; } process->status = status; QUEUE_REMOVE(&process->queue); QUEUE_INSERT_TAIL(&pending, &process->queue); } while (!QUEUE_EMPTY(&pending)) { q = QUEUE_HEAD(&pending); QUEUE_REMOVE(q); QUEUE_INIT(q); process = QUEUE_DATA(q, uv_process_t, queue); uv__handle_stop(process); if (process->exit_cb == NULL) continue; exit_status = 0; if (WIFEXITED(process->status)) exit_status = WEXITSTATUS(process->status); term_signal = 0; if (WIFSIGNALED(process->status)) term_signal = WTERMSIG(process->status); process->exit_cb(process, exit_status, term_signal); } } } int uv__make_socketpair(int fds[2], int flags) { #if defined(__linux__) static int no_cloexec; if (no_cloexec) goto skip; if (socketpair(AF_UNIX, SOCK_STREAM | UV__SOCK_CLOEXEC | flags, 0, fds) == 0) return 0; /* Retry on EINVAL, it means SOCK_CLOEXEC is not supported. * Anything else is a genuine error. 
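 * (EINVAL tells us the running kernel predates SOCK_CLOEXEC, so remember that in no_cloexec and fall back to setting FD_CLOEXEC by hand below.)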
*/ if (errno != EINVAL) return -errno; no_cloexec = 1; skip: #endif if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) return -errno; uv__cloexec(fds[0], 1); uv__cloexec(fds[1], 1); if (flags & UV__F_NONBLOCK) { uv__nonblock(fds[0], 1); uv__nonblock(fds[1], 1); } return 0; } int uv__make_pipe(int fds[2], int flags) { #if defined(__linux__) static int no_pipe2; if (no_pipe2) goto skip; if (uv__pipe2(fds, flags | UV__O_CLOEXEC) == 0) return 0; if (errno != ENOSYS) return -errno; no_pipe2 = 1; skip: #endif if (pipe(fds)) return -errno; uv__cloexec(fds[0], 1); uv__cloexec(fds[1], 1); if (flags & UV__F_NONBLOCK) { uv__nonblock(fds[0], 1); uv__nonblock(fds[1], 1); } return 0; } /* * Used for initializing stdio streams like options.stdin_stream. Returns * zero on success. See also the cleanup section in uv_spawn(). */ static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) { int mask; int fd; mask = UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | UV_INHERIT_STREAM; switch (container->flags & mask) { case UV_IGNORE: return 0; case UV_CREATE_PIPE: assert(container->data.stream != NULL); if (container->data.stream->type != UV_NAMED_PIPE) return -EINVAL; else return uv__make_socketpair(fds, 0); case UV_INHERIT_FD: case UV_INHERIT_STREAM: if (container->flags & UV_INHERIT_FD) fd = container->data.fd; else fd = uv__stream_fd(container->data.stream); if (fd == -1) return -EINVAL; fds[1] = fd; return 0; default: assert(0 && "Unexpected flags"); return -EINVAL; } } static int uv__process_open_stream(uv_stdio_container_t* container, int pipefds[2], int writable) { int flags; if (!(container->flags & UV_CREATE_PIPE) || pipefds[0] < 0) return 0; if (uv__close(pipefds[1])) if (errno != EINTR && errno != EINPROGRESS) abort(); pipefds[1] = -1; uv__nonblock(pipefds[0], 1); if (container->data.stream->type == UV_NAMED_PIPE && ((uv_pipe_t*)container->data.stream)->ipc) flags = UV_STREAM_READABLE | UV_STREAM_WRITABLE; else if (writable) flags = UV_STREAM_WRITABLE; else flags = UV_STREAM_READABLE; return uv__stream_open(container->data.stream, pipefds[0], flags); } static void uv__process_close_stream(uv_stdio_container_t* container) { if (!(container->flags & UV_CREATE_PIPE)) return; uv__stream_close((uv_stream_t*)container->data.stream); } static void uv__write_int(int fd, int val) { ssize_t n; do n = write(fd, &val, sizeof(val)); while (n == -1 && errno == EINTR); if (n == -1 && errno == EPIPE) return; /* parent process has quit */ assert(n == sizeof(val)); } static void uv__process_child_init(const uv_process_options_t* options, int stdio_count, int (*pipes)[2], int error_fd) { int close_fd; int use_fd; int fd; if (options->flags & UV_PROCESS_DETACHED) setsid(); for (fd = 0; fd < stdio_count; fd++) { close_fd = pipes[fd][0]; use_fd = pipes[fd][1]; if (use_fd < 0) { if (fd >= 3) continue; else { /* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is * set */ use_fd = open("/dev/null", fd == 0 ? 
O_RDONLY : O_RDWR); close_fd = use_fd; if (use_fd == -1) { uv__write_int(error_fd, -errno); perror("failed to open stdio"); _exit(127); } } } if (fd == use_fd) uv__cloexec(use_fd, 0); else dup2(use_fd, fd); if (fd <= 2) uv__nonblock(fd, 0); if (close_fd != -1) uv__close(close_fd); } for (fd = 0; fd < stdio_count; fd++) { use_fd = pipes[fd][1]; if (use_fd >= 0 && fd != use_fd) close(use_fd); } if (options->cwd != NULL && chdir(options->cwd)) { uv__write_int(error_fd, -errno); perror("chdir()"); _exit(127); } if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) { uv__write_int(error_fd, -errno); perror("setgid()"); _exit(127); } if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) { uv__write_int(error_fd, -errno); perror("setuid()"); _exit(127); } if (options->env != NULL) { environ = options->env; } execvp(options->file, options->args); uv__write_int(error_fd, -errno); perror("execvp()"); _exit(127); } int uv_spawn(uv_loop_t* loop, uv_process_t* process, const uv_process_options_t* options) { int signal_pipe[2] = { -1, -1 }; int (*pipes)[2]; int stdio_count; QUEUE* q; ssize_t r; pid_t pid; int err; int exec_errorno; int i; assert(options->file != NULL); assert(!(options->flags & ~(UV_PROCESS_DETACHED | UV_PROCESS_SETGID | UV_PROCESS_SETUID | UV_PROCESS_WINDOWS_HIDE | UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS))); uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS); QUEUE_INIT(&process->queue); stdio_count = options->stdio_count; if (stdio_count < 3) stdio_count = 3; err = -ENOMEM; pipes = malloc(stdio_count * sizeof(*pipes)); if (pipes == NULL) goto error; for (i = 0; i < stdio_count; i++) { pipes[i][0] = -1; pipes[i][1] = -1; } for (i = 0; i < options->stdio_count; i++) { err = uv__process_init_stdio(options->stdio + i, pipes[i]); if (err) goto error; } /* This pipe is used by the parent to wait until * the child has called `execve()`. We need this * to avoid the following race condition: * * if ((pid = fork()) > 0) { * kill(pid, SIGTERM); * } * else if (pid == 0) { * execve("/bin/cat", argp, envp); * } * * The parent sends a signal immediately after forking. * Since the child may not have called `execve()` yet, * there is no telling what process receives the signal, * our fork or /bin/cat. * * To avoid ambiguity, we create a pipe with both ends * marked close-on-exec. Then, after the call to `fork()`, * the parent polls the read end until it EOFs or errors with EPIPE. 
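 * An EOF therefore means the close-on-exec flag shut the write end inside execve(), i.e. the exec succeeded; a readable int is the -errno the child wrote just before _exit(127).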
*/ err = uv__make_pipe(signal_pipe, 0); if (err) goto error; uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD); /* Acquire write lock to prevent opening new fds in worker threads */ uv_rwlock_wrlock(&loop->cloexec_lock); pid = fork(); if (pid == -1) { err = -errno; uv_rwlock_wrunlock(&loop->cloexec_lock); uv__close(signal_pipe[0]); uv__close(signal_pipe[1]); goto error; } if (pid == 0) { uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]); abort(); } /* Release lock in parent process */ uv_rwlock_wrunlock(&loop->cloexec_lock); uv__close(signal_pipe[1]); process->status = 0; exec_errorno = 0; do r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno)); while (r == -1 && errno == EINTR); if (r == 0) ; /* okay, EOF */ else if (r == sizeof(exec_errorno)) ; /* okay, read errorno */ else if (r == -1 && errno == EPIPE) ; /* okay, got EPIPE */ else abort(); uv__close(signal_pipe[0]); for (i = 0; i < options->stdio_count; i++) { err = uv__process_open_stream(options->stdio + i, pipes[i], i == 0); if (err == 0) continue; while (i--) uv__process_close_stream(options->stdio + i); goto error; } /* Only activate this handle if exec() happened successfully */ if (exec_errorno == 0) { q = uv__process_queue(loop, pid); QUEUE_INSERT_TAIL(q, &process->queue); uv__handle_start(process); } process->pid = pid; process->exit_cb = options->exit_cb; free(pipes); return exec_errorno; error: if (pipes != NULL) { for (i = 0; i < stdio_count; i++) { if (i < options->stdio_count) if (options->stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM)) continue; if (pipes[i][0] != -1) close(pipes[i][0]); if (pipes[i][1] != -1) close(pipes[i][1]); } free(pipes); } return err; } int uv_process_kill(uv_process_t* process, int signum) { return uv_kill(process->pid, signum); } int uv_kill(int pid, int signum) { if (kill(pid, signum)) return -errno; else return 0; } void uv__process_close(uv_process_t* handle) { /* TODO stop signal watcher when this is the last handle */ QUEUE_REMOVE(&handle->queue); uv__handle_stop(handle); }
./CrossVul/dataset_final_sorted/CWE-264/c/bad_1455_0
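The close-on-exec signal pipe that the uv_spawn() comments above describe is easy to demonstrate outside libuv. A minimal sketch with error handling trimmed; the command name is deliberately bogus so the failure path fires:

/* execpipe.c - report execlp() failure to the parent via a CLOEXEC pipe */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	int err = 0;
	ssize_t n;
	pid_t pid;

	if (pipe(fds) == -1)
		return 1;
	fcntl(fds[0], F_SETFD, FD_CLOEXEC);
	fcntl(fds[1], F_SETFD, FD_CLOEXEC);

	pid = fork();
	if (pid == 0) {
		execlp("definitely-not-a-real-command", "x", (char *)NULL);
		err = -errno;	/* only reached when the exec failed */
		(void)write(fds[1], &err, sizeof(err));
		_exit(127);
	}

	close(fds[1]);	/* parent must drop its write end before reading */
	n = read(fds[0], &err, sizeof(err));
	if (n == 0)
		printf("exec succeeded (write end closed on exec)\n");
	else if (n == sizeof(err))
		printf("exec failed: %s\n", strerror(-err));
	close(fds[0]);
	waitpid(pid, NULL, 0);
	return 0;
}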
crossvul-cpp_data_bad_3636_1
/* * Copyright (c) 2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Copyright (C) 2006-2008 Intel Corporation * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: Allen M. Kay <allen.m.kay@intel.com> * Author: Weidong Han <weidong.han@intel.com> * Author: Ben-Ami Yassour <benami@il.ibm.com> */ #include <linux/list.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/dmar.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> static bool allow_unsafe_assigned_interrupts; module_param_named(allow_unsafe_assigned_interrupts, allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(allow_unsafe_assigned_interrupts, "Enable device assignment on platforms without interrupt remapping support."); static int kvm_iommu_unmap_memslots(struct kvm *kvm); static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long size) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(kvm, slot, gfn); end_gfn = gfn + (size >> PAGE_SHIFT); gfn += 1; if (is_error_pfn(pfn)) return pfn; while (gfn < end_gfn) gfn_to_pfn_memslot(kvm, slot, gfn++); return pfn; } int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { gfn_t gfn, end_gfn; pfn_t pfn; int r = 0; struct iommu_domain *domain = kvm->arch.iommu_domain; int flags; /* check if iommu exists and in use */ if (!domain) return 0; gfn = slot->base_gfn; end_gfn = gfn + slot->npages; flags = IOMMU_READ | IOMMU_WRITE; if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY) flags |= IOMMU_CACHE; while (gfn < end_gfn) { unsigned long page_size; /* Check if already mapped */ if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { gfn += 1; continue; } /* Get the page size we could use to map */ page_size = kvm_host_page_size(kvm, gfn); /* Make sure the page_size does not exceed the memslot */ while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) page_size >>= 1; /* Make sure gfn is aligned to the page size we want to map */ while ((gfn << PAGE_SHIFT) & (page_size - 1)) page_size >>= 1; /* * Pin all pages we are about to map in memory. This is * important because we unmap and unpin in 4kb steps later. 
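 * kvm_pin_pages() does this by calling gfn_to_pfn_memslot() once per gfn, taking one page reference for every 4kb page in the region.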
*/ pfn = kvm_pin_pages(kvm, slot, gfn, page_size); if (is_error_pfn(pfn)) { gfn += 1; continue; } /* Map into IO address space */ r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), page_size, flags); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); goto unmap_pages; } gfn += page_size >> PAGE_SHIFT; } return 0; unmap_pages: kvm_iommu_put_pages(kvm, slot->base_gfn, gfn); return r; } static int kvm_iommu_map_memslots(struct kvm *kvm) { int idx, r = 0; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { r = kvm_iommu_map_pages(kvm, memslot); if (r) break; } srcu_read_unlock(&kvm->srcu, idx); return r; } int kvm_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct pci_dev *pdev = NULL; struct iommu_domain *domain = kvm->arch.iommu_domain; int r, last_flags; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; r = iommu_attach_device(domain, &pdev->dev); if (r) { printk(KERN_ERR "assign device %x:%x:%x.%x failed", pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); return r; } last_flags = kvm->arch.iommu_flags; if (iommu_domain_has_cap(kvm->arch.iommu_domain, IOMMU_CAP_CACHE_COHERENCY)) kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY; /* Check if need to update IOMMU page table for guest memory */ if ((last_flags ^ kvm->arch.iommu_flags) == KVM_IOMMU_CACHE_COHERENCY) { kvm_iommu_unmap_memslots(kvm); r = kvm_iommu_map_memslots(kvm); if (r) goto out_unmap; } pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; printk(KERN_DEBUG "assign device %x:%x:%x.%x\n", assigned_dev->host_segnr, assigned_dev->host_busnr, PCI_SLOT(assigned_dev->host_devfn), PCI_FUNC(assigned_dev->host_devfn)); return 0; out_unmap: kvm_iommu_unmap_memslots(kvm); return r; } int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; iommu_detach_device(domain, &pdev->dev); pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n", assigned_dev->host_segnr, assigned_dev->host_busnr, PCI_SLOT(assigned_dev->host_devfn), PCI_FUNC(assigned_dev->host_devfn)); return 0; } int kvm_iommu_map_guest(struct kvm *kvm) { int r; if (!iommu_present(&pci_bus_type)) { printk(KERN_ERR "%s: iommu not found\n", __func__); return -ENODEV; } kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); if (!kvm->arch.iommu_domain) return -ENOMEM; if (!allow_unsafe_assigned_interrupts && !iommu_domain_has_cap(kvm->arch.iommu_domain, IOMMU_CAP_INTR_REMAP)) { printk(KERN_WARNING "%s: No interrupt remapping support," " disallowing device assignment." 
" Re-enble with \"allow_unsafe_assigned_interrupts=1\"" " module option.\n", __func__); iommu_domain_free(kvm->arch.iommu_domain); kvm->arch.iommu_domain = NULL; return -EPERM; } r = kvm_iommu_map_memslots(kvm); if (r) goto out_unmap; return 0; out_unmap: kvm_iommu_unmap_memslots(kvm); return r; } static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) { unsigned long i; for (i = 0; i < npages; ++i) kvm_release_pfn_clean(pfn + i); } static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) { struct iommu_domain *domain; gfn_t end_gfn, gfn; pfn_t pfn; u64 phys; domain = kvm->arch.iommu_domain; end_gfn = base_gfn + npages; gfn = base_gfn; /* check if iommu exists and in use */ if (!domain) return; while (gfn < end_gfn) { unsigned long unmap_pages; size_t size; /* Get physical address */ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); pfn = phys >> PAGE_SHIFT; /* Unmap address from IO address space */ size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); unmap_pages = 1ULL << get_order(size); /* Unpin all pages we just unmapped to not leak any memory */ kvm_unpin_pages(kvm, pfn, unmap_pages); gfn += unmap_pages; } } static int kvm_iommu_unmap_memslots(struct kvm *kvm) { int idx; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages); srcu_read_unlock(&kvm->srcu, idx); return 0; } int kvm_iommu_unmap_guest(struct kvm *kvm) { struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) return 0; kvm_iommu_unmap_memslots(kvm); iommu_domain_free(domain); return 0; }
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3636_1
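The two while loops in kvm_iommu_map_pages() above that shrink page_size are pure bit arithmetic and can be sanity-checked in isolation. A standalone sketch; the slot bounds in main() are made-up test values:

/* fitsize.c - the page-size clamping logic from kvm_iommu_map_pages() */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long fit_page_size(unsigned long page_size,
				   unsigned long gfn, unsigned long end_gfn)
{
	/* make sure the mapping does not run past the memslot */
	while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
		page_size >>= 1;
	/* make sure gfn is aligned to the mapping size */
	while ((gfn << PAGE_SHIFT) & (page_size - 1))
		page_size >>= 1;
	return page_size;
}

int main(void)
{
	/* 2MiB candidate but only 16 pages left in the slot: prints 65536 */
	printf("%lu\n", fit_page_size(1UL << 21, 0, 16));
	/* 2MiB candidate but gfn misaligned by one page: prints 4096 */
	printf("%lu\n", fit_page_size(1UL << 21, 1, 1UL << 20));
	return 0;
}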
crossvul-cpp_data_bad_4765_0
/* $OpenBSD: serverloop.c,v 1.188 2016/11/30 03:00:05 djm Exp $ */ /* * Author: Tatu Ylonen <ylo@cs.hut.fi> * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland * All rights reserved * Server main loop for handling the interactive session. * * As far as I am concerned, the code I have written for this software * can be used freely for any purpose. Any derived versions of this * software must be clearly marked as such, and if the derived work is * incompatible with the protocol description in the RFC file, it must be * called by a name other than "ssh" or "Secure Shell". * * SSH2 support by Markus Friedl. * Copyright (c) 2000, 2001 Markus Friedl. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/types.h> #include <sys/wait.h> #include <sys/socket.h> #include <sys/time.h> #include <sys/queue.h> #include <netinet/in.h> #include <errno.h> #include <fcntl.h> #include <pwd.h> #include <signal.h> #include <string.h> #include <termios.h> #include <unistd.h> #include <stdarg.h> #include "xmalloc.h" #include "packet.h" #include "buffer.h" #include "log.h" #include "misc.h" #include "servconf.h" #include "canohost.h" #include "sshpty.h" #include "channels.h" #include "compat.h" #include "ssh2.h" #include "key.h" #include "cipher.h" #include "kex.h" #include "hostfile.h" #include "auth.h" #include "session.h" #include "dispatch.h" #include "auth-options.h" #include "serverloop.h" #include "ssherr.h" extern ServerOptions options; /* XXX */ extern Authctxt *the_authctxt; extern int use_privsep; static int no_more_sessions = 0; /* Disallow further sessions. */ /* * This SIGCHLD kludge is used to detect when the child exits. The server * will exit after that, as soon as forwarded connections have terminated. */ static volatile sig_atomic_t child_terminated = 0; /* The child has terminated. 
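 * The handler below only records the fact and pokes notify_pipe; the actual waitpid() reaping happens in collect_children().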
*/ /* Cleanup on signals (!use_privsep case only) */ static volatile sig_atomic_t received_sigterm = 0; /* prototypes */ static void server_init_dispatch(void); /* * we write to this pipe if a SIGCHLD is caught in order to avoid * the race between select() and child_terminated */ static int notify_pipe[2]; static void notify_setup(void) { if (pipe(notify_pipe) < 0) { error("pipe(notify_pipe) failed %s", strerror(errno)); } else if ((fcntl(notify_pipe[0], F_SETFD, FD_CLOEXEC) == -1) || (fcntl(notify_pipe[1], F_SETFD, FD_CLOEXEC) == -1)) { error("fcntl(notify_pipe, F_SETFD) failed %s", strerror(errno)); close(notify_pipe[0]); close(notify_pipe[1]); } else { set_nonblock(notify_pipe[0]); set_nonblock(notify_pipe[1]); return; } notify_pipe[0] = -1; /* read end */ notify_pipe[1] = -1; /* write end */ } static void notify_parent(void) { if (notify_pipe[1] != -1) (void)write(notify_pipe[1], "", 1); } static void notify_prepare(fd_set *readset) { if (notify_pipe[0] != -1) FD_SET(notify_pipe[0], readset); } static void notify_done(fd_set *readset) { char c; if (notify_pipe[0] != -1 && FD_ISSET(notify_pipe[0], readset)) while (read(notify_pipe[0], &c, 1) != -1) debug2("notify_done: reading"); } /*ARGSUSED*/ static void sigchld_handler(int sig) { int save_errno = errno; child_terminated = 1; signal(SIGCHLD, sigchld_handler); notify_parent(); errno = save_errno; } /*ARGSUSED*/ static void sigterm_handler(int sig) { received_sigterm = sig; } static void client_alive_check(void) { int channel_id; /* timeout, check to see how many we have had */ if (packet_inc_alive_timeouts() > options.client_alive_count_max) { logit("Timeout, client not responding."); cleanup_exit(255); } /* * send a bogus global/channel request with "wantreply", * we should get back a failure */ if ((channel_id = channel_find_open()) == -1) { packet_start(SSH2_MSG_GLOBAL_REQUEST); packet_put_cstring("keepalive@openssh.com"); packet_put_char(1); /* boolean: want reply */ } else { channel_request_start(channel_id, "keepalive@openssh.com", 1); } packet_send(); } /* * Sleep in select() until we can do something. This will initialize the * select masks. Upon return, the masks will indicate which descriptors * have data or can accept data. Optionally, a maximum time can be specified * for the duration of the wait (0 = infinite). */ static void wait_until_can_do_something(int connection_in, int connection_out, fd_set **readsetp, fd_set **writesetp, int *maxfdp, u_int *nallocp, u_int64_t max_time_ms) { struct timeval tv, *tvp; int ret; time_t minwait_secs = 0; int client_alive_scheduled = 0; /* Allocate and update select() masks for channel descriptors. */ channel_prepare_select(readsetp, writesetp, maxfdp, nallocp, &minwait_secs, 0); /* XXX need proper deadline system for rekey/client alive */ if (minwait_secs != 0) max_time_ms = MINIMUM(max_time_ms, (u_int)minwait_secs * 1000); /* * if using client_alive, set the max timeout accordingly, * and indicate that this particular timeout was for client * alive by setting the client_alive_scheduled flag. * * this could be randomized somewhat to make traffic * analysis more difficult, but we're not doing it yet. 
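 * in practice the deadline used below ends up being the smallest of the caller's rekey deadline, any channel-imposed minimum wait, and the client alive interval.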
*/ if (options.client_alive_interval) { uint64_t keepalive_ms = (uint64_t)options.client_alive_interval * 1000; client_alive_scheduled = 1; if (max_time_ms == 0 || max_time_ms > keepalive_ms) max_time_ms = keepalive_ms; } #if 0 /* wrong: bad condition XXX */ if (channel_not_very_much_buffered_data()) #endif FD_SET(connection_in, *readsetp); notify_prepare(*readsetp); /* * If we have buffered packet data going to the client, mark that * descriptor. */ if (packet_have_data_to_write()) FD_SET(connection_out, *writesetp); /* * If child has terminated and there is enough buffer space to read * from it, then read as much as is available and exit. */ if (child_terminated && packet_not_very_much_data_to_write()) if (max_time_ms == 0 || client_alive_scheduled) max_time_ms = 100; if (max_time_ms == 0) tvp = NULL; else { tv.tv_sec = max_time_ms / 1000; tv.tv_usec = 1000 * (max_time_ms % 1000); tvp = &tv; } /* Wait for something to happen, or the timeout to expire. */ ret = select((*maxfdp)+1, *readsetp, *writesetp, NULL, tvp); if (ret == -1) { memset(*readsetp, 0, *nallocp); memset(*writesetp, 0, *nallocp); if (errno != EINTR) error("select: %.100s", strerror(errno)); } else if (ret == 0 && client_alive_scheduled) client_alive_check(); notify_done(*readsetp); } /* * Processes input from the client and the program. Input data is stored * in buffers and processed later. */ static int process_input(fd_set *readset, int connection_in) { struct ssh *ssh = active_state; /* XXX */ int len; char buf[16384]; /* Read and buffer any input data from the client. */ if (FD_ISSET(connection_in, readset)) { len = read(connection_in, buf, sizeof(buf)); if (len == 0) { verbose("Connection closed by %.100s port %d", ssh_remote_ipaddr(ssh), ssh_remote_port(ssh)); return -1; } else if (len < 0) { if (errno != EINTR && errno != EAGAIN) { verbose("Read error from remote host " "%.100s port %d: %.100s", ssh_remote_ipaddr(ssh), ssh_remote_port(ssh), strerror(errno)); cleanup_exit(255); } } else { /* Buffer any received data. */ packet_process_incoming(buf, len); } } return 0; } /* * Sends data from internal buffers to client program stdin. */ static void process_output(fd_set *writeset, int connection_out) { /* Send any buffered packet data to the client. 
*/ if (FD_ISSET(connection_out, writeset)) packet_write_poll(); } static void process_buffered_input_packets(void) { dispatch_run(DISPATCH_NONBLOCK, NULL, active_state); } static void collect_children(void) { pid_t pid; sigset_t oset, nset; int status; /* block SIGCHLD while we check for dead children */ sigemptyset(&nset); sigaddset(&nset, SIGCHLD); sigprocmask(SIG_BLOCK, &nset, &oset); if (child_terminated) { debug("Received SIGCHLD."); while ((pid = waitpid(-1, &status, WNOHANG)) > 0 || (pid < 0 && errno == EINTR)) if (pid > 0) session_close_by_pid(pid, status); child_terminated = 0; } sigprocmask(SIG_SETMASK, &oset, NULL); } void server_loop2(Authctxt *authctxt) { fd_set *readset = NULL, *writeset = NULL; int max_fd; u_int nalloc = 0, connection_in, connection_out; u_int64_t rekey_timeout_ms = 0; debug("Entering interactive session for SSH2."); signal(SIGCHLD, sigchld_handler); child_terminated = 0; connection_in = packet_get_connection_in(); connection_out = packet_get_connection_out(); if (!use_privsep) { signal(SIGTERM, sigterm_handler); signal(SIGINT, sigterm_handler); signal(SIGQUIT, sigterm_handler); } notify_setup(); max_fd = MAXIMUM(connection_in, connection_out); max_fd = MAXIMUM(max_fd, notify_pipe[0]); server_init_dispatch(); for (;;) { process_buffered_input_packets(); if (!ssh_packet_is_rekeying(active_state) && packet_not_very_much_data_to_write()) channel_output_poll(); if (options.rekey_interval > 0 && !ssh_packet_is_rekeying(active_state)) rekey_timeout_ms = packet_get_rekey_timeout() * 1000; else rekey_timeout_ms = 0; wait_until_can_do_something(connection_in, connection_out, &readset, &writeset, &max_fd, &nalloc, rekey_timeout_ms); if (received_sigterm) { logit("Exiting on signal %d", (int)received_sigterm); /* Clean up sessions, utmp, etc. */ cleanup_exit(255); } collect_children(); if (!ssh_packet_is_rekeying(active_state)) channel_after_select(readset, writeset); if (process_input(readset, connection_in) < 0) break; process_output(writeset, connection_out); } collect_children(); free(readset); free(writeset); /* free all channels, no more reads and writes */ channel_free_all(); /* free remaining sessions, e.g. remove wtmp entries */ session_destroy_all(NULL); } static int server_input_keep_alive(int type, u_int32_t seq, void *ctxt) { debug("Got %d/%u for keepalive", type, seq); /* * reset timeout, since we got a sane answer from the client. * even if this was generated by something other than * the bogus CHANNEL_REQUEST we send for keepalives. 
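 * any parseable response is equally good evidence that the peer is alive, which is all the alive-check needs to know.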
*/ packet_set_alive_timeouts(0); return 0; } static Channel * server_request_direct_tcpip(void) { Channel *c = NULL; char *target, *originator; u_short target_port, originator_port; target = packet_get_string(NULL); target_port = packet_get_int(); originator = packet_get_string(NULL); originator_port = packet_get_int(); packet_check_eom(); debug("server_request_direct_tcpip: originator %s port %d, target %s " "port %d", originator, originator_port, target, target_port); /* XXX fine grained permissions */ if ((options.allow_tcp_forwarding & FORWARD_LOCAL) != 0 && !no_port_forwarding_flag && !options.disable_forwarding) { c = channel_connect_to_port(target, target_port, "direct-tcpip", "direct-tcpip"); } else { logit("refused local port forward: " "originator %s port %d, target %s port %d", originator, originator_port, target, target_port); } free(originator); free(target); return c; } static Channel * server_request_direct_streamlocal(void) { Channel *c = NULL; char *target, *originator; u_short originator_port; target = packet_get_string(NULL); originator = packet_get_string(NULL); originator_port = packet_get_int(); packet_check_eom(); debug("server_request_direct_streamlocal: originator %s port %d, target %s", originator, originator_port, target); /* XXX fine grained permissions */ if ((options.allow_streamlocal_forwarding & FORWARD_LOCAL) != 0 && !no_port_forwarding_flag && !options.disable_forwarding) { c = channel_connect_to_path(target, "direct-streamlocal@openssh.com", "direct-streamlocal"); } else { logit("refused streamlocal port forward: " "originator %s port %d, target %s", originator, originator_port, target); } free(originator); free(target); return c; } static Channel * server_request_tun(void) { Channel *c = NULL; int mode, tun; int sock; mode = packet_get_int(); switch (mode) { case SSH_TUNMODE_POINTOPOINT: case SSH_TUNMODE_ETHERNET: break; default: packet_send_debug("Unsupported tunnel device mode."); return NULL; } if ((options.permit_tun & mode) == 0) { packet_send_debug("Server has rejected tunnel device " "forwarding"); return NULL; } tun = packet_get_int(); if (forced_tun_device != -1) { if (tun != SSH_TUNID_ANY && forced_tun_device != tun) goto done; tun = forced_tun_device; } sock = tun_open(tun, mode); if (sock < 0) goto done; c = channel_new("tun", SSH_CHANNEL_OPEN, sock, sock, -1, CHAN_TCP_WINDOW_DEFAULT, CHAN_TCP_PACKET_DEFAULT, 0, "tun", 1); c->datagram = 1; done: if (c == NULL) packet_send_debug("Failed to open the tunnel device."); return c; } static Channel * server_request_session(void) { Channel *c; debug("input_session_request"); packet_check_eom(); if (no_more_sessions) { packet_disconnect("Possible attack: attempt to open a session " "after additional sessions disabled"); } /* * A server session has no fd to read or write until a * CHANNEL_REQUEST for a shell is made, so we set the type to * SSH_CHANNEL_LARVAL. Additionally, a callback for handling all * CHANNEL_REQUEST messages is registered. 
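 * The channel leaves the larval state once a shell, exec or subsystem request gives it file descriptors to service.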
*/ c = channel_new("session", SSH_CHANNEL_LARVAL, -1, -1, -1, /*window size*/0, CHAN_SES_PACKET_DEFAULT, 0, "server-session", 1); if (session_open(the_authctxt, c->self) != 1) { debug("session open failed, free channel %d", c->self); channel_free(c); return NULL; } channel_register_cleanup(c->self, session_close_by_channel, 0); return c; } static int server_input_channel_open(int type, u_int32_t seq, void *ctxt) { Channel *c = NULL; char *ctype; int rchan; u_int rmaxpack, rwindow, len; ctype = packet_get_string(&len); rchan = packet_get_int(); rwindow = packet_get_int(); rmaxpack = packet_get_int(); debug("server_input_channel_open: ctype %s rchan %d win %d max %d", ctype, rchan, rwindow, rmaxpack); if (strcmp(ctype, "session") == 0) { c = server_request_session(); } else if (strcmp(ctype, "direct-tcpip") == 0) { c = server_request_direct_tcpip(); } else if (strcmp(ctype, "direct-streamlocal@openssh.com") == 0) { c = server_request_direct_streamlocal(); } else if (strcmp(ctype, "tun@openssh.com") == 0) { c = server_request_tun(); } if (c != NULL) { debug("server_input_channel_open: confirm %s", ctype); c->remote_id = rchan; c->remote_window = rwindow; c->remote_maxpacket = rmaxpack; if (c->type != SSH_CHANNEL_CONNECTING) { packet_start(SSH2_MSG_CHANNEL_OPEN_CONFIRMATION); packet_put_int(c->remote_id); packet_put_int(c->self); packet_put_int(c->local_window); packet_put_int(c->local_maxpacket); packet_send(); } } else { debug("server_input_channel_open: failure %s", ctype); packet_start(SSH2_MSG_CHANNEL_OPEN_FAILURE); packet_put_int(rchan); packet_put_int(SSH2_OPEN_ADMINISTRATIVELY_PROHIBITED); if (!(datafellows & SSH_BUG_OPENFAILURE)) { packet_put_cstring("open failed"); packet_put_cstring(""); } packet_send(); } free(ctype); return 0; } static int server_input_hostkeys_prove(struct sshbuf **respp) { struct ssh *ssh = active_state; /* XXX */ struct sshbuf *resp = NULL; struct sshbuf *sigbuf = NULL; struct sshkey *key = NULL, *key_pub = NULL, *key_prv = NULL; int r, ndx, success = 0; const u_char *blob; u_char *sig = 0; size_t blen, slen; if ((resp = sshbuf_new()) == NULL || (sigbuf = sshbuf_new()) == NULL) fatal("%s: sshbuf_new", __func__); while (ssh_packet_remaining(ssh) > 0) { sshkey_free(key); key = NULL; if ((r = sshpkt_get_string_direct(ssh, &blob, &blen)) != 0 || (r = sshkey_from_blob(blob, blen, &key)) != 0) { error("%s: couldn't parse key: %s", __func__, ssh_err(r)); goto out; } /* * Better check that this is actually one of our hostkeys * before attempting to sign anything with it. 
*/ if ((ndx = ssh->kex->host_key_index(key, 1, ssh)) == -1) { error("%s: unknown host %s key", __func__, sshkey_type(key)); goto out; } /* * XXX refactor: make kex->sign just use an index rather * than passing in public and private keys */ if ((key_prv = get_hostkey_by_index(ndx)) == NULL && (key_pub = get_hostkey_public_by_index(ndx, ssh)) == NULL) { error("%s: can't retrieve hostkey %d", __func__, ndx); goto out; } sshbuf_reset(sigbuf); free(sig); sig = NULL; if ((r = sshbuf_put_cstring(sigbuf, "hostkeys-prove-00@openssh.com")) != 0 || (r = sshbuf_put_string(sigbuf, ssh->kex->session_id, ssh->kex->session_id_len)) != 0 || (r = sshkey_puts(key, sigbuf)) != 0 || (r = ssh->kex->sign(key_prv, key_pub, &sig, &slen, sshbuf_ptr(sigbuf), sshbuf_len(sigbuf), NULL, 0)) != 0 || (r = sshbuf_put_string(resp, sig, slen)) != 0) { error("%s: couldn't prepare signature: %s", __func__, ssh_err(r)); goto out; } } /* Success */ *respp = resp; resp = NULL; /* don't free it */ success = 1; out: free(sig); sshbuf_free(resp); sshbuf_free(sigbuf); sshkey_free(key); return success; } static int server_input_global_request(int type, u_int32_t seq, void *ctxt) { char *rtype; int want_reply; int r, success = 0, allocated_listen_port = 0; struct sshbuf *resp = NULL; rtype = packet_get_string(NULL); want_reply = packet_get_char(); debug("server_input_global_request: rtype %s want_reply %d", rtype, want_reply); /* -R style forwarding */ if (strcmp(rtype, "tcpip-forward") == 0) { struct passwd *pw; struct Forward fwd; pw = the_authctxt->pw; if (pw == NULL || !the_authctxt->valid) fatal("server_input_global_request: no/invalid user"); memset(&fwd, 0, sizeof(fwd)); fwd.listen_host = packet_get_string(NULL); fwd.listen_port = (u_short)packet_get_int(); debug("server_input_global_request: tcpip-forward listen %s port %d", fwd.listen_host, fwd.listen_port); /* check permissions */ if ((options.allow_tcp_forwarding & FORWARD_REMOTE) == 0 || no_port_forwarding_flag || options.disable_forwarding || (!want_reply && fwd.listen_port == 0) || (fwd.listen_port != 0 && !bind_permitted(fwd.listen_port, pw->pw_uid))) { success = 0; packet_send_debug("Server has disabled port forwarding."); } else { /* Start listening on the port */ success = channel_setup_remote_fwd_listener(&fwd, &allocated_listen_port, &options.fwd_opts); } free(fwd.listen_host); if ((resp = sshbuf_new()) == NULL) fatal("%s: sshbuf_new", __func__); if (allocated_listen_port != 0 && (r = sshbuf_put_u32(resp, allocated_listen_port)) != 0) fatal("%s: sshbuf_put_u32: %s", __func__, ssh_err(r)); } else if (strcmp(rtype, "cancel-tcpip-forward") == 0) { struct Forward fwd; memset(&fwd, 0, sizeof(fwd)); fwd.listen_host = packet_get_string(NULL); fwd.listen_port = (u_short)packet_get_int(); debug("%s: cancel-tcpip-forward addr %s port %d", __func__, fwd.listen_host, fwd.listen_port); success = channel_cancel_rport_listener(&fwd); free(fwd.listen_host); } else if (strcmp(rtype, "streamlocal-forward@openssh.com") == 0) { struct Forward fwd; memset(&fwd, 0, sizeof(fwd)); fwd.listen_path = packet_get_string(NULL); debug("server_input_global_request: streamlocal-forward listen path %s", fwd.listen_path); /* check permissions */ if ((options.allow_streamlocal_forwarding & FORWARD_REMOTE) == 0 || no_port_forwarding_flag || options.disable_forwarding) { success = 0; packet_send_debug("Server has disabled port forwarding."); } else { /* Start listening on the socket */ success = channel_setup_remote_fwd_listener( &fwd, NULL, &options.fwd_opts); } free(fwd.listen_path); } else if 
(strcmp(rtype, "cancel-streamlocal-forward@openssh.com") == 0) { struct Forward fwd; memset(&fwd, 0, sizeof(fwd)); fwd.listen_path = packet_get_string(NULL); debug("%s: cancel-streamlocal-forward path %s", __func__, fwd.listen_path); success = channel_cancel_rport_listener(&fwd); free(fwd.listen_path); } else if (strcmp(rtype, "no-more-sessions@openssh.com") == 0) { no_more_sessions = 1; success = 1; } else if (strcmp(rtype, "hostkeys-prove-00@openssh.com") == 0) { success = server_input_hostkeys_prove(&resp); } if (want_reply) { packet_start(success ? SSH2_MSG_REQUEST_SUCCESS : SSH2_MSG_REQUEST_FAILURE); if (success && resp != NULL) ssh_packet_put_raw(active_state, sshbuf_ptr(resp), sshbuf_len(resp)); packet_send(); packet_write_wait(); } free(rtype); sshbuf_free(resp); return 0; } static int server_input_channel_req(int type, u_int32_t seq, void *ctxt) { Channel *c; int id, reply, success = 0; char *rtype; id = packet_get_int(); rtype = packet_get_string(NULL); reply = packet_get_char(); debug("server_input_channel_req: channel %d request %s reply %d", id, rtype, reply); if ((c = channel_lookup(id)) == NULL) packet_disconnect("server_input_channel_req: " "unknown channel %d", id); if (!strcmp(rtype, "eow@openssh.com")) { packet_check_eom(); chan_rcvd_eow(c); } else if ((c->type == SSH_CHANNEL_LARVAL || c->type == SSH_CHANNEL_OPEN) && strcmp(c->ctype, "session") == 0) success = session_input_channel_req(c, rtype); if (reply && !(c->flags & CHAN_CLOSE_SENT)) { packet_start(success ? SSH2_MSG_CHANNEL_SUCCESS : SSH2_MSG_CHANNEL_FAILURE); packet_put_int(c->remote_id); packet_send(); } free(rtype); return 0; } static void server_init_dispatch(void) { debug("server_init_dispatch"); dispatch_init(&dispatch_protocol_error); dispatch_set(SSH2_MSG_CHANNEL_CLOSE, &channel_input_oclose); dispatch_set(SSH2_MSG_CHANNEL_DATA, &channel_input_data); dispatch_set(SSH2_MSG_CHANNEL_EOF, &channel_input_ieof); dispatch_set(SSH2_MSG_CHANNEL_EXTENDED_DATA, &channel_input_extended_data); dispatch_set(SSH2_MSG_CHANNEL_OPEN, &server_input_channel_open); dispatch_set(SSH2_MSG_CHANNEL_OPEN_CONFIRMATION, &channel_input_open_confirmation); dispatch_set(SSH2_MSG_CHANNEL_OPEN_FAILURE, &channel_input_open_failure); dispatch_set(SSH2_MSG_CHANNEL_REQUEST, &server_input_channel_req); dispatch_set(SSH2_MSG_CHANNEL_WINDOW_ADJUST, &channel_input_window_adjust); dispatch_set(SSH2_MSG_GLOBAL_REQUEST, &server_input_global_request); /* client_alive */ dispatch_set(SSH2_MSG_CHANNEL_SUCCESS, &server_input_keep_alive); dispatch_set(SSH2_MSG_CHANNEL_FAILURE, &server_input_keep_alive); dispatch_set(SSH2_MSG_REQUEST_SUCCESS, &server_input_keep_alive); dispatch_set(SSH2_MSG_REQUEST_FAILURE, &server_input_keep_alive); /* rekeying */ dispatch_set(SSH2_MSG_KEXINIT, &kex_input_kexinit); }
./CrossVul/dataset_final_sorted/CWE-264/c/bad_4765_0
crossvul-cpp_data_good_5861_30
/* * Using hardware provided CRC32 instruction to accelerate the CRC32 disposal. * CRC32C polynomial:0x1EDC6F41(BE)/0x82F63B78(LE) * CRC32 is a new instruction in Intel SSE4.2, the reference can be found at: * http://www.intel.com/products/processor/manuals/ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual * Volume 2A: Instruction Set Reference, A-M * * Copyright (C) 2008 Intel Corporation * Authors: Austin Zhang <austin_zhang@linux.intel.com> * Kent Liu <kent.liu@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <crypto/internal/hash.h> #include <asm/cpufeature.h> #include <asm/cpu_device_id.h> #include <asm/i387.h> #include <asm/fpu-internal.h> #define CHKSUM_BLOCK_SIZE 1 #define CHKSUM_DIGEST_SIZE 4 #define SCALE_F sizeof(unsigned long) #ifdef CONFIG_X86_64 #define REX_PRE "0x48, " #else #define REX_PRE #endif #ifdef CONFIG_X86_64 /* * use carryless multiply version of crc32c when buffer * size is >= 512 (when eager fpu is enabled) or * >= 1024 (when eager fpu is disabled) to account * for fpu state save/restore overhead. */ #define CRC32C_PCL_BREAKEVEN_EAGERFPU 512 #define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024 asmlinkage unsigned int crc_pcl(const u8 *buffer, int len, unsigned int crc_init); static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU; #if defined(X86_FEATURE_EAGER_FPU) #define set_pcl_breakeven_point() \ do { \ if (!use_eager_fpu()) \ crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \ } while (0) #else #define set_pcl_breakeven_point() \ (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU) #endif #endif /* CONFIG_X86_64 */ static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length) { while (length--) { __asm__ __volatile__( ".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1" :"=S"(crc) :"0"(crc), "c"(*data) ); data++; } return crc; } static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len) { unsigned int iquotient = len / SCALE_F; unsigned int iremainder = len % SCALE_F; unsigned long *ptmp = (unsigned long *)p; while (iquotient--) { __asm__ __volatile__( ".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;" :"=S"(crc) :"0"(crc), "c"(*ptmp) ); ptmp++; } if (iremainder) crc = crc32c_intel_le_hw_byte(crc, (unsigned char *)ptmp, iremainder); return crc; } /* * Setting the seed allows arbitrary accumulators and flexible XOR policy * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. 
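*
* Worked example (illustrative, not part of the original comment): final()
* returns the bitwise complement of the accumulator, so the default seed of
* ~0 yields the standard one-shot CRC32C.  To resume from a previously
* returned digest D, set the seed to D ^ ~0 (i.e. ~D) before hashing the
* remaining bytes.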
*/ static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key, unsigned int keylen) { u32 *mctx = crypto_shash_ctx(hash); if (keylen != sizeof(u32)) { crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } *mctx = le32_to_cpup((__le32 *)key); return 0; } static int crc32c_intel_init(struct shash_desc *desc) { u32 *mctx = crypto_shash_ctx(desc->tfm); u32 *crcp = shash_desc_ctx(desc); *crcp = *mctx; return 0; } static int crc32c_intel_update(struct shash_desc *desc, const u8 *data, unsigned int len) { u32 *crcp = shash_desc_ctx(desc); *crcp = crc32c_intel_le_hw(*crcp, data, len); return 0; } static int __crc32c_intel_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { *(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len)); return 0; } static int crc32c_intel_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_intel_finup(shash_desc_ctx(desc), data, len, out); } static int crc32c_intel_final(struct shash_desc *desc, u8 *out) { u32 *crcp = shash_desc_ctx(desc); *(__le32 *)out = ~cpu_to_le32p(crcp); return 0; } static int crc32c_intel_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_intel_finup(crypto_shash_ctx(desc->tfm), data, len, out); } static int crc32c_intel_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); *key = ~0; return 0; } #ifdef CONFIG_X86_64 static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data, unsigned int len) { u32 *crcp = shash_desc_ctx(desc); /* * use faster PCL version if datasize is large enough to * overcome kernel fpu state save/restore overhead */ if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) { kernel_fpu_begin(); *crcp = crc_pcl(data, len, *crcp); kernel_fpu_end(); } else *crcp = crc32c_intel_le_hw(*crcp, data, len); return 0; } static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) { kernel_fpu_begin(); *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp)); kernel_fpu_end(); } else *(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len)); return 0; } static int crc32c_pcl_intel_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_pcl_intel_finup(shash_desc_ctx(desc), data, len, out); } static int crc32c_pcl_intel_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_pcl_intel_finup(crypto_shash_ctx(desc->tfm), data, len, out); } #endif /* CONFIG_X86_64 */ static struct shash_alg alg = { .setkey = crc32c_intel_setkey, .init = crc32c_intel_init, .update = crc32c_intel_update, .final = crc32c_intel_final, .finup = crc32c_intel_finup, .digest = crc32c_intel_digest, .descsize = sizeof(u32), .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", .cra_driver_name = "crc32c-intel", .cra_priority = 200, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, .cra_init = crc32c_intel_cra_init, } }; static const struct x86_cpu_id crc32c_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_XMM4_2), {} }; MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id); static int __init crc32c_intel_mod_init(void) { if (!x86_match_cpu(crc32c_cpu_id)) return -ENODEV; #ifdef CONFIG_X86_64 if (cpu_has_pclmulqdq) { alg.update = crc32c_pcl_intel_update; alg.finup = crc32c_pcl_intel_finup; alg.digest = crc32c_pcl_intel_digest; set_pcl_breakeven_point(); } #endif return crypto_register_shash(&alg); } static 
void __exit crc32c_intel_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(crc32c_intel_mod_init); module_exit(crc32c_intel_mod_fini); MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.com>"); MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware."); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("crc32c"); MODULE_ALIAS_CRYPTO("crc32c-intel");
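/*
 * Illustrative user-space reference (not part of this driver): a plain
 * bit-at-a-time CRC32C over the same reflected polynomial (0x82F63B78)
 * that the SSE4.2 crc32 instruction implements, handy for sanity-checking
 * the accelerated paths above.  crc32c_sw() is a hypothetical name.
 */
#include <stdio.h>
#include <stddef.h>

static unsigned int crc32c_sw(unsigned int crc, const unsigned char *p,
			      size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			/* shift right; XOR in the polynomial if bit 0 was set */
			crc = (crc >> 1) ^ (0x82F63B78 & (0U - (crc & 1)));
	}
	return crc;
}

int main(void)
{
	const unsigned char msg[9] = "123456789";
	/* Seed with ~0 and complement the result, as the driver does. */
	unsigned int crc = ~crc32c_sw(~0U, msg, sizeof(msg));

	/* The published CRC-32C check value for "123456789" is 0xE3069283. */
	printf("crc32c(\"123456789\") = 0x%08X (%s)\n", crc,
	       crc == 0xE3069283 ? "ok" : "MISMATCH");
	return 0;
}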
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_30
crossvul-cpp_data_bad_5078_2
/* * Packet matching code. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cache.h> #include <linux/capability.h> #include <linux/skbuff.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/icmp.h> #include <net/ip.h> #include <net/compat.h> #include <asm/uaccess.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/err.h> #include <linux/cpumask.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/nf_log.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("IPv4 packet filter"); /*#define DEBUG_IP_FIREWALL*/ /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ /*#define DEBUG_IP_FIREWALL_USER*/ #ifdef DEBUG_IP_FIREWALL #define dprintf(format, args...) pr_info(format , ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_IP_FIREWALL_USER #define duprintf(format, args...) pr_info(format , ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define IP_NF_ASSERT(x) WARN_ON(!(x)) #else #define IP_NF_ASSERT(x) #endif #if 0 /* All the better to debug you with... */ #define static #define inline #endif void *ipt_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(ipt, IPT); } EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); /* Returns whether matches rule or not. */ /* Performance critical - called for every packet */ static inline bool ip_packet_match(const struct iphdr *ip, const char *indev, const char *outdev, const struct ipt_ip *ipinfo, int isfrag) { unsigned long ret; #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, IPT_INV_SRCIP) || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, IPT_INV_DSTIP)) { dprintf("Source or dest mismatch.\n"); dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr, ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n", &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr, ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : ""); return false; } ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask); if (FWINV(ret != 0, IPT_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, ipinfo->iniface, ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : ""); return false; } ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask); if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, ipinfo->outiface, ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : ""); return false; } /* Check specific protocol */ if (ipinfo->proto && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) { dprintf("Packet protocol %hi does not match %hi.%s\n", ip->protocol, ipinfo->proto, ipinfo->invflags & IPT_INV_PROTO ? 
" (INV)" : ""); return false; } /* If we have a fragment rule but the packet is not a fragment * then we return zero */ if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) { dprintf("Fragment rule but not fragment.%s\n", ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : ""); return false; } return true; } static bool ip_checkentry(const struct ipt_ip *ip) { if (ip->flags & ~IPT_F_MASK) { duprintf("Unknown flag bits set: %08X\n", ip->flags & ~IPT_F_MASK); return false; } if (ip->invflags & ~IPT_INV_MASK) { duprintf("Unknown invflag bits set: %08X\n", ip->invflags & ~IPT_INV_MASK); return false; } return true; } static unsigned int ipt_error(struct sk_buff *skb, const struct xt_action_param *par) { net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } /* Performance critical */ static inline struct ipt_entry * get_entry(const void *base, unsigned int offset) { return (struct ipt_entry *)(base + offset); } /* All zeroes == unconditional rule. */ /* Mildly perf critical (only if packet tracing is on) */ static inline bool unconditional(const struct ipt_entry *e) { static const struct ipt_ip uncond; return e->target_offset == sizeof(struct ipt_entry) && memcmp(&e->ip, &uncond, sizeof(uncond)) == 0; #undef FWINV } /* for const-correctness */ static inline const struct xt_entry_target * ipt_get_target_c(const struct ipt_entry *e) { return ipt_get_target((struct ipt_entry *)e); } #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) static const char *const hooknames[] = { [NF_INET_PRE_ROUTING] = "PREROUTING", [NF_INET_LOCAL_IN] = "INPUT", [NF_INET_FORWARD] = "FORWARD", [NF_INET_LOCAL_OUT] = "OUTPUT", [NF_INET_POST_ROUTING] = "POSTROUTING", }; enum nf_ip_trace_comments { NF_IP_TRACE_COMMENT_RULE, NF_IP_TRACE_COMMENT_RETURN, NF_IP_TRACE_COMMENT_POLICY, }; static const char *const comments[] = { [NF_IP_TRACE_COMMENT_RULE] = "rule", [NF_IP_TRACE_COMMENT_RETURN] = "return", [NF_IP_TRACE_COMMENT_POLICY] = "policy", }; static struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = 4, .logflags = NF_LOG_MASK, }, }, }; /* Mildly perf critical (only if packet tracing is on) */ static inline int get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ipt_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (unconditional(s) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? 
comments[NF_IP_TRACE_COMMENT_POLICY] : comments[NF_IP_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; } static void trace_packet(struct net *net, const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, const struct xt_table_info *private, const struct ipt_entry *e) { const struct ipt_entry *root; const char *hookname, *chainname, *comment; const struct ipt_entry *iter; unsigned int rulenum = 0; root = get_entry(private->entries, private->hook_entry[hook]); hookname = chainname = hooknames[hook]; comment = comments[NF_IP_TRACE_COMMENT_RULE]; xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) if (get_chainname_rulenum(iter, e, hookname, &chainname, &comment, &rulenum) != 0) break; nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", tablename, chainname, comment, rulenum); } #endif static inline struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) { return (void *)entry + entry->next_offset; } /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int ipt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); const struct iphdr *ip; /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ipt_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; ip = ip_hdr(skb); indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; acpar.thoff = ip_hdrlen(skb); acpar.hotdrop = false; acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.family = NFPROTO_IPV4; acpar.hooknum = hook; IP_NF_ASSERT(table->valid_hooks & (1 << hook)); local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; cpu = smp_processor_id(); /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); table_base = private->entries; jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. 
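*
* Concretely, the check below offsets the jumpstack pointer into a second
* block of private->stacksize slots whenever nf_skb_duplicated marks this
* skb as a TEE duplicate, so the original traversal's stack is untouched.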
*/ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); pr_debug("Entering %s(hook %u), UF %p\n", table->name, hook, get_entry(table_base, private->underflow[hook])); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; IP_NF_ASSERT(e); if (!ip_packet_match(ip, indev, outdev, &e->ip, acpar.fragoff)) { no_match: e = ipt_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ipt_get_target(e); IP_NF_ASSERT(t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) { e = get_entry(table_base, private->underflow[hook]); pr_debug("Underflow (this is normal) " "to %p\n", e); } else { e = jumpstack[--stackidx]; pr_debug("Pulled %p out from pos %u\n", e, stackidx); e = ipt_next_entry(e); } continue; } if (table_base + v != ipt_next_entry(e) && !(e->ip.flags & IPT_F_GOTO)) { jumpstack[stackidx++] = e; pr_debug("Pushed %p into pos %u\n", e, stackidx - 1); } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); /* Target might have changed stuff. */ ip = ip_hdr(skb); if (verdict == XT_CONTINUE) e = ipt_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); pr_debug("Exiting %s; sp at %u\n", __func__, stackidx); xt_write_recseq_end(addend); local_bh_enable(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; #else if (acpar.hotdrop) return NF_DROP; else return verdict; #endif } static bool find_jump_target(const struct xt_table_info *t, const struct ipt_entry *target) { struct ipt_entry *iter; xt_entry_foreach(iter, t->entries, t->size) { if (iter == target) return true; } return false; } /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)ipt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { pr_err("iptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. 
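* That is, a STANDARD target with a negative verdict in an unconditional
* rule, or a rule this walk has already visited: either way, backtrack
* through the back pointers saved in counters.pcnt.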
*/ if ((unconditional(e) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last big jump. */ do { e->comefrom ^= (1<<NF_INET_NUMHOOKS); #ifdef DEBUG_IP_FIREWALL_USER if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { duprintf("Back unset " "on hook %u " "rule %u\n", hook, pos); } #endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct ipt_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct ipt_entry *) (entry0 + pos + size); if (pos + size >= newinfo->size) return 0; e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct ipt_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); e = (struct ipt_entry *) (entry0 + newpos); if (!find_jump_target(newinfo, e)) return 0; } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; if (newpos >= newinfo->size) return 0; } e = (struct ipt_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static void cleanup_match(struct xt_entry_match *m, struct net *net) { struct xt_mtdtor_param par; par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV4; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); } static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ipt_ip *ip = par->entryinfo; int ret; par->match = m->u.kernel.match; par->matchinfo = m->data; ret = xt_check_match(par, m->u.match_size - sizeof(*m), ip->proto, ip->invflags & IPT_INV_PROTO); if (ret < 0) { duprintf("check failed for `%s'.\n", par->match->name); return ret; } return 0; } static int find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { struct xt_match *match; int ret; match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("find_check_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; ret = check_match(m, par); if (ret) goto err; return 0; err: module_put(m->u.kernel.match->me); return ret; } static int check_target(struct ipt_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = ipt_get_target(e); struct xt_tgchk_param par = { .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_IPV4, }; int ret; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), e->ip.proto, e->ip.invflags & IPT_INV_PROTO); if (ret < 0) { duprintf("check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static int find_check_entry(struct ipt_entry *e, struct net *net, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if 
(IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV4; xt_ematch_foreach(ematch, e) { ret = find_check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } t = ipt_get_target(e); target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto cleanup_matches; } t->u.kernel.target = target; ret = check_target(e, net, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static bool check_underflow(const struct ipt_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(e)) return false; t = ipt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; } static int check_entry_size_and_hooks(struct ipt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } if (!ip_checkentry(&e->ip)) return -EINVAL; err = xt_check_entry_offsets(e, e->target_offset, e->next_offset); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_debug("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } static void cleanup_entry(struct ipt_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) cleanup_match(ematch, net); t = ipt_get_target(e); par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV4; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(e->counters.pcnt); } /* Checks and translates the user-supplied table segment (held in newinfo) */ static int translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ipt_replace *repl) { struct ipt_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. 
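* (0xFFFFFFFF), so that any valid hook the ruleset fails to assign can be
* detected after the walk below.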
*/ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) return ret; ++i; if (strcmp(ipt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ipt_entry *iter; unsigned int cpu; unsigned int i; for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; /* macro does multi eval of i */ } } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct ipt_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; const void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; const struct xt_entry_match *m; const struct xt_entry_target *t; e = (struct ipt_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct ipt_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ipt_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (copy_to_user(userptr + off + i + offsetof(struct xt_entry_match, u.user.name), m->u.kernel.match->name, strlen(m->u.kernel.match->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } t = ipt_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(AF_INET, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(AF_INET, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; } static int compat_calc_entry(const struct ipt_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_match *ematch; const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - base; xt_ematch_foreach(ematch, e) off += xt_compat_match_offset(ematch->u.kernel.match); t = ipt_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_INET_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct ipt_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct ipt_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct ipt_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(AF_INET, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ipt_getinfo)) { duprintf("length %u != %zu\n", *len, sizeof(struct ipt_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET); #endif t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), "iptable_%s", name); if (!IS_ERR_OR_NULL(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = 
compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET); #endif return ret; } static int get_entries(struct net *net, struct ipt_get_entries __user *uptr, const int *len) { int ret; struct ipt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct ipt_get_entries) + get.size) { duprintf("get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } get.name[sizeof(get.name) - 1] = '\0'; t = xt_find_table_lock(net, AF_INET, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; struct ipt_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), "iptable_%s", name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! 
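* (the valid_hooks mask supplied by the caller must match the table
* actually being replaced)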
*/ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n"); } vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct ipt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ipt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; const char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ipt_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, AF_INET, name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT struct compat_ipt_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ipt_entry entries[0]; }; static int compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ipt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = (struct compat_ipt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ipt_entry); *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ipt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ipt_ip *ip, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("compat_check_calc_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ipt_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ipt_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ipt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } if (!ip_checkentry(&e->ip)) return -EINVAL; ret = 
xt_compat_check_entry_offsets(e, e->target_offset, e->next_offset); if (ret) return ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ip, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ipt_get_target(e); target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static int compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct ipt_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ipt_entry *)*dstptr; memcpy(de, e, sizeof(struct ipt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ipt_entry); *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ipt_get_target(e); target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) { struct xt_entry_match *ematch; struct xt_mtchk_param mtpar; unsigned int j; int ret = 0; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV4; xt_ematch_foreach(ematch, e) { ret = check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } ret = check_target(e, net, name); if (ret) goto cleanup_matches; return 0; cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static int translate_compat_table(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ipt_entry 
*iter0; struct ipt_entry *iter1; unsigned int size; int ret; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); xt_compat_init_offsets(AF_INET, number); /* Walk through entries, checking offsets. */ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { ret = compat_check_entry(iter1, net, name); if (ret != 0) break; ++i; if (strcmp(ipt_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
*/ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1, net); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); goto out; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_ipt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ipt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case IPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } struct compat_ipt_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_ipt_entry entrytable[0]; }; static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct ipt_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } static int compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, int *len) { int ret; struct compat_ipt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } 
get.name[sizeof(get.name) - 1] = '\0'; xt_compat_lock(AF_INET); t = xt_find_table_lock(net, AF_INET, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(AF_INET); module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; xt_compat_unlock(AF_INET); return ret; } static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case IPT_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_ipt_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case IPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case IPT_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case IPT_SO_GET_REVISION_MATCH: case IPT_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; if (cmd == IPT_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET, rev.name, rev.revision, target, &ret), "ipt_%s", rev.name); break; } default: duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static void __ipt_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct ipt_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; new_table = 
xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __ipt_unregister_table(net, new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void ipt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ipt_unregister_table(net, table); } /* Returns 1 if the type and code is matched by the range, 0 otherwise */ static inline bool icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, u_int8_t type, u_int8_t code, bool invert) { return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code)) ^ invert; } static bool icmp_match(const struct sk_buff *skb, struct xt_action_param *par) { const struct icmphdr *ic; struct icmphdr _icmph; const struct ipt_icmp *icmpinfo = par->matchinfo; /* Must not be a fragment. */ if (par->fragoff != 0) return false; ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); if (ic == NULL) { /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. */ duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } return icmp_type_code_match(icmpinfo->type, icmpinfo->code[0], icmpinfo->code[1], ic->type, ic->code, !!(icmpinfo->invflags&IPT_ICMP_INV)); } static int icmp_checkentry(const struct xt_mtchk_param *par) { const struct ipt_icmp *icmpinfo = par->matchinfo; /* Must specify no unknown invflags */ return (icmpinfo->invflags & ~IPT_ICMP_INV) ? 
-EINVAL : 0; } static struct xt_target ipt_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_IPV4, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = ipt_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_IPV4, }, }; static struct nf_sockopt_ops ipt_sockopts = { .pf = PF_INET, .set_optmin = IPT_BASE_CTL, .set_optmax = IPT_SO_SET_MAX+1, .set = do_ipt_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ipt_set_ctl, #endif .get_optmin = IPT_BASE_CTL, .get_optmax = IPT_SO_GET_MAX+1, .get = do_ipt_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ipt_get_ctl, #endif .owner = THIS_MODULE, }; static struct xt_match ipt_builtin_mt[] __read_mostly = { { .name = "icmp", .match = icmp_match, .matchsize = sizeof(struct ipt_icmp), .checkentry = icmp_checkentry, .proto = IPPROTO_ICMP, .family = NFPROTO_IPV4, }, }; static int __net_init ip_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_IPV4); } static void __net_exit ip_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_IPV4); } static struct pernet_operations ip_tables_net_ops = { .init = ip_tables_net_init, .exit = ip_tables_net_exit, }; static int __init ip_tables_init(void) { int ret; ret = register_pernet_subsys(&ip_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); if (ret < 0) goto err2; ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); if (ret < 0) goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ipt_sockopts); if (ret < 0) goto err5; pr_info("(C) 2000-2006 Netfilter Core Team\n"); return 0; err5: xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); err4: xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); err2: unregister_pernet_subsys(&ip_tables_net_ops); err1: return ret; } static void __exit ip_tables_fini(void) { nf_unregister_sockopt(&ipt_sockopts); xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); unregister_pernet_subsys(&ip_tables_net_ops); } EXPORT_SYMBOL(ipt_register_table); EXPORT_SYMBOL(ipt_unregister_table); EXPORT_SYMBOL(ipt_do_table); module_init(ip_tables_init); module_exit(ip_tables_fini);
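/*
 * Illustrative sketch (not part of ip_tables.c): compat_copy_entry_from_user()
 * above rewrites every stored offset while expanding 32-bit (compat) entries
 * to the native layout.  The entry grows by some delta, and
 * "e->target_offset - (origsize - *size)" adds the accumulated growth to each
 * offset.  The minimal, self-contained userspace version below shows only that
 * arithmetic; struct compat_entry_hdr, struct native_entry_hdr and
 * translate_offsets() are hypothetical names invented for this sketch.
 */
#include <stdio.h>

struct compat_entry_hdr {
	unsigned int target_offset;
	unsigned int next_offset;
};

struct native_entry_hdr {
	unsigned long pad;		/* native-only field: the entry grows */
	unsigned int target_offset;
	unsigned int next_offset;
};

/* Expand one entry and shift its internal offsets by the growth. */
static void translate_offsets(const struct compat_entry_hdr *src,
			      struct native_entry_hdr *dst)
{
	unsigned int delta = sizeof(*dst) - sizeof(*src);

	dst->pad = 0;
	dst->target_offset = src->target_offset + delta;
	dst->next_offset = src->next_offset + delta;
}

int main(void)
{
	struct compat_entry_hdr c = { .target_offset = 112, .next_offset = 152 };
	struct native_entry_hdr n;

	translate_offsets(&c, &n);
	printf("target_offset %u -> %u, next_offset %u -> %u\n",
	       c.target_offset, n.target_offset, c.next_offset, n.next_offset);
	return 0;
}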
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5078_2
crossvul-cpp_data_good_2423_3
/* * linux/arch/arm/kernel/process.c * * Copyright (C) 1996-2000 Russell King - Converted to ARM. * Original Copyright (C) 1995 Linus Torvalds * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <stdarg.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/user.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/interrupt.h> #include <linux/kallsyms.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/elfcore.h> #include <linux/pm.h> #include <linux/tick.h> #include <linux/utsname.h> #include <linux/uaccess.h> #include <linux/random.h> #include <linux/hw_breakpoint.h> #include <linux/cpuidle.h> #include <linux/leds.h> #include <asm/cacheflush.h> #include <asm/idmap.h> #include <asm/processor.h> #include <asm/thread_notify.h> #include <asm/stacktrace.h> #include <asm/mach/time.h> #include <asm/tls.h> #ifdef CONFIG_CC_STACKPROTECTOR #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif static const char *processor_modes[] = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" , "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" }; static const char *isa_modes[] = { "ARM" , "Thumb" , "Jazelle", "ThumbEE" }; extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); typedef void (*phys_reset_t)(unsigned long); /* * A temporary stack to use for CPU reset. This is static so that we * don't clobber it with the identity mapping. When running with this * stack, any references to the current task *will not work* so you * should really do as little as possible before jumping to your reset * code. */ static u64 soft_restart_stack[16]; static void __soft_restart(void *addr) { phys_reset_t phys_reset; /* Take out a flat memory mapping. */ setup_mm_for_reboot(); /* Clean and invalidate caches */ flush_cache_all(); /* Turn off caching */ cpu_proc_fin(); /* Push out any further dirty data, and ensure cache is empty */ flush_cache_all(); /* Switch to the identity mapping. */ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); phys_reset((unsigned long)addr); /* Should never get here. */ BUG(); } void soft_restart(unsigned long addr) { u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack); /* Disable interrupts first */ local_irq_disable(); local_fiq_disable(); /* Disable the L2 if we're the last man standing. */ if (num_online_cpus() == 1) outer_disable(); /* Change to the new stack and continue with the reset. */ call_with_stack(__soft_restart, (void *)addr, (void *)stack); /* Should never get here. */ BUG(); } static void null_restart(char mode, const char *cmd) { } /* * Function pointers to optional machine specific functions */ void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); void (*arm_pm_restart)(char str, const char *cmd) = null_restart; EXPORT_SYMBOL_GPL(arm_pm_restart); /* * This is our default idle handler. 
*/ void (*arm_pm_idle)(void); static void default_idle(void) { if (arm_pm_idle) arm_pm_idle(); else cpu_do_idle(); local_irq_enable(); } void arch_cpu_idle_prepare(void) { local_fiq_enable(); } void arch_cpu_idle_enter(void) { ledtrig_cpu(CPU_LED_IDLE_START); #ifdef CONFIG_PL310_ERRATA_769419 wmb(); #endif } void arch_cpu_idle_exit(void) { ledtrig_cpu(CPU_LED_IDLE_END); } #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) { cpu_die(); } #endif /* * Called from the core idle loop. */ void arch_cpu_idle(void) { if (cpuidle_idle_call()) default_idle(); } static char reboot_mode = 'h'; int __init reboot_setup(char *str) { reboot_mode = str[0]; return 1; } __setup("reboot=", reboot_setup); void machine_shutdown(void) { #ifdef CONFIG_SMP smp_send_stop(); #endif } void machine_halt(void) { machine_shutdown(); local_irq_disable(); while (1); } void machine_power_off(void) { machine_shutdown(); if (pm_power_off) pm_power_off(); } void machine_restart(char *cmd) { machine_shutdown(); arm_pm_restart(reboot_mode, cmd); /* Give a grace period for failure to restart of 1s */ mdelay(1000); /* Whoops - the platform was unable to reboot. Tell the user! */ printk("Reboot failed -- System halted\n"); local_irq_disable(); while (1); } void __show_regs(struct pt_regs *regs) { unsigned long flags; char buf[64]; show_regs_print_info(KERN_DEFAULT); print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("LR is at %s\n", regs->ARM_lr); printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" "sp : %08lx ip : %08lx fp : %08lx\n", regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, regs->ARM_sp, regs->ARM_ip, regs->ARM_fp); printk("r10: %08lx r9 : %08lx r8 : %08lx\n", regs->ARM_r10, regs->ARM_r9, regs->ARM_r8); printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4); printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0); flags = regs->ARM_cpsr; buf[0] = flags & PSR_N_BIT ? 'N' : 'n'; buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z'; buf[2] = flags & PSR_C_BIT ? 'C' : 'c'; buf[3] = flags & PSR_V_BIT ? 'V' : 'v'; buf[4] = '\0'; printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", buf, interrupts_enabled(regs) ? "n" : "ff", fast_interrupts_enabled(regs) ? "n" : "ff", processor_modes[processor_mode(regs)], isa_modes[isa_mode(regs)], get_fs() == get_ds() ? "kernel" : "user"); #ifdef CONFIG_CPU_CP15 { unsigned int ctrl; buf[0] = '\0'; #ifdef CONFIG_CPU_CP15_MMU { unsigned int transbase, dac; asm("mrc p15, 0, %0, c2, c0\n\t" "mrc p15, 0, %1, c3, c0\n" : "=r" (transbase), "=r" (dac)); snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", transbase, dac); } #endif asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); printk("Control: %08x%s\n", ctrl, buf); } #endif } void show_regs(struct pt_regs * regs) { printk("\n"); __show_regs(regs); dump_stack(); } ATOMIC_NOTIFIER_HEAD(thread_notify_head); EXPORT_SYMBOL_GPL(thread_notify_head); /* * Free current thread data structures etc.. 
*/ void exit_thread(void) { thread_notify(THREAD_NOTIFY_EXIT, current_thread_info()); } void flush_thread(void) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; flush_ptrace_hw_breakpoint(tsk); memset(thread->used_cp, 0, sizeof(thread->used_cp)); memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); thread_notify(THREAD_NOTIFY_FLUSH, thread); } void release_thread(struct task_struct *dead_task) { } asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); int copy_thread(unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz, struct task_struct *p) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; if (stack_start) childregs->ARM_sp = stack_start; } else { memset(childregs, 0, sizeof(struct pt_regs)); thread->cpu_context.r4 = stk_sz; thread->cpu_context.r5 = stack_start; childregs->ARM_cpsr = SVC_MODE; } thread->cpu_context.pc = (unsigned long)ret_from_fork; thread->cpu_context.sp = (unsigned long)childregs; clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) thread->tp_value[0] = childregs->ARM_r3; thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); return 0; } /* * Fill in the task's elfregs structure for a core dump. */ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) { elf_core_copy_regs(elfregs, task_pt_regs(t)); return 1; } /* * fill in the fpe structure for a core dump... */ int dump_fpu (struct pt_regs *regs, struct user_fp *fp) { struct thread_info *thread = current_thread_info(); int used_math = thread->used_cp[1] | thread->used_cp[2]; if (used_math) memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); return used_math != 0; } EXPORT_SYMBOL(dump_fpu); unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); frame.lr = 0; /* recovered from the stack */ frame.pc = thread_saved_pc(p); do { int ret = unwind_frame(&frame); if (ret < 0) return 0; if (!in_sched_functions(frame.pc)) return frame.pc; } while (count ++ < 16); return 0; } unsigned long arch_randomize_brk(struct mm_struct *mm) { unsigned long range_end = mm->brk + 0x02000000; return randomize_range(mm->brk, range_end, 0) ? : mm->brk; } #ifdef CONFIG_MMU /* * The vectors page is always readable from user space for the * atomic helpers and the signal restart code. Insert it into the * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. */ static struct vm_area_struct gate_vma = { .vm_start = 0xffff0000, .vm_end = 0xffff0000 + PAGE_SIZE, .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, .vm_mm = &init_mm, }; static int __init gate_vma_init(void) { gate_vma.vm_page_prot = PAGE_READONLY_EXEC; return 0; } arch_initcall(gate_vma_init); struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { return &gate_vma; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); } int in_gate_area_no_mm(unsigned long addr) { return in_gate_area(NULL, addr); } const char *arch_vma_name(struct vm_area_struct *vma) { return (vma == &gate_vma) ? "[vectors]" : NULL; } #endif
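/*
 * Illustrative sketch (not part of process.c): get_wchan() above unwinds at
 * most 16 saved frames and stops at the first PC outside the scheduler, so a
 * corrupted frame chain can never make it loop forever.  Below is the same
 * bounded-walk pattern reduced to a plain linked chain; struct frame,
 * pc_is_interesting() and bounded_walk() are hypothetical names for this
 * sketch only.
 */
#include <stdio.h>
#include <stddef.h>

struct frame {
	struct frame *up;	/* caller's frame; may be cyclic if corrupted */
	unsigned long pc;
};

static int pc_is_interesting(unsigned long pc)
{
	return pc >= 0x1000;	/* stand-in for !in_sched_functions(pc) */
}

/* Return the first interesting pc, or 0 after at most 16 hops. */
static unsigned long bounded_walk(const struct frame *f)
{
	int count = 0;

	while (f && count++ < 16) {
		if (pc_is_interesting(f->pc))
			return f->pc;
		f = f->up;
	}
	return 0;
}

int main(void)
{
	struct frame outer = { .up = NULL, .pc = 0x2000 };
	struct frame inner = { .up = &outer, .pc = 0x10 };

	printf("wchan ~ %#lx\n", bounded_walk(&inner));
	return 0;
}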
./CrossVul/dataset_final_sorted/CWE-264/c/good_2423_3
crossvul-cpp_data_good_2304_0
#include <linux/kernel.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/user.h> #include <linux/regset.h> #include <linux/syscalls.h> #include <asm/uaccess.h> #include <asm/desc.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/proto.h> #include "tls.h" /* * sys_alloc_thread_area: get a yet unused TLS descriptor index. */ static int get_free_idx(void) { struct thread_struct *t = &current->thread; int idx; for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) if (desc_empty(&t->tls_array[idx])) return idx + GDT_ENTRY_TLS_MIN; return -ESRCH; } static bool tls_desc_okay(const struct user_desc *info) { if (LDT_empty(info)) return true; /* * espfix is required for 16-bit data segments, but espfix * only works for LDT segments. */ if (!info->seg_32bit) return false; return true; } static void set_tls_desc(struct task_struct *p, int idx, const struct user_desc *info, int n) { struct thread_struct *t = &p->thread; struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN]; int cpu; /* * We must not get preempted while modifying the TLS. */ cpu = get_cpu(); while (n-- > 0) { if (LDT_empty(info)) desc->a = desc->b = 0; else fill_ldt(desc, info); ++info; ++desc; } if (t == &current->thread) load_TLS(t, cpu); put_cpu(); } /* * Set a given TLS descriptor: */ int do_set_thread_area(struct task_struct *p, int idx, struct user_desc __user *u_info, int can_allocate) { struct user_desc info; if (copy_from_user(&info, u_info, sizeof(info))) return -EFAULT; if (!tls_desc_okay(&info)) return -EINVAL; if (idx == -1) idx = info.entry_number; /* * index -1 means the kernel should try to find and * allocate an empty descriptor: */ if (idx == -1 && can_allocate) { idx = get_free_idx(); if (idx < 0) return idx; if (put_user(idx, &u_info->entry_number)) return -EFAULT; } if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; set_tls_desc(p, idx, &info, 1); return 0; } SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info) { return do_set_thread_area(current, -1, u_info, 1); } /* * Get the current Thread-Local Storage area: */ static void fill_user_desc(struct user_desc *info, int idx, const struct desc_struct *desc) { memset(info, 0, sizeof(*info)); info->entry_number = idx; info->base_addr = get_desc_base(desc); info->limit = get_desc_limit(desc); info->seg_32bit = desc->d; info->contents = desc->type >> 2; info->read_exec_only = !(desc->type & 2); info->limit_in_pages = desc->g; info->seg_not_present = !desc->p; info->useable = desc->avl; #ifdef CONFIG_X86_64 info->lm = desc->l; #endif } int do_get_thread_area(struct task_struct *p, int idx, struct user_desc __user *u_info) { struct user_desc info; if (idx == -1 && get_user(idx, &u_info->entry_number)) return -EFAULT; if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; fill_user_desc(&info, idx, &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]); if (copy_to_user(u_info, &info, sizeof(info))) return -EFAULT; return 0; } SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info) { return do_get_thread_area(current, -1, u_info); } int regset_tls_active(struct task_struct *target, const struct user_regset *regset) { struct thread_struct *t = &target->thread; int n = GDT_ENTRY_TLS_ENTRIES; while (n > 0 && desc_empty(&t->tls_array[n - 1])) --n; return n; } int regset_tls_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct desc_struct *tls; if (pos >= GDT_ENTRY_TLS_ENTRIES * 
sizeof(struct user_desc) || (pos % sizeof(struct user_desc)) != 0 || (count % sizeof(struct user_desc)) != 0) return -EINVAL; pos /= sizeof(struct user_desc); count /= sizeof(struct user_desc); tls = &target->thread.tls_array[pos]; if (kbuf) { struct user_desc *info = kbuf; while (count-- > 0) fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++, tls++); } else { struct user_desc __user *u_info = ubuf; while (count-- > 0) { struct user_desc info; fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++); if (__copy_to_user(u_info++, &info, sizeof(info))) return -EFAULT; } } return 0; } int regset_tls_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; const struct user_desc *info; int i; if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || (pos % sizeof(struct user_desc)) != 0 || (count % sizeof(struct user_desc)) != 0) return -EINVAL; if (kbuf) info = kbuf; else if (__copy_from_user(infobuf, ubuf, count)) return -EFAULT; else info = infobuf; for (i = 0; i < count / sizeof(struct user_desc); i++) if (!tls_desc_okay(info + i)) return -EINVAL; set_tls_desc(target, GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)), info, count / sizeof(struct user_desc)); return 0; }
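/*
 * Illustrative sketch (not part of tls.c): regset_tls_get() and
 * regset_tls_set() above validate (pos, count) for alignment and array bounds
 * before any element of tls_array is touched.  Below is the same
 * validate-then-index pattern in standalone form; N_SLOTS, struct slot and
 * slots_read() are hypothetical names for this sketch, which also bounds
 * pos + count explicitly.
 */
#include <stdio.h>
#include <string.h>

#define N_SLOTS 3
struct slot { unsigned int a, b; };
static struct slot table[N_SLOTS];

/* Copy `count` bytes starting at byte offset `pos`; -1 on a bad range. */
static int slots_read(unsigned int pos, unsigned int count, void *out)
{
	if (pos >= N_SLOTS * sizeof(struct slot) ||
	    count > N_SLOTS * sizeof(struct slot) - pos ||
	    (pos % sizeof(struct slot)) != 0 ||
	    (count % sizeof(struct slot)) != 0)
		return -1;

	memcpy(out, (const char *)table + pos, count);
	return 0;
}

int main(void)
{
	struct slot buf[N_SLOTS];

	printf("in range:  %d\n",
	       slots_read(sizeof(struct slot), sizeof(struct slot), buf));
	printf("past end:  %d\n",
	       slots_read(2 * sizeof(struct slot), 2 * sizeof(struct slot), buf));
	return 0;
}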
./CrossVul/dataset_final_sorted/CWE-264/c/good_2304_0
crossvul-cpp_data_good_5861_9
/* * Cryptographic API. * * s390 implementation of the GHASH algorithm for GCM (Galois/Counter Mode). * * Copyright IBM Corp. 2011 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com> */ #include <crypto/internal/hash.h> #include <linux/module.h> #include "crypt_s390.h" #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 struct ghash_ctx { u8 icv[16]; u8 key[16]; }; struct ghash_desc_ctx { u8 buffer[GHASH_BLOCK_SIZE]; u32 bytes; }; static int ghash_init(struct shash_desc *desc) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); memset(dctx, 0, sizeof(*dctx)); return 0; } static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); if (keylen != GHASH_BLOCK_SIZE) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } memcpy(ctx->key, key, GHASH_BLOCK_SIZE); memset(ctx->icv, 0, GHASH_BLOCK_SIZE); return 0; } static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); unsigned int n; u8 *buf = dctx->buffer; int ret; if (dctx->bytes) { u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes); n = min(srclen, dctx->bytes); dctx->bytes -= n; srclen -= n; memcpy(pos, src, n); src += n; if (!dctx->bytes) { ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE); if (ret != GHASH_BLOCK_SIZE) return -EIO; } } n = srclen & ~(GHASH_BLOCK_SIZE - 1); if (n) { ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n); if (ret != n) return -EIO; src += n; srclen -= n; } if (srclen) { dctx->bytes = GHASH_BLOCK_SIZE - srclen; memcpy(buf, src, srclen); } return 0; } static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) { u8 *buf = dctx->buffer; int ret; if (dctx->bytes) { u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes); memset(pos, 0, dctx->bytes); ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE); if (ret != GHASH_BLOCK_SIZE) return -EIO; } dctx->bytes = 0; return 0; } static int ghash_final(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); int ret; ret = ghash_flush(ctx, dctx); if (!ret) memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE); return ret; } static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, .final = ghash_final, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "ghash-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ghash_ctx), .cra_module = THIS_MODULE, }, }; static int __init ghash_mod_init(void) { if (!crypt_s390_func_available(KIMD_GHASH, CRYPT_S390_MSA | CRYPT_S390_MSA4)) return -EOPNOTSUPP; return crypto_register_shash(&ghash_alg); } static void __exit ghash_mod_exit(void) { crypto_unregister_shash(&ghash_alg); } module_init(ghash_mod_init); module_exit(ghash_mod_exit); MODULE_ALIAS_CRYPTO("ghash"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
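/*
 * Illustrative sketch (not part of the s390 GHASH glue): ghash_update() above
 * keeps a partial block in the descriptor context, hands only whole 16-byte
 * blocks to the hardware, and stashes the tail for the next call.  Below is
 * the same buffering skeleton with the KIMD instruction replaced by a
 * hypothetical process_block(); struct ctx and update() are invented names
 * for this sketch.
 */
#include <stdio.h>
#include <string.h>

#define BLK 16

struct ctx {
	unsigned char buf[BLK];
	unsigned int bytes;	/* bytes still missing from buf, as in dctx */
};

static void process_block(const unsigned char *b)
{
	printf("block starting %02x %02x ...\n", b[0], b[1]);
}

static void update(struct ctx *c, const unsigned char *src, size_t len)
{
	/* First top up a previously buffered partial block. */
	if (c->bytes) {
		size_t n = len < c->bytes ? len : c->bytes;

		memcpy(c->buf + (BLK - c->bytes), src, n);
		c->bytes -= n;
		src += n;
		len -= n;
		if (!c->bytes)
			process_block(c->buf);
	}
	/* Then run every complete block straight from the caller's buffer. */
	while (len >= BLK) {
		process_block(src);
		src += BLK;
		len -= BLK;
	}
	/* Finally stash the remainder for the next update() call. */
	if (len) {
		memcpy(c->buf, src, len);
		c->bytes = BLK - (unsigned int)len;
	}
}

int main(void)
{
	struct ctx c = { .bytes = 0 };
	unsigned char data[40] = { 0xaa, 0xbb };

	update(&c, data, 10);	/* only buffered, no block processed yet */
	update(&c, data, 30);	/* completes one block, then one more */
	return 0;
}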
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_9
crossvul-cpp_data_bad_3470_2
/* * utils.c - Utility functions * * nspluginwrapper (C) 2005-2009 Gwenole Beauchesne * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #define _GNU_SOURCE 1 #include "sysdeps.h" #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <glib.h> /* <glib/ghash.h> */ #include <fcntl.h> #include <dirent.h> #include <unistd.h> #include <sys/resource.h> #include "utils.h" #include "rpc.h" #ifndef TEST_UTILS #define XP_UNIX 1 #define MOZ_X11 1 #include <npapi.h> #endif #define DEBUG 1 #include "debug.h" /* ====================================================================== */ /* === Hashes === */ /* ====================================================================== */ static GHashTable *g_ids; static inline void *id_key(uint32_t id) { return (void *)(uintptr_t)id; } bool id_init(void) { if (g_ids == NULL) g_ids = g_hash_table_new(NULL, NULL); return g_ids != NULL; } void id_kill(void) { if (g_ids) { g_hash_table_destroy(g_ids); g_ids = NULL; } } void id_link(int id, void *ptr) { g_hash_table_insert(g_ids, id_key(id), ptr); } int id_create(void *ptr) { static int id = 0; id_link(++id, ptr); return id; } bool id_remove(int id) { return g_hash_table_remove(g_ids, id_key(id)); } void *id_lookup(int id) { return g_hash_table_lookup(g_ids, id_key(id)); } static gboolean id_match_value(gpointer key, gpointer value, gpointer user_data) { if (value == *(gpointer *)user_data) { *(int *)user_data = (uintptr_t)key; return true; } return false; } int id_lookup_value(void *ptr) { return g_hash_table_find(g_ids, id_match_value, &ptr) ? 
(uintptr_t)ptr : -1; } /* ====================================================================== */ /* === String expansions === */ /* ====================================================================== */ #ifndef TEST_UTILS const char *string_of_NPError(int error) { const char *str; switch ((NPError)error) { #define _(VAL) case VAL: str = #VAL; break; _(NPERR_NO_ERROR); _(NPERR_GENERIC_ERROR); _(NPERR_INVALID_INSTANCE_ERROR); _(NPERR_INVALID_FUNCTABLE_ERROR); _(NPERR_MODULE_LOAD_FAILED_ERROR); _(NPERR_OUT_OF_MEMORY_ERROR); _(NPERR_INVALID_PLUGIN_ERROR); _(NPERR_INVALID_PLUGIN_DIR_ERROR); _(NPERR_INCOMPATIBLE_VERSION_ERROR); _(NPERR_INVALID_PARAM); _(NPERR_INVALID_URL); _(NPERR_FILE_NOT_FOUND); _(NPERR_NO_DATA); _(NPERR_STREAM_NOT_SEEKABLE); #undef _ default: str = "<unknown error>"; break; } return str; } const char *string_of_NPReason(int reason) { const char *str; switch ((NPReason)reason) { #define _(VAL) case VAL: str = #VAL; break; _(NPRES_DONE); _(NPRES_NETWORK_ERR); _(NPRES_USER_BREAK); #undef _ default: str = "<unknown reason>"; break; } return str; } const char *string_of_NPStreamType(int stype) { const char *str; switch (stype) { #define _(VAL) case VAL: str = #VAL; break; _(NP_NORMAL); _(NP_SEEK); _(NP_ASFILE); _(NP_ASFILEONLY); #undef _ default: str = "<unknown stream type>"; break; } return str; } const char *string_of_NPEvent_type(int type) { const char *str; switch (type) { #define _(VAL) case VAL: str = #VAL; break; #ifdef MOZ_X11 _(KeyPress); _(KeyRelease); _(ButtonPress); _(ButtonRelease); _(MotionNotify); _(EnterNotify); _(LeaveNotify); _(FocusIn); _(FocusOut); _(KeymapNotify); _(Expose); _(GraphicsExpose); _(NoExpose); _(VisibilityNotify); _(CreateNotify); _(DestroyNotify); _(UnmapNotify); _(MapNotify); _(MapRequest); _(ReparentNotify); _(ConfigureNotify); _(ConfigureRequest); _(GravityNotify); _(ResizeRequest); _(CirculateNotify); _(CirculateRequest); _(PropertyNotify); _(SelectionClear); _(SelectionRequest); _(SelectionNotify); _(ColormapNotify); _(ClientMessage); _(MappingNotify); #endif #undef _ default: str = "<unknown type>"; break; } return str; } const char *string_of_NPPVariable(int variable) { const char *str; switch (variable) { #define _(VAL) case VAL: str = #VAL; break; _(NPPVpluginNameString); _(NPPVpluginDescriptionString); _(NPPVpluginWindowBool); _(NPPVpluginTransparentBool); _(NPPVjavaClass); _(NPPVpluginWindowSize); _(NPPVpluginTimerInterval); _(NPPVpluginScriptableInstance); _(NPPVpluginScriptableIID); _(NPPVjavascriptPushCallerBool); _(NPPVpluginKeepLibraryInMemory); _(NPPVpluginNeedsXEmbed); _(NPPVpluginScriptableNPObject); _(NPPVformValue); #undef _ default: switch (variable & 0xff) { #define _(VAL, VAR) case VAL: str = #VAR; break _(10, NPPVpluginScriptableInstance); #undef _ default: str = "<unknown variable>"; break; } break; } return str; } const char *string_of_NPNVariable(int variable) { const char *str; switch (variable) { #define _(VAL) case VAL: str = #VAL; break; _(NPNVxDisplay); _(NPNVxtAppContext); _(NPNVnetscapeWindow); _(NPNVjavascriptEnabledBool); _(NPNVasdEnabledBool); _(NPNVisOfflineBool); _(NPNVserviceManager); _(NPNVDOMElement); _(NPNVDOMWindow); _(NPNVToolkit); _(NPNVSupportsXEmbedBool); _(NPNVWindowNPObject); _(NPNVPluginElementNPObject); _(NPNVSupportsWindowless); #undef _ default: switch (variable & 0xff) { #define _(VAL, VAR) case VAL: str = #VAR; break _(10, NPNVserviceManager); _(11, NPNVDOMElement); _(12, NPNVDOMWindow); _(13, NPNVToolkit); #undef _ default: str = "<unknown variable>"; break; } break; } return str; } 
#endif /* ====================================================================== */ /* === Misc utility functions === */ /* ====================================================================== */ void npw_perror(const char *prefix, int error) { if (prefix && *prefix) npw_printf("ERROR: %s: %s\n", prefix, npw_strerror(error)); else npw_printf("ERROR: %s\n", npw_strerror(error)); } const char *npw_strerror(int error) { if (error > -1100 && error <= -1000) // RPC errors return rpc_strerror(error); switch (error) { case 0: return "No error"; } return "Unknown error"; } char *npw_asprintf(const char *format, ...) { va_list args; va_start(args, format); int alen = vsnprintf(NULL, 0, format, args); va_end(args); char *str = malloc(alen+1); if (str == NULL) return NULL; va_start(args, format); int rlen = vsnprintf(str, alen+1, format, args); va_end(args); if (rlen != alen) { free(str); return NULL; } return str; } /* Return 1 + max value the system can allocate to a new fd */ static int get_open_max(void) { int open_max = -1; /* SCO OpenServer has an fcntl() to retrieve the highest *currently open* file descriptor. */ #ifdef F_GETHFDO if ((open_max = fcntl(-1, F_GETHFDO, 0)) >= 0) return open_max + 1; #endif /* IEEE Std 1003.1-2001/Cor 1-2002 clarified the fact that return value of sysconf(_SC_OPEN_MAX) may change if setrlimit() was called to set RLIMIT_NOFILE. So, to be on the safe side, we call getrlimit() first to get the soft limit. Note: getdtablesize() was a possibility but (i) it's equivalent to getrlimit(), and (ii) it is not recommended for new code. */ struct rlimit ru; if (getrlimit(RLIMIT_NOFILE, &ru) == 0) return ru.rlim_cur; if ((open_max = sysconf(_SC_OPEN_MAX)) >= 0) return open_max; /* XXX: simply guess something reasonable. */ return 256; } void npw_close_all_open_files(void) { const int min_fd = 3; #if defined(__linux__) DIR *dir = opendir("/proc/self/fd"); if (dir) { const int dfd = dirfd(dir); struct dirent *d; while ((d = readdir(dir)) != NULL) { char *end; long n = strtol(d->d_name, &end, 10); if (*end == '\0') { int fd = n; if (fd >= min_fd && fd != dfd) close(fd); } } closedir(dir); return; } #endif const int open_max = get_open_max(); for (int fd = min_fd; fd < open_max; fd++) close(fd); } /* ====================================================================== */ /* === Test Program === */ /* ====================================================================== */ #ifdef TEST_UTILS int main(void) { char *str; int i, id; id_init(); #define N_CELLS_PER_SLOT 8 #define N_STRINGS ((2 * N_CELLS_PER_SLOT) + 3) char *strings[N_STRINGS]; int ids[N_STRINGS]; for (i = 0; i < N_STRINGS; i++) { str = malloc(10); sprintf(str, "%d", i); strings[i] = str; id = id_create(str); if (id < 0) { fprintf(stderr, "ERROR: failed to allocate ID for '%s'\n", str); return 1; } ids[i] = id; } // basic lookup id = ids[N_CELLS_PER_SLOT / 2]; str = id_lookup(id); printf("str(%d) : '%s'\n", id, str); // basic unlink id = ids[N_CELLS_PER_SLOT]; if (id_remove(id) < 0) { fprintf(stderr, "ERROR: failed to unlink ID %d\n", id); return 1; } ids[N_CELLS_PER_SLOT] = 0; // remove slot 1 for (i = 0; i < N_CELLS_PER_SLOT; i++) { id = ids[N_CELLS_PER_SLOT + i]; if (id && id_remove(id) < 0) { fprintf(stderr, "ERROR: failed to unlink ID %d from slot 1\n", id); return 1; } ids[N_CELLS_PER_SLOT + i] = 0; } // basic lookup after slot removal id = ids[2 * N_CELLS_PER_SLOT]; str = id_lookup(id); printf("str(%d) : '%s'\n", id, str); // check slot 1 was removed and slots 0 & 2 linked together for (i = 0; i <
N_STRINGS; i++) { id = ids[i]; if (id && id_remove(id) < 0) { fprintf(stderr, "ERROR: failed to unlink ID %d for final cleanup\n", id); return 1; } } for (i = 0; i < N_STRINGS; i++) free(strings[i]); id_kill(); return 0; } #endif
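/*
 * Illustrative sketch (not part of utils.c): npw_asprintf() above uses the
 * classic two-pass vsnprintf() idiom -- a first pass with a NULL buffer to
 * measure, then a second pass into an exactly sized allocation.  Below is the
 * same idiom in standalone form; my_asprintf() is a hypothetical name for
 * this sketch.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *my_asprintf(const char *fmt, ...)
{
	va_list ap;
	int len;
	char *str;

	va_start(ap, fmt);
	len = vsnprintf(NULL, 0, fmt, ap);	/* measuring pass */
	va_end(ap);
	if (len < 0)
		return NULL;

	str = malloc((size_t)len + 1);
	if (str == NULL)
		return NULL;

	va_start(ap, fmt);
	vsnprintf(str, (size_t)len + 1, fmt, ap);	/* formatting pass */
	va_end(ap);
	return str;
}

int main(void)
{
	char *s = my_asprintf("%s-%d", "plugin", 7);

	if (s) {
		puts(s);
		free(s);
	}
	return 0;
}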
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3470_2
crossvul-cpp_data_bad_3438_1
/* * NET3 Protocol independent device support routines. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Derived from the non IP parts of dev.c 1.0.19 * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * * Additional Authors: * Florian la Roche <rzsfl@rz.uni-sb.de> * Alan Cox <gw4pts@gw4pts.ampr.org> * David Hinds <dahinds@users.sourceforge.net> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * Adam Sulmicki <adam@cfar.umd.edu> * Pekka Riikonen <priikone@poesidon.pspt.fi> * * Changes: * D.J. Barrow : Fixed bug where dev->refcnt gets set * to 2 if register_netdev gets called * before net_dev_init & also removed a * few lines of code in the process. * Alan Cox : device private ioctl copies fields back. * Alan Cox : Transmit queue code does relevant * stunts to keep the queue safe. * Alan Cox : Fixed double lock. * Alan Cox : Fixed promisc NULL pointer trap * ???????? : Support the full private ioctl range * Alan Cox : Moved ioctl permission check into * drivers * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI * Alan Cox : 100 backlog just doesn't cut it when * you start doing multicast video 8) * Alan Cox : Rewrote net_bh and list manager. * Alan Cox : Fix ETH_P_ALL echoback lengths. * Alan Cox : Took out transmit every packet pass * Saved a few bytes in the ioctl handler * Alan Cox : Network driver sets packet type before * calling netif_rx. Saves a function * call a packet. * Alan Cox : Hashed net_bh() * Richard Kooijman: Timestamp fixes. * Alan Cox : Wrong field in SIOCGIFDSTADDR * Alan Cox : Device lock protection. * Alan Cox : Fixed nasty side effect of device close * changes. * Rudi Cilibrasi : Pass the right thing to * set_mac_address() * Dave Miller : 32bit quantity for the device lock to * make it work out on a Sparc. * Bjorn Ekwall : Added KERNELD hack. * Alan Cox : Cleaned up the backlog initialise. * Craig Metz : SIOCGIFCONF fix if space for under * 1 device. * Thomas Bogendoerfer : Return ENODEV for dev_open, if there * is no device open function. * Andi Kleen : Fix error reporting for SIOCGIFCONF * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF * Cyrus Durgin : Cleaned for KMOD * Adam Sulmicki : Bug Fix : Network Device Unload * A network device unload needs to purge * the backlog queue. 
* Paul Rusty Russell : SIOCSIFNAME * Pekka Riikonen : Netdev boot-time settings code * Andrew Morton : Make unregister_netdevice wait * indefinitely on dev->refcnt * J Hadi Salim : - Backlog queue sampling * - netif_rx() feedback */ #include <asm/uaccess.h> #include <asm/system.h> #include <linux/bitops.h> #include <linux/capability.h> #include <linux/cpu.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/hash.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/notifier.h> #include <linux/skbuff.h> #include <net/net_namespace.h> #include <net/sock.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> #include <net/dst.h> #include <net/pkt_sched.h> #include <net/checksum.h> #include <net/xfrm.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/netpoll.h> #include <linux/rcupdate.h> #include <linux/delay.h> #include <net/wext.h> #include <net/iw_handler.h> #include <asm/current.h> #include <linux/audit.h> #include <linux/dmaengine.h> #include <linux/err.h> #include <linux/ctype.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <net/ip.h> #include <linux/ipv6.h> #include <linux/in.h> #include <linux/jhash.h> #include <linux/random.h> #include <trace/events/napi.h> #include <trace/events/net.h> #include <trace/events/skb.h> #include <linux/pci.h> #include <linux/inetdevice.h> #include "net-sysfs.h" /* Instead of increasing this, you should create a hash table. */ #define MAX_GRO_SKBS 8 /* This should be increased if a protocol with a bigger head is added. */ #define GRO_MAX_HEAD (MAX_HEADER + 128) /* * The list of packet types we will receive (as opposed to discard) * and the routines to invoke. * * Why 16. Because with 16 the only overlap we get on a hash of the * low nibble of the protocol value is RARP/SNAP/X.25. * * NOTE: That is no longer true with the addition of VLAN tags. Not * sure which should go first, but I bet it won't make much * difference if we are running VLANs. The good news is that * this protocol won't be in the list unless compiled in, so * the average user (w/out VLANs) will not be adversely affected. * --BLG * * 0800 IP * 8100 802.1Q VLAN * 0001 802.3 * 0002 AX.25 * 0004 802.2 * 8035 RARP * 0005 SNAP * 0805 X.25 * 0806 ARP * 8137 IPX * 0009 Localtalk * 86DD IPv6 */ #define PTYPE_HASH_SIZE (16) #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) static DEFINE_SPINLOCK(ptype_lock); static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; static struct list_head ptype_all __read_mostly; /* Taps */ /* * The @dev_base_head list is protected by @dev_base_lock and the rtnl * semaphore. * * Pure readers hold dev_base_lock for reading, or rcu_read_lock() * * Writers must hold the rtnl semaphore while they loop through the * dev_base_head list, and hold dev_base_lock for writing when they do the * actual updates. This allows pure readers to access the list even * while a writer is preparing to update it. * * To put it another way, dev_base_lock is held for writing only to * protect against pure readers; the rtnl semaphore provides the * protection against other writers. 
* * See, for example usages, register_netdevice() and * unregister_netdevice(), which must be called with the rtnl * semaphore held. */ DEFINE_RWLOCK(dev_base_lock); EXPORT_SYMBOL(dev_base_lock); static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) { unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; } static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) { return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; } static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS spin_lock(&sd->input_pkt_queue.lock); #endif } static inline void rps_unlock(struct softnet_data *sd) { #ifdef CONFIG_RPS spin_unlock(&sd->input_pkt_queue.lock); #endif } /* Device list insertion */ static int list_netdevice(struct net_device *dev) { struct net *net = dev_net(dev); ASSERT_RTNL(); write_lock_bh(&dev_base_lock); list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); hlist_add_head_rcu(&dev->index_hlist, dev_index_hash(net, dev->ifindex)); write_unlock_bh(&dev_base_lock); return 0; } /* Device list removal * caller must respect a RCU grace period before freeing/reusing dev */ static void unlist_netdevice(struct net_device *dev) { ASSERT_RTNL(); /* Unlink dev from the device chain */ write_lock_bh(&dev_base_lock); list_del_rcu(&dev->dev_list); hlist_del_rcu(&dev->name_hlist); hlist_del_rcu(&dev->index_hlist); write_unlock_bh(&dev_base_lock); } /* * Our notifier list */ static RAW_NOTIFIER_HEAD(netdev_chain); /* * Device drivers call our routines to queue packets here. We empty the * queue in the local softnet handler. */ DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); EXPORT_PER_CPU_SYMBOL(softnet_data); #ifdef CONFIG_LOCKDEP /* * register_netdevice() inits txq->_xmit_lock and sets lockdep class * according to dev->type */ static const unsigned short netdev_lock_type[] = {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; static const char *const netdev_lock_name[] = {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", 
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; static inline unsigned short netdev_lock_pos(unsigned short dev_type) { int i; for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) if (netdev_lock_type[i] == dev_type) return i; /* the last key is used by default */ return ARRAY_SIZE(netdev_lock_type) - 1; } static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, unsigned short dev_type) { int i; i = netdev_lock_pos(dev_type); lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], netdev_lock_name[i]); } static inline void netdev_set_addr_lockdep_class(struct net_device *dev) { int i; i = netdev_lock_pos(dev->type); lockdep_set_class_and_name(&dev->addr_list_lock, &netdev_addr_lock_key[i], netdev_lock_name[i]); } #else static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, unsigned short dev_type) { } static inline void netdev_set_addr_lockdep_class(struct net_device *dev) { } #endif /******************************************************************************* Protocol management and registration routines *******************************************************************************/ /* * Add a protocol ID to the list. Now that the input handler is * smarter we can dispense with all the messy stuff that used to be * here. * * BEWARE!!! Protocol handlers, mangling input packets, * MUST BE last in hash buckets and checking protocol handlers * MUST start from promiscuous ptype_all chain in net_bh. * It is true now, do not change it. * Explanation follows: if protocol handler, mangling packet, will * be the first on list, it is not able to sense, that packet * is cloned and should be copied-on-write, so that it will * change it and subsequent readers will get broken packet. * --ANK (980803) */ static inline struct list_head *ptype_head(const struct packet_type *pt) { if (pt->type == htons(ETH_P_ALL)) return &ptype_all; else return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; } /** * dev_add_pack - add packet handler * @pt: packet type declaration * * Add a protocol handler to the networking stack. The passed &packet_type * is linked into kernel lists and may not be freed until it has been * removed from the kernel lists. * * This call does not sleep therefore it can not * guarantee all CPU's that are in middle of receiving packets * will see the new packet type (until the next received packet). */ void dev_add_pack(struct packet_type *pt) { struct list_head *head = ptype_head(pt); spin_lock(&ptype_lock); list_add_rcu(&pt->list, head); spin_unlock(&ptype_lock); } EXPORT_SYMBOL(dev_add_pack); /** * __dev_remove_pack - remove packet handler * @pt: packet type declaration * * Remove a protocol handler that was previously added to the kernel * protocol handlers by dev_add_pack(). The passed &packet_type is removed * from the kernel lists and can be freed or reused once this function * returns. * * The packet type might still be in use by receivers * and must not be freed until after all the CPU's have gone * through a quiescent state. 
*/ void __dev_remove_pack(struct packet_type *pt) { struct list_head *head = ptype_head(pt); struct packet_type *pt1; spin_lock(&ptype_lock); list_for_each_entry(pt1, head, list) { if (pt == pt1) { list_del_rcu(&pt->list); goto out; } } printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); out: spin_unlock(&ptype_lock); } EXPORT_SYMBOL(__dev_remove_pack); /** * dev_remove_pack - remove packet handler * @pt: packet type declaration * * Remove a protocol handler that was previously added to the kernel * protocol handlers by dev_add_pack(). The passed &packet_type is removed * from the kernel lists and can be freed or reused once this function * returns. * * This call sleeps to guarantee that no CPU is looking at the packet * type after return. */ void dev_remove_pack(struct packet_type *pt) { __dev_remove_pack(pt); synchronize_net(); } EXPORT_SYMBOL(dev_remove_pack); /****************************************************************************** Device Boot-time Settings Routines *******************************************************************************/ /* Boot time configuration table */ static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; /** * netdev_boot_setup_add - add new setup entry * @name: name of the device * @map: configured settings for the device * * Adds new setup entry to the dev_boot_setup list. The function * returns 0 on error and 1 on success. This is a generic routine to * all netdevices. */ static int netdev_boot_setup_add(char *name, struct ifmap *map) { struct netdev_boot_setup *s; int i; s = dev_boot_setup; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { memset(s[i].name, 0, sizeof(s[i].name)); strlcpy(s[i].name, name, IFNAMSIZ); memcpy(&s[i].map, map, sizeof(s[i].map)); break; } } return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; } /** * netdev_boot_setup_check - check boot time settings * @dev: the netdevice * * Check boot time settings for the device. * The found settings are set for the device to be used * later in the device probing. * Returns 0 if no settings found, 1 if they are. */ int netdev_boot_setup_check(struct net_device *dev) { struct netdev_boot_setup *s = dev_boot_setup; int i; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && !strcmp(dev->name, s[i].name)) { dev->irq = s[i].map.irq; dev->base_addr = s[i].map.base_addr; dev->mem_start = s[i].map.mem_start; dev->mem_end = s[i].map.mem_end; return 1; } } return 0; } EXPORT_SYMBOL(netdev_boot_setup_check); /** * netdev_boot_base - get address from boot time settings * @prefix: prefix for network device * @unit: id for network device * * Check boot time settings for the base address of device. * The found settings are set for the device to be used * later in the device probing. * Returns 0 if no settings found. */ unsigned long netdev_boot_base(const char *prefix, int unit) { const struct netdev_boot_setup *s = dev_boot_setup; char name[IFNAMSIZ]; int i; sprintf(name, "%s%d", prefix, unit); /* * If device already registered then return base of 1 * to indicate not to probe for this interface */ if (__dev_get_by_name(&init_net, name)) return 1; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) if (!strcmp(name, s[i].name)) return s[i].map.base_addr; return 0; } /* * Saves at boot time configured settings for any netdevice. 
*/ int __init netdev_boot_setup(char *str) { int ints[5]; struct ifmap map; str = get_options(str, ARRAY_SIZE(ints), ints); if (!str || !*str) return 0; /* Save settings */ memset(&map, 0, sizeof(map)); if (ints[0] > 0) map.irq = ints[1]; if (ints[0] > 1) map.base_addr = ints[2]; if (ints[0] > 2) map.mem_start = ints[3]; if (ints[0] > 3) map.mem_end = ints[4]; /* Add new entry to the list */ return netdev_boot_setup_add(str, &map); } __setup("netdev=", netdev_boot_setup); /******************************************************************************* Device Interface Subroutines *******************************************************************************/ /** * __dev_get_by_name - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. Must be called under RTNL semaphore * or @dev_base_lock. If the name is found a pointer to the device * is returned. If the name is not found then %NULL is returned. The * reference counters are not incremented so the caller must be * careful with locks. */ struct net_device *__dev_get_by_name(struct net *net, const char *name) { struct hlist_node *p; struct net_device *dev; struct hlist_head *head = dev_name_hash(net, name); hlist_for_each_entry(dev, p, head, name_hlist) if (!strncmp(dev->name, name, IFNAMSIZ)) return dev; return NULL; } EXPORT_SYMBOL(__dev_get_by_name); /** * dev_get_by_name_rcu - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. * If the name is found a pointer to the device is returned. * If the name is not found then %NULL is returned. * The reference counters are not incremented so the caller must be * careful with locks. The caller must hold RCU lock. */ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) { struct hlist_node *p; struct net_device *dev; struct hlist_head *head = dev_name_hash(net, name); hlist_for_each_entry_rcu(dev, p, head, name_hlist) if (!strncmp(dev->name, name, IFNAMSIZ)) return dev; return NULL; } EXPORT_SYMBOL(dev_get_by_name_rcu); /** * dev_get_by_name - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. This can be called from any * context and does its own locking. The returned handle has * the usage count incremented and the caller must use dev_put() to * release it when it is no longer needed. %NULL is returned if no * matching device is found. */ struct net_device *dev_get_by_name(struct net *net, const char *name) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); if (dev) dev_hold(dev); rcu_read_unlock(); return dev; } EXPORT_SYMBOL(dev_get_by_name); /** * __dev_get_by_index - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns %NULL if the device * is not found or a pointer to the device. The device has not * had its reference counter increased so the caller must be careful * about locking. The caller must hold either the RTNL semaphore * or @dev_base_lock. 
*/ struct net_device *__dev_get_by_index(struct net *net, int ifindex) { struct hlist_node *p; struct net_device *dev; struct hlist_head *head = dev_index_hash(net, ifindex); hlist_for_each_entry(dev, p, head, index_hlist) if (dev->ifindex == ifindex) return dev; return NULL; } EXPORT_SYMBOL(__dev_get_by_index); /** * dev_get_by_index_rcu - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns %NULL if the device * is not found or a pointer to the device. The device has not * had its reference counter increased so the caller must be careful * about locking. The caller must hold RCU lock. */ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) { struct hlist_node *p; struct net_device *dev; struct hlist_head *head = dev_index_hash(net, ifindex); hlist_for_each_entry_rcu(dev, p, head, index_hlist) if (dev->ifindex == ifindex) return dev; return NULL; } EXPORT_SYMBOL(dev_get_by_index_rcu); /** * dev_get_by_index - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns NULL if the device * is not found or a pointer to the device. The device returned has * had a reference added and the pointer is safe until the user calls * dev_put to indicate they have finished with it. */ struct net_device *dev_get_by_index(struct net *net, int ifindex) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); if (dev) dev_hold(dev); rcu_read_unlock(); return dev; } EXPORT_SYMBOL(dev_get_by_index); /** * dev_getbyhwaddr_rcu - find a device by its hardware address * @net: the applicable net namespace * @type: media type of device * @ha: hardware address * * Search for an interface by MAC address. Returns NULL if the device * is not found or a pointer to the device. * The caller must hold RCU or RTNL. * The returned device has not had its ref count increased * and the caller must therefore be careful about locking * */ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *ha) { struct net_device *dev; for_each_netdev_rcu(net, dev) if (dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len)) return dev; return NULL; } EXPORT_SYMBOL(dev_getbyhwaddr_rcu); struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev; ASSERT_RTNL(); for_each_netdev(net, dev) if (dev->type == type) return dev; return NULL; } EXPORT_SYMBOL(__dev_getfirstbyhwtype); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev, *ret = NULL; rcu_read_lock(); for_each_netdev_rcu(net, dev) if (dev->type == type) { dev_hold(dev); ret = dev; break; } rcu_read_unlock(); return ret; } EXPORT_SYMBOL(dev_getfirstbyhwtype); /** * dev_get_by_flags_rcu - find any device with given flags * @net: the applicable net namespace * @if_flags: IFF_* values * @mask: bitmask of bits in if_flags to check * * Search for any interface with the given flags. Returns NULL if a device * is not found or a pointer to the device. Must be called inside * rcu_read_lock(), and result refcount is unchanged. 
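 *
 *	A hypothetical caller looking for any running interface:
 *
 *		rcu_read_lock();
 *		dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP);
 *		if (dev)
 *			dev_hold(dev);
 *		rcu_read_unlock();
 *
 *	where dev_hold() is taken before rcu_read_unlock() so that the
 *	reference outlives the RCU critical section.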
*/ struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags, unsigned short mask) { struct net_device *dev, *ret; ret = NULL; for_each_netdev_rcu(net, dev) { if (((dev->flags ^ if_flags) & mask) == 0) { ret = dev; break; } } return ret; } EXPORT_SYMBOL(dev_get_by_flags_rcu); /** * dev_valid_name - check if name is okay for network device * @name: name string * * Network device names need to be valid file names to * allow sysfs to work. We also disallow any kind of * whitespace. */ int dev_valid_name(const char *name) { if (*name == '\0') return 0; if (strlen(name) >= IFNAMSIZ) return 0; if (!strcmp(name, ".") || !strcmp(name, "..")) return 0; while (*name) { if (*name == '/' || isspace(*name)) return 0; name++; } return 1; } EXPORT_SYMBOL(dev_valid_name); /** * __dev_alloc_name - allocate a name for a device * @net: network namespace to allocate the device name in * @name: name format string * @buf: scratch buffer and result name string * * Passed a format string - eg "lt%d" it will try to find a suitable * id. It scans list of devices to build up a free map, then chooses * the first empty slot. The caller must hold the dev_base or rtnl lock * while allocating the name and adding the device in order to avoid * duplicates. * Limited to bits_per_byte * page size devices (ie 32K on most platforms). * Returns the number of the unit assigned or a negative errno code. */ static int __dev_alloc_name(struct net *net, const char *name, char *buf) { int i = 0; const char *p; const int max_netdevices = 8*PAGE_SIZE; unsigned long *inuse; struct net_device *d; p = strnchr(name, IFNAMSIZ-1, '%'); if (p) { /* * Verify the string as this thing may have come from * the user. There must be either one "%d" and no other "%" * characters. */ if (p[1] != 'd' || strchr(p + 2, '%')) return -EINVAL; /* Use one page as a bit array of possible slots */ inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); if (!inuse) return -ENOMEM; for_each_netdev(net, d) { if (!sscanf(d->name, name, &i)) continue; if (i < 0 || i >= max_netdevices) continue; /* avoid cases where sscanf is not exact inverse of printf */ snprintf(buf, IFNAMSIZ, name, i); if (!strncmp(buf, d->name, IFNAMSIZ)) set_bit(i, inuse); } i = find_first_zero_bit(inuse, max_netdevices); free_page((unsigned long) inuse); } if (buf != name) snprintf(buf, IFNAMSIZ, name, i); if (!__dev_get_by_name(net, buf)) return i; /* It is possible to run out of possible slots * when the name is long and there isn't enough space left * for the digits, or if all bits are used. */ return -ENFILE; } /** * dev_alloc_name - allocate a name for a device * @dev: device * @name: name format string * * Passed a format string - eg "lt%d" it will try to find a suitable * id. It scans list of devices to build up a free map, then chooses * the first empty slot. The caller must hold the dev_base or rtnl lock * while allocating the name and adding the device in order to avoid * duplicates. * Limited to bits_per_byte * page size devices (ie 32K on most platforms). * Returns the number of the unit assigned or a negative errno code.
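 *
 *	E.g. a driver registering its first two devices with
 *
 *		err = dev_alloc_name(dev, "eth%d");
 *
 *	would typically be handed "eth0" (err == 0) and then "eth1"
 *	(err == 1), assuming no other ethN devices exist in the
 *	namespace.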
*/ int dev_alloc_name(struct net_device *dev, const char *name) { char buf[IFNAMSIZ]; struct net *net; int ret; BUG_ON(!dev_net(dev)); net = dev_net(dev); ret = __dev_alloc_name(net, name, buf); if (ret >= 0) strlcpy(dev->name, buf, IFNAMSIZ); return ret; } EXPORT_SYMBOL(dev_alloc_name); static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) { struct net *net; BUG_ON(!dev_net(dev)); net = dev_net(dev); if (!dev_valid_name(name)) return -EINVAL; if (fmt && strchr(name, '%')) return dev_alloc_name(dev, name); else if (__dev_get_by_name(net, name)) return -EEXIST; else if (dev->name != name) strlcpy(dev->name, name, IFNAMSIZ); return 0; } /** * dev_change_name - change name of a device * @dev: device * @newname: name (or format string) must be at least IFNAMSIZ * * Change name of a device, can pass format strings "eth%d". * for wildcarding. */ int dev_change_name(struct net_device *dev, const char *newname) { char oldname[IFNAMSIZ]; int err = 0; int ret; struct net *net; ASSERT_RTNL(); BUG_ON(!dev_net(dev)); net = dev_net(dev); if (dev->flags & IFF_UP) return -EBUSY; if (strncmp(newname, dev->name, IFNAMSIZ) == 0) return 0; memcpy(oldname, dev->name, IFNAMSIZ); err = dev_get_valid_name(dev, newname, 1); if (err < 0) return err; rollback: ret = device_rename(&dev->dev, dev->name); if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); return ret; } write_lock_bh(&dev_base_lock); hlist_del(&dev->name_hlist); write_unlock_bh(&dev_base_lock); synchronize_rcu(); write_lock_bh(&dev_base_lock); hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); write_unlock_bh(&dev_base_lock); ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); ret = notifier_to_errno(ret); if (ret) { /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; memcpy(dev->name, oldname, IFNAMSIZ); goto rollback; } else { printk(KERN_ERR "%s: name change rollback failed: %d.\n", dev->name, ret); } } return err; } /** * dev_set_alias - change ifalias of a device * @dev: device * @alias: name up to IFALIASZ * @len: limit of bytes to copy from info * * Set ifalias for a device, */ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) { ASSERT_RTNL(); if (len >= IFALIASZ) return -EINVAL; if (!len) { if (dev->ifalias) { kfree(dev->ifalias); dev->ifalias = NULL; } return 0; } dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); if (!dev->ifalias) return -ENOMEM; strlcpy(dev->ifalias, alias, len+1); return len; } /** * netdev_features_change - device changes features * @dev: device to cause notification * * Called to indicate a device has changed features. */ void netdev_features_change(struct net_device *dev) { call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); } EXPORT_SYMBOL(netdev_features_change); /** * netdev_state_change - device changes state * @dev: device to cause notification * * Called to indicate a device has changed state. This function calls * the notifier chains for netdev_chain and sends a NEWLINK message * to the routing socket. 
*/ void netdev_state_change(struct net_device *dev) { if (dev->flags & IFF_UP) { call_netdevice_notifiers(NETDEV_CHANGE, dev); rtmsg_ifinfo(RTM_NEWLINK, dev, 0); } } EXPORT_SYMBOL(netdev_state_change); int netdev_bonding_change(struct net_device *dev, unsigned long event) { return call_netdevice_notifiers(event, dev); } EXPORT_SYMBOL(netdev_bonding_change); /** * dev_load - load a network module * @net: the applicable net namespace * @name: name of interface * * If a network interface is not present and the process has suitable * privileges this function loads the module. If module loading is not * available in this kernel then it becomes a nop. */ void dev_load(struct net *net, const char *name) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); rcu_read_unlock(); if (!dev && capable(CAP_NET_ADMIN)) request_module("%s", name); } EXPORT_SYMBOL(dev_load); static int __dev_open(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; int ret; ASSERT_RTNL(); /* * Is it even present? */ if (!netif_device_present(dev)) return -ENODEV; ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); ret = notifier_to_errno(ret); if (ret) return ret; /* * Call device private open method */ set_bit(__LINK_STATE_START, &dev->state); if (ops->ndo_validate_addr) ret = ops->ndo_validate_addr(dev); if (!ret && ops->ndo_open) ret = ops->ndo_open(dev); /* * If it went open OK then: */ if (ret) clear_bit(__LINK_STATE_START, &dev->state); else { /* * Set the flags. */ dev->flags |= IFF_UP; /* * Enable NET_DMA */ net_dmaengine_get(); /* * Initialize multicasting status */ dev_set_rx_mode(dev); /* * Wakeup transmit queue engine */ dev_activate(dev); } return ret; } /** * dev_open - prepare an interface for use. * @dev: device to open * * Takes a device from down to up state. The device's private open * function is invoked and then the multicast lists are loaded. Finally * the device is moved into the up state and a %NETDEV_UP message is * sent to the netdev notifier chain. * * Calling this function on an active interface is a nop. On a failure * a negative errno code is returned. */ int dev_open(struct net_device *dev) { int ret; /* * Is it already up? */ if (dev->flags & IFF_UP) return 0; /* * Open device */ ret = __dev_open(dev); if (ret < 0) return ret; /* * ... and announce new interface. */ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); call_netdevice_notifiers(NETDEV_UP, dev); return ret; } EXPORT_SYMBOL(dev_open); static int __dev_close_many(struct list_head *head) { struct net_device *dev; ASSERT_RTNL(); might_sleep(); list_for_each_entry(dev, head, unreg_list) { /* * Tell people we are going down, so that they can * prepare to death, when device is still operating. */ call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); clear_bit(__LINK_STATE_START, &dev->state); /* Synchronize to scheduled poll. We cannot touch poll list, it * can be even on different cpu. So just clear netif_running(). * * dev->stop() will invoke napi_disable() on all of it's * napi_struct instances on this device. */ smp_mb__after_clear_bit(); /* Commit netif_running(). */ } dev_deactivate_many(head); list_for_each_entry(dev, head, unreg_list) { const struct net_device_ops *ops = dev->netdev_ops; /* * Call the device specific close. This cannot fail. * Only if device is UP * * We allow it to be called even after a DETACH hot-plug * event. */ if (ops->ndo_stop) ops->ndo_stop(dev); /* * Device is now down. 
*/ dev->flags &= ~IFF_UP; /* * Shutdown NET_DMA */ net_dmaengine_put(); } return 0; } static int __dev_close(struct net_device *dev) { int retval; LIST_HEAD(single); list_add(&dev->unreg_list, &single); retval = __dev_close_many(&single); list_del(&single); return retval; } int dev_close_many(struct list_head *head) { struct net_device *dev, *tmp; LIST_HEAD(tmp_list); list_for_each_entry_safe(dev, tmp, head, unreg_list) if (!(dev->flags & IFF_UP)) list_move(&dev->unreg_list, &tmp_list); __dev_close_many(head); /* * Tell people we are down */ list_for_each_entry(dev, head, unreg_list) { rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); call_netdevice_notifiers(NETDEV_DOWN, dev); } /* rollback_registered_many needs the complete original list */ list_splice(&tmp_list, head); return 0; } /** * dev_close - shutdown an interface. * @dev: device to shutdown * * This function moves an active device into down state. A * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier * chain. */ int dev_close(struct net_device *dev) { LIST_HEAD(single); list_add(&dev->unreg_list, &single); dev_close_many(&single); list_del(&single); return 0; } EXPORT_SYMBOL(dev_close); /** * dev_disable_lro - disable Large Receive Offload on a device * @dev: device * * Disable Large Receive Offload (LRO) on a net device. Must be * called under RTNL. This is needed if received packets may be * forwarded to another interface. */ void dev_disable_lro(struct net_device *dev) { if (dev->ethtool_ops && dev->ethtool_ops->get_flags && dev->ethtool_ops->set_flags) { u32 flags = dev->ethtool_ops->get_flags(dev); if (flags & ETH_FLAG_LRO) { flags &= ~ETH_FLAG_LRO; dev->ethtool_ops->set_flags(dev, flags); } } WARN_ON(dev->features & NETIF_F_LRO); } EXPORT_SYMBOL(dev_disable_lro); static int dev_boot_phase = 1; /* * Device change register/unregister. These are not inline or static * as we export them to the world. */ /** * register_netdevice_notifier - register a network notifier block * @nb: notifier * * Register a notifier to be called when network device events occur. * The notifier passed is linked into the kernel structures and must * not be reused until it has been unregistered. A negative errno code * is returned on a failure. * * When registered all registration and up events are replayed * to the new notifier to allow device to have a race free * view of the network device list. 
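 *
 *	A minimal notifier sketch (my_netdev_event and my_netdev_nb are
 *	made-up names; in this kernel the void pointer passed to the
 *	callback is the struct net_device itself):
 *
 *		static int my_netdev_event(struct notifier_block *nb,
 *					   unsigned long event, void *ptr)
 *		{
 *			struct net_device *dev = ptr;
 *
 *			if (event == NETDEV_UP)
 *				pr_info("%s is up\n", dev->name);
 *			return NOTIFY_DONE;
 *		}
 *
 *		static struct notifier_block my_netdev_nb = {
 *			.notifier_call = my_netdev_event,
 *		};
 *
 *		err = register_netdevice_notifier(&my_netdev_nb);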
*/ int register_netdevice_notifier(struct notifier_block *nb) { struct net_device *dev; struct net_device *last; struct net *net; int err; rtnl_lock(); err = raw_notifier_chain_register(&netdev_chain, nb); if (err) goto unlock; if (dev_boot_phase) goto unlock; for_each_net(net) { for_each_netdev(net, dev) { err = nb->notifier_call(nb, NETDEV_REGISTER, dev); err = notifier_to_errno(err); if (err) goto rollback; if (!(dev->flags & IFF_UP)) continue; nb->notifier_call(nb, NETDEV_UP, dev); } } unlock: rtnl_unlock(); return err; rollback: last = dev; for_each_net(net) { for_each_netdev(net, dev) { if (dev == last) break; if (dev->flags & IFF_UP) { nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); nb->notifier_call(nb, NETDEV_DOWN, dev); } nb->notifier_call(nb, NETDEV_UNREGISTER, dev); nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); } } raw_notifier_chain_unregister(&netdev_chain, nb); goto unlock; } EXPORT_SYMBOL(register_netdevice_notifier); /** * unregister_netdevice_notifier - unregister a network notifier block * @nb: notifier * * Unregister a notifier previously registered by * register_netdevice_notifier(). The notifier is unlinked into the * kernel structures and may then be reused. A negative errno code * is returned on a failure. */ int unregister_netdevice_notifier(struct notifier_block *nb) { int err; rtnl_lock(); err = raw_notifier_chain_unregister(&netdev_chain, nb); rtnl_unlock(); return err; } EXPORT_SYMBOL(unregister_netdevice_notifier); /** * call_netdevice_notifiers - call all network notifier blocks * @val: value passed unmodified to notifier function * @dev: net_device pointer passed unmodified to notifier function * * Call all network notifier blocks. Parameters and return value * are as for raw_notifier_call_chain(). */ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) { ASSERT_RTNL(); return raw_notifier_call_chain(&netdev_chain, val, dev); } /* When > 0 there are consumers of rx skb time stamps */ static atomic_t netstamp_needed = ATOMIC_INIT(0); void net_enable_timestamp(void) { atomic_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { atomic_dec(&netstamp_needed); } EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp_set(struct sk_buff *skb) { if (atomic_read(&netstamp_needed)) __net_timestamp(skb); else skb->tstamp.tv64 = 0; } static inline void net_timestamp_check(struct sk_buff *skb) { if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed)) __net_timestamp(skb); } /** * dev_forward_skb - loopback an skb to another netif * * @dev: destination network device * @skb: buffer to forward * * return values: * NET_RX_SUCCESS (no congestion) * NET_RX_DROP (packet was dropped, but freed) * * dev_forward_skb can be used for injecting an skb from the * start_xmit function of one device into the receive queue * of another device. * * The receiving device may be in another namespace, so * we have to clear all information in the skb that could * impact namespace isolation. 
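 *
 * A virtual driver's transmit routine could (hypothetically) hand
 * frames to its peer like this, with my_get_peer() standing in for
 * however the driver resolves its peer device (compare veth for a
 * real user):
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb,
 *				   struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}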
*/ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { skb_orphan(skb); nf_reset(skb); if (unlikely(!(dev->flags & IFF_UP) || (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } skb_set_dev(skb, dev); skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, dev); return netif_rx(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); static inline int deliver_skb(struct sk_buff *skb, struct packet_type *pt_prev, struct net_device *orig_dev) { atomic_inc(&skb->users); return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } /* * Support routine. Sends outgoing frames to any network * taps currently in use. */ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) { struct packet_type *ptype; struct sk_buff *skb2 = NULL; struct packet_type *pt_prev = NULL; rcu_read_lock(); list_for_each_entry_rcu(ptype, &ptype_all, list) { /* Never send packets back to the socket * they originated from - MvS (miquels@drinkel.ow.org) */ if ((ptype->dev == dev || !ptype->dev) && (ptype->af_packet_priv == NULL || (struct sock *)ptype->af_packet_priv != skb->sk)) { if (pt_prev) { deliver_skb(skb2, pt_prev, skb->dev); pt_prev = ptype; continue; } skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) break; net_timestamp_set(skb2); /* skb->nh should be correctly set by sender, so that the second statement is just protection against buggy protocols. */ skb_reset_mac_header(skb2); if (skb_network_header(skb2) < skb2->data || skb2->network_header > skb2->tail) { if (net_ratelimit()) printk(KERN_CRIT "protocol %04x is " "buggy, dev %s\n", ntohs(skb2->protocol), dev->name); skb_reset_network_header(skb2); } skb2->transport_header = skb2->network_header; skb2->pkt_type = PACKET_OUTGOING; pt_prev = ptype; } } if (pt_prev) pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); rcu_read_unlock(); } /* * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed. */ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) { int rc; if (txq < 1 || txq > dev->num_tx_queues) return -EINVAL; if (dev->reg_state == NETREG_REGISTERED) { ASSERT_RTNL(); rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, txq); if (rc) return rc; if (txq < dev->real_num_tx_queues) qdisc_reset_all_tx_gt(dev, txq); } dev->real_num_tx_queues = txq; return 0; } EXPORT_SYMBOL(netif_set_real_num_tx_queues); #ifdef CONFIG_RPS /** * netif_set_real_num_rx_queues - set actual number of RX queues used * @dev: Network device * @rxq: Actual number of RX queues * * This must be called either with the rtnl_lock held or before * registration of the net device. Returns 0 on success, or a * negative error code. If called before registration, it always * succeeds.
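 *
 * For example, a probe routine that allocated one RX queue per online
 * CPU might trim the advertised count with
 *
 *	netif_set_real_num_rx_queues(dev, num_online_cpus());
 *
 * before calling register_netdev() (an illustrative call site, not
 * taken from a specific driver).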
*/ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) { int rc; if (rxq < 1 || rxq > dev->num_rx_queues) return -EINVAL; if (dev->reg_state == NETREG_REGISTERED) { ASSERT_RTNL(); rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, rxq); if (rc) return rc; } dev->real_num_rx_queues = rxq; return 0; } EXPORT_SYMBOL(netif_set_real_num_rx_queues); #endif static inline void __netif_reschedule(struct Qdisc *q) { struct softnet_data *sd; unsigned long flags; local_irq_save(flags); sd = &__get_cpu_var(softnet_data); q->next_sched = NULL; *sd->output_queue_tailp = q; sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); } void __netif_schedule(struct Qdisc *q) { if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) __netif_reschedule(q); } EXPORT_SYMBOL(__netif_schedule); void dev_kfree_skb_irq(struct sk_buff *skb) { if (atomic_dec_and_test(&skb->users)) { struct softnet_data *sd; unsigned long flags; local_irq_save(flags); sd = &__get_cpu_var(softnet_data); skb->next = sd->completion_queue; sd->completion_queue = skb; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); } } EXPORT_SYMBOL(dev_kfree_skb_irq); void dev_kfree_skb_any(struct sk_buff *skb) { if (in_irq() || irqs_disabled()) dev_kfree_skb_irq(skb); else dev_kfree_skb(skb); } EXPORT_SYMBOL(dev_kfree_skb_any); /** * netif_device_detach - mark device as removed * @dev: network device * * Mark device as removed from system and therefore no longer available. */ void netif_device_detach(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && netif_running(dev)) { netif_tx_stop_all_queues(dev); } } EXPORT_SYMBOL(netif_device_detach); /** * netif_device_attach - mark device as attached * @dev: network device * * Mark device as attached from system and restart if needed. */ void netif_device_attach(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && netif_running(dev)) { netif_tx_wake_all_queues(dev); __netdev_watchdog_up(dev); } } EXPORT_SYMBOL(netif_device_attach); /** * skb_dev_set -- assign a new device to a buffer * @skb: buffer for the new device * @dev: network device * * If an skb is owned by a device already, we have to reset * all data private to the namespace a device belongs to * before assigning it a new device. */ #ifdef CONFIG_NET_NS void skb_set_dev(struct sk_buff *skb, struct net_device *dev) { skb_dst_drop(skb); if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) { secpath_reset(skb); nf_reset(skb); skb_init_secmark(skb); skb->mark = 0; skb->priority = 0; skb->nf_trace = 0; skb->ipvs_property = 0; #ifdef CONFIG_NET_SCHED skb->tc_index = 0; #endif } skb->dev = dev; } EXPORT_SYMBOL(skb_set_dev); #endif /* CONFIG_NET_NS */ /* * Invalidate hardware checksum when packet is to be mangled, and * complete checksum manually on outgoing path. */ int skb_checksum_help(struct sk_buff *skb) { __wsum csum; int ret = 0, offset; if (skb->ip_summed == CHECKSUM_COMPLETE) goto out_set_summed; if (unlikely(skb_shinfo(skb)->gso_size)) { /* Let GSO fix up the checksum. 
*/ goto out_set_summed; } offset = skb_checksum_start_offset(skb); BUG_ON(offset >= skb_headlen(skb)); csum = skb_checksum(skb, offset, skb->len - offset, 0); offset += skb->csum_offset; BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(__sum16))) { ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (ret) goto out; } *(__sum16 *)(skb->data + offset) = csum_fold(csum); out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: return ret; } EXPORT_SYMBOL(skb_checksum_help); /** * skb_gso_segment - Perform segmentation on skb. * @skb: buffer to segment * @features: features for the output path (see dev->features) * * This function segments the given skb and returns a list of segments. * * It may return NULL if the skb requires no segmentation. This is * only possible when GSO is used for verifying header integrity. */ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_type *ptype; __be16 type = skb->protocol; int vlan_depth = ETH_HLEN; int err; while (type == htons(ETH_P_8021Q)) { struct vlan_hdr *vh; if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) return ERR_PTR(-EINVAL); vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } skb_reset_mac_header(skb); skb->mac_len = skb->network_header - skb->mac_header; __skb_pull(skb, skb->mac_len); if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { struct net_device *dev = skb->dev; struct ethtool_drvinfo info = {}; if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) dev->ethtool_ops->get_drvinfo(dev, &info); WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n", info.driver, dev ? dev->features : 0L, skb->sk ? skb->sk->sk_route_caps : 0L, skb->len, skb->data_len, skb->ip_summed); if (skb_header_cloned(skb) && (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) return ERR_PTR(err); } rcu_read_lock(); list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { if (ptype->type == type && !ptype->dev && ptype->gso_segment) { if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { err = ptype->gso_send_check(skb); segs = ERR_PTR(err); if (err || skb_gso_ok(skb, features)) break; __skb_push(skb, (skb->data - skb_network_header(skb))); } segs = ptype->gso_segment(skb, features); break; } } rcu_read_unlock(); __skb_push(skb, skb->data - skb_mac_header(skb)); return segs; } EXPORT_SYMBOL(skb_gso_segment); /* Take action when hardware reception checksum errors are detected. */ #ifdef CONFIG_BUG void netdev_rx_csum_fault(struct net_device *dev) { if (net_ratelimit()) { printk(KERN_ERR "%s: hw csum failure.\n", dev ? dev->name : "<unknown>"); dump_stack(); } } EXPORT_SYMBOL(netdev_rx_csum_fault); #endif /* Actually, we should eliminate this check as soon as we know, that: * 1. IOMMU is present and allows to map all the memory. * 2. No high memory really exists on this machine. 
*/ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_HIGHMEM int i; if (!(dev->features & NETIF_F_HIGHDMA)) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) if (PageHighMem(skb_shinfo(skb)->frags[i].page)) return 1; } if (PCI_DMA_BUS_IS_PHYS) { struct device *pdev = dev->dev.parent; if (!pdev) return 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) return 1; } } #endif return 0; } struct dev_gso_cb { void (*destructor)(struct sk_buff *skb); }; #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) static void dev_gso_skb_destructor(struct sk_buff *skb) { struct dev_gso_cb *cb; do { struct sk_buff *nskb = skb->next; skb->next = nskb->next; nskb->next = NULL; kfree_skb(nskb); } while (skb->next); cb = DEV_GSO_CB(skb); if (cb->destructor) cb->destructor(skb); } /** * dev_gso_segment - Perform emulated hardware segmentation on skb. * @skb: buffer to segment * @features: device features as applicable to this skb * * This function segments the given skb and stores the list of segments * in skb->next. */ static int dev_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs; segs = skb_gso_segment(skb, features); /* Verifying header integrity only. */ if (!segs) return 0; if (IS_ERR(segs)) return PTR_ERR(segs); skb->next = segs; DEV_GSO_CB(skb)->destructor = skb->destructor; skb->destructor = dev_gso_skb_destructor; return 0; } /* * Try to orphan skb early, right before transmission by the device. * We cannot orphan skb if tx timestamp is requested or the sk-reference * is needed on driver level for other reasons, e.g. see net/can/raw.c */ static inline void skb_orphan_try(struct sk_buff *skb) { struct sock *sk = skb->sk; if (sk && !skb_shinfo(skb)->tx_flags) { /* skb_tx_hash() wont be able to get sk. * We copy sk_hash into skb->rxhash */ if (!skb->rxhash) skb->rxhash = sk->sk_hash; skb_orphan(skb); } } static bool can_checksum_protocol(unsigned long features, __be16 protocol) { return ((features & NETIF_F_GEN_CSUM) || ((features & NETIF_F_V4_CSUM) && protocol == htons(ETH_P_IP)) || ((features & NETIF_F_V6_CSUM) && protocol == htons(ETH_P_IPV6)) || ((features & NETIF_F_FCOE_CRC) && protocol == htons(ETH_P_FCOE))); } static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features) { if (!can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; features &= ~NETIF_F_SG; } else if (illegal_highdma(skb->dev, skb)) { features &= ~NETIF_F_SG; } return features; } int netif_skb_features(struct sk_buff *skb) { __be16 protocol = skb->protocol; int features = skb->dev->features; if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; } else if (!vlan_tx_tag_present(skb)) { return harmonize_features(skb, protocol, features); } features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); if (protocol != htons(ETH_P_8021Q)) { return harmonize_features(skb, protocol, features); } else { features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; return harmonize_features(skb, protocol, features); } } EXPORT_SYMBOL(netif_skb_features); /* * Returns true if either: * 1. skb has frag_list and the device doesn't support FRAGLIST, or * 2. 
skb is fragmented and the device does not support SG, or if * at least one of fragments is in highmem and device does not * support DMA from it. */ static inline int skb_needs_linearize(struct sk_buff *skb, int features) { return skb_is_nonlinear(skb) && ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); } int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) { const struct net_device_ops *ops = dev->netdev_ops; int rc = NETDEV_TX_OK; if (likely(!skb->next)) { int features; /* * If device doesn't need skb->dst, release it right now while * it's hot in this cpu cache */ if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(skb); if (!list_empty(&ptype_all)) dev_queue_xmit_nit(skb, dev); skb_orphan_try(skb); features = netif_skb_features(skb); if (vlan_tx_tag_present(skb) && !(features & NETIF_F_HW_VLAN_TX)) { skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (unlikely(!skb)) goto out; skb->vlan_tci = 0; } if (netif_needs_gso(skb, features)) { if (unlikely(dev_gso_segment(skb, features))) goto out_kfree_skb; if (skb->next) goto gso; } else { if (skb_needs_linearize(skb, features) && __skb_linearize(skb)) goto out_kfree_skb; /* If packet is not checksummed and device does not * support checksumming for this protocol, complete * checksumming here. */ if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_set_transport_header(skb, skb_checksum_start_offset(skb)); if (!(features & NETIF_F_ALL_CSUM) && skb_checksum_help(skb)) goto out_kfree_skb; } } rc = ops->ndo_start_xmit(skb, dev); trace_net_dev_xmit(skb, rc); if (rc == NETDEV_TX_OK) txq_trans_update(txq); return rc; } gso: do { struct sk_buff *nskb = skb->next; skb->next = nskb->next; nskb->next = NULL; /* * If device doesn't need nskb->dst, release it right now while * it's hot in this cpu cache */ if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(nskb); rc = ops->ndo_start_xmit(nskb, dev); trace_net_dev_xmit(nskb, rc); if (unlikely(rc != NETDEV_TX_OK)) { if (rc & ~NETDEV_TX_MASK) goto out_kfree_gso_skb; nskb->next = skb->next; skb->next = nskb; return rc; } txq_trans_update(txq); if (unlikely(netif_tx_queue_stopped(txq) && skb->next)) return NETDEV_TX_BUSY; } while (skb->next); out_kfree_gso_skb: if (likely(skb->next == NULL)) skb->destructor = DEV_GSO_CB(skb)->destructor; out_kfree_skb: kfree_skb(skb); out: return rc; } static u32 hashrnd __read_mostly; /* * Returns a Tx hash based on the given packet descriptor and the number of * Tx queues to use as a distribution range.
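 *
 * The final scaling step maps a 32-bit hash uniformly onto
 * [0, num_tx_queues): e.g. with num_tx_queues == 4, a hash of
 * 0x80000000 yields ((u64)0x80000000 * 4) >> 32 == 2, i.e. queue 2.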
*/ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, unsigned int num_tx_queues) { u32 hash; if (skb_rx_queue_recorded(skb)) { hash = skb_get_rx_queue(skb); while (unlikely(hash >= num_tx_queues)) hash -= num_tx_queues; return hash; } if (skb->sk && skb->sk->sk_hash) hash = skb->sk->sk_hash; else hash = (__force u16) skb->protocol ^ skb->rxhash; hash = jhash_1word(hash, hashrnd); return (u16) (((u64) hash * num_tx_queues) >> 32); } EXPORT_SYMBOL(__skb_tx_hash); static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) { if (unlikely(queue_index >= dev->real_num_tx_queues)) { if (net_ratelimit()) { pr_warning("%s selects TX queue %d, but " "real number of TX queues is %d\n", dev->name, queue_index, dev->real_num_tx_queues); } return 0; } return queue_index; } static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_XPS struct xps_dev_maps *dev_maps; struct xps_map *map; int queue_index = -1; rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_maps); if (dev_maps) { map = rcu_dereference( dev_maps->cpu_map[raw_smp_processor_id()]); if (map) { if (map->len == 1) queue_index = map->queues[0]; else { u32 hash; if (skb->sk && skb->sk->sk_hash) hash = skb->sk->sk_hash; else hash = (__force u16) skb->protocol ^ skb->rxhash; hash = jhash_1word(hash, hashrnd); queue_index = map->queues[ ((u64)hash * map->len) >> 32]; } if (unlikely(queue_index >= dev->real_num_tx_queues)) queue_index = -1; } } rcu_read_unlock(); return queue_index; #else return -1; #endif } static struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) { int queue_index; const struct net_device_ops *ops = dev->netdev_ops; if (dev->real_num_tx_queues == 1) queue_index = 0; else if (ops->ndo_select_queue) { queue_index = ops->ndo_select_queue(dev, skb); queue_index = dev_cap_txqueue(dev, queue_index); } else { struct sock *sk = skb->sk; queue_index = sk_tx_queue_get(sk); if (queue_index < 0 || skb->ooo_okay || queue_index >= dev->real_num_tx_queues) { int old_index = queue_index; queue_index = get_xps_queue(dev, skb); if (queue_index < 0) queue_index = skb_tx_hash(dev, skb); if (queue_index != old_index && sk) { struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1); if (dst && skb_dst(skb) == dst) sk_tx_queue_set(sk, queue_index); } } } skb_set_queue_mapping(skb, queue_index); return netdev_get_tx_queue(dev, queue_index); } static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq) { spinlock_t *root_lock = qdisc_lock(q); bool contended = qdisc_is_running(q); int rc; /* * Heuristic to force contended enqueues to serialize on a * separate lock before trying to get qdisc main lock. * This permits __QDISC_STATE_RUNNING owner to get the lock more often * and dequeue packets faster. */ if (unlikely(contended)) spin_lock(&q->busylock); spin_lock(root_lock); if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { kfree_skb(skb); rc = NET_XMIT_DROP; } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && qdisc_run_begin(q)) { /* * This is a work-conserving queue; there are no old skbs * waiting to be sent out; and the qdisc is not running - * xmit the skb directly. 
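		 *
		 * This bypass is only taken for qdiscs flagged
		 * TCQ_F_CAN_BYPASS (e.g. pfifo_fast) and saves a full
		 * enqueue/dequeue round trip when the queue is empty.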
*/ if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) skb_dst_force(skb); qdisc_skb_cb(skb)->pkt_len = skb->len; qdisc_bstats_update(q, skb); if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { if (unlikely(contended)) { spin_unlock(&q->busylock); contended = false; } __qdisc_run(q); } else qdisc_run_end(q); rc = NET_XMIT_SUCCESS; } else { skb_dst_force(skb); rc = qdisc_enqueue_root(skb, q); if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); contended = false; } __qdisc_run(q); } } spin_unlock(root_lock); if (unlikely(contended)) spin_unlock(&q->busylock); return rc; } static DEFINE_PER_CPU(int, xmit_recursion); #define RECURSION_LIMIT 10 /** * dev_queue_xmit - transmit a buffer * @skb: buffer to transmit * * Queue a buffer for transmission to a network device. The caller must * have set the device and priority and built the buffer before calling * this function. The function can be called from an interrupt. * * A negative errno code is returned on a failure. A success does not * guarantee the frame will be transmitted as it may be dropped due * to congestion or traffic shaping. * * ----------------------------------------------------------------------------------- * I notice this method can also return errors from the queue disciplines, * including NET_XMIT_DROP, which is a positive value. So, errors can also * be positive. * * Regardless of the return value, the skb is consumed, so it is currently * difficult to retry a send to this method. (You can bump the ref count * before sending to hold a reference for retry if you are careful.) * * When calling this method, interrupts MUST be enabled. This is because * the BH enable code must have IRQs enabled so that it will not deadlock. * --BLG */ int dev_queue_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct netdev_queue *txq; struct Qdisc *q; int rc = -ENOMEM; /* Disable soft irqs for various locks below. Also * stops preemption for RCU. */ rcu_read_lock_bh(); txq = dev_pick_tx(dev, skb); q = rcu_dereference_bh(txq->qdisc); #ifdef CONFIG_NET_CLS_ACT skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); #endif trace_net_dev_queue(skb); if (q->enqueue) { rc = __dev_xmit_skb(skb, q, dev, txq); goto out; } /* The device has no queue. Common case for software devices: loopback, all the sorts of tunnels... Really, it is unlikely that netif_tx_lock protection is necessary here. (f.e. loopback and IP tunnels are clean ignoring statistics counters.) However, it is possible, that they rely on protection made by us here. Check this and shot the lock. It is not prone from deadlocks. Either shot noqueue qdisc, it is even simpler 8) */ if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ if (txq->xmit_lock_owner != cpu) { if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) goto recursion_alert; HARD_TX_LOCK(dev, txq, cpu); if (!netif_tx_queue_stopped(txq)) { __this_cpu_inc(xmit_recursion); rc = dev_hard_start_xmit(skb, dev, txq); __this_cpu_dec(xmit_recursion); if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); goto out; } } HARD_TX_UNLOCK(dev, txq); if (net_ratelimit()) printk(KERN_CRIT "Virtual device %s asks to " "queue packet!\n", dev->name); } else { /* Recursion is detected! 
It is possible, * unfortunately */ recursion_alert: if (net_ratelimit()) printk(KERN_CRIT "Dead loop on virtual device " "%s, fix it urgently!\n", dev->name); } } rc = -ENETDOWN; rcu_read_unlock_bh(); kfree_skb(skb); return rc; out: rcu_read_unlock_bh(); return rc; } EXPORT_SYMBOL(dev_queue_xmit); /*======================================================================= Receiver routines =======================================================================*/ int netdev_max_backlog __read_mostly = 1000; int netdev_tstamp_prequeue __read_mostly = 1; int netdev_budget __read_mostly = 300; int weight_p __read_mostly = 64; /* old backlog weight */ /* Called with irq disabled */ static inline void ____napi_schedule(struct softnet_data *sd, struct napi_struct *napi) { list_add_tail(&napi->poll_list, &sd->poll_list); __raise_softirq_irqoff(NET_RX_SOFTIRQ); } /* * __skb_get_rxhash: calculate a flow hash based on src/dst addresses * and src/dst port numbers. Returns a non-zero hash number on success * and 0 on failure. */ __u32 __skb_get_rxhash(struct sk_buff *skb) { int nhoff, hash = 0, poff; struct ipv6hdr *ip6; struct iphdr *ip; u8 ip_proto; u32 addr1, addr2, ihl; union { u32 v32; u16 v16[2]; } ports; nhoff = skb_network_offset(skb); switch (skb->protocol) { case __constant_htons(ETH_P_IP): if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) goto done; ip = (struct iphdr *) (skb->data + nhoff); if (ip->frag_off & htons(IP_MF | IP_OFFSET)) ip_proto = 0; else ip_proto = ip->protocol; addr1 = (__force u32) ip->saddr; addr2 = (__force u32) ip->daddr; ihl = ip->ihl; break; case __constant_htons(ETH_P_IPV6): if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) goto done; ip6 = (struct ipv6hdr *) (skb->data + nhoff); ip_proto = ip6->nexthdr; addr1 = (__force u32) ip6->saddr.s6_addr32[3]; addr2 = (__force u32) ip6->daddr.s6_addr32[3]; ihl = (40 >> 2); break; default: goto done; } ports.v32 = 0; poff = proto_ports_offset(ip_proto); if (poff >= 0) { nhoff += ihl * 4 + poff; if (pskb_may_pull(skb, nhoff + 4)) { ports.v32 = * (__force u32 *) (skb->data + nhoff); if (ports.v16[1] < ports.v16[0]) swap(ports.v16[0], ports.v16[1]); } } /* get a consistent hash (same value on both flow directions) */ if (addr2 < addr1) swap(addr1, addr2); hash = jhash_3words(addr1, addr2, ports.v32, hashrnd); if (!hash) hash = 1; done: return hash; } EXPORT_SYMBOL(__skb_get_rxhash); #ifdef CONFIG_RPS /* One global table that all flow-based protocols share. */ struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; EXPORT_SYMBOL(rps_sock_flow_table); /* * get_rps_cpu is called from netif_receive_skb and returns the target * CPU from the RPS map of the receiving queue for a given skb. * rcu_read_lock must be held on entry. 
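 *
 * The plain-map fallback below scales the 32-bit rxhash onto the CPU
 * map the same way __skb_tx_hash() scales onto queues: for a 16-entry
 * map, cpu = map->cpus[((u64)rxhash * 16) >> 32].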
*/ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow **rflowp) { struct netdev_rx_queue *rxqueue; struct rps_map *map; struct rps_dev_flow_table *flow_table; struct rps_sock_flow_table *sock_flow_table; int cpu = -1; u16 tcpu; if (skb_rx_queue_recorded(skb)) { u16 index = skb_get_rx_queue(skb); if (unlikely(index >= dev->real_num_rx_queues)) { WARN_ONCE(dev->real_num_rx_queues > 1, "%s received packet on queue %u, but number " "of RX queues is %u\n", dev->name, index, dev->real_num_rx_queues); goto done; } rxqueue = dev->_rx + index; } else rxqueue = dev->_rx; map = rcu_dereference(rxqueue->rps_map); if (map) { if (map->len == 1 && !rcu_dereference_raw(rxqueue->rps_flow_table)) { tcpu = map->cpus[0]; if (cpu_online(tcpu)) cpu = tcpu; goto done; } } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) { goto done; } skb_reset_network_header(skb); if (!skb_get_rxhash(skb)) goto done; flow_table = rcu_dereference(rxqueue->rps_flow_table); sock_flow_table = rcu_dereference(rps_sock_flow_table); if (flow_table && sock_flow_table) { u16 next_cpu; struct rps_dev_flow *rflow; rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; tcpu = rflow->cpu; next_cpu = sock_flow_table->ents[skb->rxhash & sock_flow_table->mask]; /* * If the desired CPU (where last recvmsg was done) is * different from current CPU (one in the rx-queue flow * table entry), switch if one of the following holds: * - Current CPU is unset (equal to RPS_NO_CPU). * - Current CPU is offline. * - The current CPU's queue tail has advanced beyond the * last packet that was enqueued using this table entry. * This guarantees that all previous packets for the flow * have been dequeued, thus preserving in order delivery. */ if (unlikely(tcpu != next_cpu) && (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - rflow->last_qtail)) >= 0)) { tcpu = rflow->cpu = next_cpu; if (tcpu != RPS_NO_CPU) rflow->last_qtail = per_cpu(softnet_data, tcpu).input_queue_head; } if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { *rflowp = rflow; cpu = tcpu; goto done; } } if (map) { tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; if (cpu_online(tcpu)) { cpu = tcpu; goto done; } } done: return cpu; } /* Called from hardirq (IPI) context */ static void rps_trigger_softirq(void *data) { struct softnet_data *sd = data; ____napi_schedule(sd, &sd->backlog); sd->received_rps++; } #endif /* CONFIG_RPS */ /* * Check if this softnet_data structure is another cpu one * If yes, queue it to our IPI list and return 1 * If no, return 0 */ static int rps_ipi_queued(struct softnet_data *sd) { #ifdef CONFIG_RPS struct softnet_data *mysd = &__get_cpu_var(softnet_data); if (sd != mysd) { sd->rps_ipi_next = mysd->rps_ipi_list; mysd->rps_ipi_list = sd; __raise_softirq_irqoff(NET_RX_SOFTIRQ); return 1; } #endif /* CONFIG_RPS */ return 0; } /* * enqueue_to_backlog is called to queue an skb to a per CPU backlog * queue (may be a remote CPU queue). 
*/ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, unsigned int *qtail) { struct softnet_data *sd; unsigned long flags; sd = &per_cpu(softnet_data, cpu); local_irq_save(flags); rps_lock(sd); if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { if (skb_queue_len(&sd->input_pkt_queue)) { enqueue: __skb_queue_tail(&sd->input_pkt_queue, skb); input_queue_tail_incr_save(sd, qtail); rps_unlock(sd); local_irq_restore(flags); return NET_RX_SUCCESS; } /* Schedule NAPI for backlog device * We can use non atomic operation since we own the queue lock */ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { if (!rps_ipi_queued(sd)) ____napi_schedule(sd, &sd->backlog); } goto enqueue; } sd->dropped++; rps_unlock(sd); local_irq_restore(flags); atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } /** * netif_rx - post buffer to the network code * @skb: buffer to post * * This function receives a packet from a device driver and queues it for * the upper (protocol) levels to process. It always succeeds. The buffer * may be dropped during processing for congestion control or by the * protocol layers. * * return values: * NET_RX_SUCCESS (no congestion) * NET_RX_DROP (packet was dropped) * */ int netif_rx(struct sk_buff *skb) { int ret; /* if netpoll wants it, pretend we never saw it */ if (netpoll_rx(skb)) return NET_RX_DROP; if (netdev_tstamp_prequeue) net_timestamp_check(skb); trace_netif_rx(skb); #ifdef CONFIG_RPS { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; preempt_disable(); rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); if (cpu < 0) cpu = smp_processor_id(); ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); preempt_enable(); } #else { unsigned int qtail; ret = enqueue_to_backlog(skb, get_cpu(), &qtail); put_cpu(); } #endif return ret; } EXPORT_SYMBOL(netif_rx); int netif_rx_ni(struct sk_buff *skb) { int err; preempt_disable(); err = netif_rx(skb); if (local_softirq_pending()) do_softirq(); preempt_enable(); return err; } EXPORT_SYMBOL(netif_rx_ni); static void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = &__get_cpu_var(softnet_data); if (sd->completion_queue) { struct sk_buff *clist; local_irq_disable(); clist = sd->completion_queue; sd->completion_queue = NULL; local_irq_enable(); while (clist) { struct sk_buff *skb = clist; clist = clist->next; WARN_ON(atomic_read(&skb->users)); trace_kfree_skb(skb, net_tx_action); __kfree_skb(skb); } } if (sd->output_queue) { struct Qdisc *head; local_irq_disable(); head = sd->output_queue; sd->output_queue = NULL; sd->output_queue_tailp = &sd->output_queue; local_irq_enable(); while (head) { struct Qdisc *q = head; spinlock_t *root_lock; head = head->next_sched; root_lock = qdisc_lock(q); if (spin_trylock(root_lock)) { smp_mb__before_clear_bit(); clear_bit(__QDISC_STATE_SCHED, &q->state); qdisc_run(q); spin_unlock(root_lock); } else { if (!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)) { __netif_reschedule(q); } else { smp_mb__before_clear_bit(); clear_bit(__QDISC_STATE_SCHED, &q->state); } } } } } #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) /* This hook is defined here for ATM LANE */ int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr) __read_mostly; EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); #endif #ifdef CONFIG_NET_CLS_ACT /* TODO: Maybe we should just force sch_ingress to be compiled in * when CONFIG_NET_CLS_ACT is? 
otherwise some useless instructions * a compare and 2 stores extra right now if we don't have it on * but have CONFIG_NET_CLS_ACT * NOTE: This doesn't stop any functionality; if you don't have * the ingress scheduler, you just can't add policies on ingress. * */ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) { struct net_device *dev = skb->dev; u32 ttl = G_TC_RTTL(skb->tc_verd); int result = TC_ACT_OK; struct Qdisc *q; if (unlikely(MAX_RED_LOOP < ttl++)) { if (net_ratelimit()) pr_warning( "Redir loop detected, dropping packet (%d->%d)\n", skb->skb_iif, dev->ifindex); return TC_ACT_SHOT; } skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); q = rxq->qdisc; if (q != &noop_qdisc) { spin_lock(qdisc_lock(q)); if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) result = qdisc_enqueue_root(skb, q); spin_unlock(qdisc_lock(q)); } return result; } static inline struct sk_buff *handle_ing(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, struct net_device *orig_dev) { struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); if (!rxq || rxq->qdisc == &noop_qdisc) goto out; if (*pt_prev) { *ret = deliver_skb(skb, *pt_prev, orig_dev); *pt_prev = NULL; } switch (ing_filter(skb, rxq)) { case TC_ACT_SHOT: case TC_ACT_STOLEN: kfree_skb(skb); return NULL; } out: skb->tc_verd = 0; return skb; } #endif /** * netdev_rx_handler_register - register receive handler * @dev: device to register a handler for * @rx_handler: receive handler to register * @rx_handler_data: data pointer that is used by rx handler * * Register a receive handler for a device. This handler will then be * called from __netif_receive_skb. A negative errno code is returned * on a failure. * * The caller must hold the rtnl_mutex. */ int netdev_rx_handler_register(struct net_device *dev, rx_handler_func_t *rx_handler, void *rx_handler_data) { ASSERT_RTNL(); if (dev->rx_handler) return -EBUSY; rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); rcu_assign_pointer(dev->rx_handler, rx_handler); return 0; } EXPORT_SYMBOL_GPL(netdev_rx_handler_register); /** * netdev_rx_handler_unregister - unregister receive handler * @dev: device to unregister a handler from * * Unregister a receive handler from a device. * * The caller must hold the rtnl_mutex. */ void netdev_rx_handler_unregister(struct net_device *dev) { ASSERT_RTNL(); rcu_assign_pointer(dev->rx_handler, NULL); rcu_assign_pointer(dev->rx_handler_data, NULL); } EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, struct net_device *master) { if (skb->pkt_type == PACKET_HOST) { u16 *dest = (u16 *) eth_hdr(skb)->h_dest; memcpy(dest, master->dev_addr, ETH_ALEN); } } /* On bonding slaves other than the currently active slave, suppress * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and * ARP on active-backup slaves with arp_validate enabled. */ int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master) { struct net_device *dev = skb->dev; if (master->priv_flags & IFF_MASTER_ARPMON) dev->last_rx = jiffies; if ((master->priv_flags & IFF_MASTER_ALB) && (master->priv_flags & IFF_BRIDGE_PORT)) { /* Do address unmangle. The local destination address * will always be the one the master has. Provides the right * functionality in a bridge.
*/ skb_bond_set_mac_by_master(skb, master); } if (dev->priv_flags & IFF_SLAVE_INACTIVE) { if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && skb->protocol == __cpu_to_be16(ETH_P_ARP)) return 0; if (master->priv_flags & IFF_MASTER_ALB) { if (skb->pkt_type != PACKET_BROADCAST && skb->pkt_type != PACKET_MULTICAST) return 0; } if (master->priv_flags & IFF_MASTER_8023AD && skb->protocol == __cpu_to_be16(ETH_P_SLOW)) return 0; return 1; } return 0; } EXPORT_SYMBOL(__skb_bond_should_drop); static int __netif_receive_skb(struct sk_buff *skb) { struct packet_type *ptype, *pt_prev; rx_handler_func_t *rx_handler; struct net_device *orig_dev; struct net_device *master; struct net_device *null_or_orig; struct net_device *orig_or_bond; int ret = NET_RX_DROP; __be16 type; if (!netdev_tstamp_prequeue) net_timestamp_check(skb); trace_netif_receive_skb(skb); /* if we've gotten here through NAPI, check netpoll */ if (netpoll_receive_skb(skb)) return NET_RX_DROP; if (!skb->skb_iif) skb->skb_iif = skb->dev->ifindex; /* * bonding note: skbs received on inactive slaves should only * be delivered to pkt handlers that are exact matches. Also * the deliver_no_wcard flag will be set. If packet handlers * are sensitive to duplicate packets these skbs will need to * be dropped at the handler. */ null_or_orig = NULL; orig_dev = skb->dev; master = ACCESS_ONCE(orig_dev->master); if (skb->deliver_no_wcard) null_or_orig = orig_dev; else if (master) { if (skb_bond_should_drop(skb, master)) { skb->deliver_no_wcard = 1; null_or_orig = orig_dev; /* deliver only exact match */ } else skb->dev = master; } __this_cpu_inc(softnet_data.processed); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb->mac_len = skb->network_header - skb->mac_header; pt_prev = NULL; rcu_read_lock(); #ifdef CONFIG_NET_CLS_ACT if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); goto ncls; } #endif list_for_each_entry_rcu(ptype, &ptype_all, list) { if (ptype->dev == null_or_orig || ptype->dev == skb->dev || ptype->dev == orig_dev) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; } } #ifdef CONFIG_NET_CLS_ACT skb = handle_ing(skb, &pt_prev, &ret, orig_dev); if (!skb) goto out; ncls: #endif /* Handle special case of bridge or macvlan */ rx_handler = rcu_dereference(skb->dev->rx_handler); if (rx_handler) { if (pt_prev) { ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = NULL; } skb = rx_handler(skb); if (!skb) goto out; } if (vlan_tx_tag_present(skb)) { if (pt_prev) { ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = NULL; } if (vlan_hwaccel_do_receive(&skb)) { ret = __netif_receive_skb(skb); goto out; } else if (unlikely(!skb)) goto out; } /* * Make sure frames received on VLAN interfaces stacked on * bonding interfaces still make their way to any base bonding * device that may have registered for a specific ptype. The * handler may have to adjust skb->dev and orig_dev. 
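 *
 * E.g. (hypothetical setup) a handler that bound itself specifically
 * to bond0 should still see frames arriving on a stacked VLAN device
 * such as bond0.100; orig_or_bond below is what makes that possible.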
*/ orig_or_bond = orig_dev; if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { orig_or_bond = vlan_dev_real_dev(skb->dev); } type = skb->protocol; list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { if (ptype->type == type && (ptype->dev == null_or_orig || ptype->dev == skb->dev || ptype->dev == orig_dev || ptype->dev == orig_or_bond)) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; } } if (pt_prev) { ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } else { atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); /* Jamal, now you will not able to escape explaining * me how you were going to use this. :-) */ ret = NET_RX_DROP; } out: rcu_read_unlock(); return ret; } /** * netif_receive_skb - process receive buffer from network * @skb: buffer to process * * netif_receive_skb() is the main receive data processing function. * It always succeeds. The buffer may be dropped during processing * for congestion control or by the protocol layers. * * This function may only be called from softirq context and interrupts * should be enabled. * * Return values (usually ignored): * NET_RX_SUCCESS: no congestion * NET_RX_DROP: packet was dropped */ int netif_receive_skb(struct sk_buff *skb) { if (netdev_tstamp_prequeue) net_timestamp_check(skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; #ifdef CONFIG_RPS { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu, ret; rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); if (cpu >= 0) { ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); } else { rcu_read_unlock(); ret = __netif_receive_skb(skb); } return ret; } #else return __netif_receive_skb(skb); #endif } EXPORT_SYMBOL(netif_receive_skb); /* Network device is going away, flush any packets still pending * Called with irqs disabled. 
*/ static void flush_backlog(void *arg) { struct net_device *dev = arg; struct softnet_data *sd = &__get_cpu_var(softnet_data); struct sk_buff *skb, *tmp; rps_lock(sd); skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->input_pkt_queue); kfree_skb(skb); input_queue_head_incr(sd); } } rps_unlock(sd); skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->process_queue); kfree_skb(skb); input_queue_head_incr(sd); } } } static int napi_gro_complete(struct sk_buff *skb) { struct packet_type *ptype; __be16 type = skb->protocol; struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; int err = -ENOENT; if (NAPI_GRO_CB(skb)->count == 1) { skb_shinfo(skb)->gso_size = 0; goto out; } rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type != type || ptype->dev || !ptype->gro_complete) continue; err = ptype->gro_complete(skb); break; } rcu_read_unlock(); if (err) { WARN_ON(&ptype->list == head); kfree_skb(skb); return NET_RX_SUCCESS; } out: return netif_receive_skb(skb); } inline void napi_gro_flush(struct napi_struct *napi) { struct sk_buff *skb, *next; for (skb = napi->gro_list; skb; skb = next) { next = skb->next; skb->next = NULL; napi_gro_complete(skb); } napi->gro_count = 0; napi->gro_list = NULL; } EXPORT_SYMBOL(napi_gro_flush); enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { struct sk_buff **pp = NULL; struct packet_type *ptype; __be16 type = skb->protocol; struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; int same_flow; int mac_len; enum gro_result ret; if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) goto normal; if (skb_is_gso(skb) || skb_has_frag_list(skb)) goto normal; rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type != type || ptype->dev || !ptype->gro_receive) continue; skb_set_network_header(skb, skb_gro_offset(skb)); mac_len = skb->network_header - skb->mac_header; skb->mac_len = mac_len; NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 0; NAPI_GRO_CB(skb)->free = 0; pp = ptype->gro_receive(&napi->gro_list, skb); break; } rcu_read_unlock(); if (&ptype->list == head) goto normal; same_flow = NAPI_GRO_CB(skb)->same_flow; ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; if (pp) { struct sk_buff *nskb = *pp; *pp = nskb->next; nskb->next = NULL; napi_gro_complete(nskb); napi->gro_count--; } if (same_flow) goto ok; if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS) goto normal; napi->gro_count++; NAPI_GRO_CB(skb)->count = 1; skb_shinfo(skb)->gso_size = skb_gro_len(skb); skb->next = napi->gro_list; napi->gro_list = skb; ret = GRO_HELD; pull: if (skb_headlen(skb) < skb_gro_offset(skb)) { int grow = skb_gro_offset(skb) - skb_headlen(skb); BUG_ON(skb->end - skb->tail < grow); memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); skb->tail += grow; skb->data_len -= grow; skb_shinfo(skb)->frags[0].page_offset += grow; skb_shinfo(skb)->frags[0].size -= grow; if (unlikely(!skb_shinfo(skb)->frags[0].size)) { put_page(skb_shinfo(skb)->frags[0].page); memmove(skb_shinfo(skb)->frags, skb_shinfo(skb)->frags + 1, --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); } } ok: return ret; normal: ret = GRO_NORMAL; goto pull; } EXPORT_SYMBOL(dev_gro_receive); static inline gro_result_t __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { struct sk_buff *p; for (p = napi->gro_list; p; p = p->next) { unsigned long diffs; diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; diffs |= p->vlan_tci ^ skb->vlan_tci; diffs |= compare_ether_header(skb_mac_header(p), skb_gro_mac_header(skb)); NAPI_GRO_CB(p)->same_flow = !diffs; NAPI_GRO_CB(p)->flush = 0; } return dev_gro_receive(napi, skb); } gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) { switch (ret) { case GRO_NORMAL: if (netif_receive_skb(skb)) ret = GRO_DROP; break; case GRO_DROP: case GRO_MERGED_FREE: kfree_skb(skb); break; case GRO_HELD: case GRO_MERGED: break; } return ret; } EXPORT_SYMBOL(napi_skb_finish); void skb_gro_reset_offset(struct sk_buff *skb) { NAPI_GRO_CB(skb)->data_offset = 0; NAPI_GRO_CB(skb)->frag0 = NULL; NAPI_GRO_CB(skb)->frag0_len = 0; if (skb->mac_header == skb->tail && !PageHighMem(skb_shinfo(skb)->frags[0].page)) { NAPI_GRO_CB(skb)->frag0 = page_address(skb_shinfo(skb)->frags[0].page) + skb_shinfo(skb)->frags[0].page_offset; NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size; } } EXPORT_SYMBOL(skb_gro_reset_offset); gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { skb_gro_reset_offset(skb); return napi_skb_finish(__napi_gro_receive(napi, skb), skb); } EXPORT_SYMBOL(napi_gro_receive); static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) { __skb_pull(skb, skb_headlen(skb)); skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); skb->vlan_tci = 0; skb->dev = napi->dev; skb->skb_iif = 0; napi->skb = skb; } struct sk_buff *napi_get_frags(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; if (!skb) { skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); if (skb) napi->skb = skb; } return skb; } EXPORT_SYMBOL(napi_get_frags); gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, gro_result_t ret) { switch (ret) { case GRO_NORMAL: case GRO_HELD: skb->protocol = eth_type_trans(skb, skb->dev); if (ret == GRO_HELD) skb_gro_pull(skb, -ETH_HLEN); else if (netif_receive_skb(skb)) ret = GRO_DROP; break; case GRO_DROP: case GRO_MERGED_FREE: napi_reuse_skb(napi, skb); break; case GRO_MERGED: break; } return ret; } EXPORT_SYMBOL(napi_frags_finish); struct sk_buff *napi_frags_skb(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; struct ethhdr *eth; unsigned int hlen; unsigned int off; napi->skb = NULL; skb_reset_mac_header(skb); 
skb_gro_reset_offset(skb); off = skb_gro_offset(skb); hlen = off + sizeof(*eth); eth = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { eth = skb_gro_header_slow(skb, hlen, off); if (unlikely(!eth)) { napi_reuse_skb(napi, skb); skb = NULL; goto out; } } skb_gro_pull(skb, sizeof(*eth)); /* * This works because the only protocols we care about don't require * special handling. We'll fix it up properly at the end. */ skb->protocol = eth->h_proto; out: return skb; } EXPORT_SYMBOL(napi_frags_skb); gro_result_t napi_gro_frags(struct napi_struct *napi) { struct sk_buff *skb = napi_frags_skb(napi); if (!skb) return GRO_DROP; return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); } EXPORT_SYMBOL(napi_gro_frags); /* * net_rps_action_and_irq_enable() sends any pending IPIs for RPS. * Note: called with local irq disabled, but exits with local irq enabled. */ static void net_rps_action_and_irq_enable(struct softnet_data *sd) { #ifdef CONFIG_RPS struct softnet_data *remsd = sd->rps_ipi_list; if (remsd) { sd->rps_ipi_list = NULL; local_irq_enable(); /* Send pending IPIs to kick RPS processing on remote cpus. */ while (remsd) { struct softnet_data *next = remsd->rps_ipi_next; if (cpu_online(remsd->cpu)) __smp_call_function_single(remsd->cpu, &remsd->csd, 0); remsd = next; } } else #endif local_irq_enable(); } static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); #ifdef CONFIG_RPS /* Check if we have pending IPIs; it's better to send them now * rather than waiting for net_rx_action() to end. */ if (sd->rps_ipi_list) { local_irq_disable(); net_rps_action_and_irq_enable(sd); } #endif napi->weight = weight_p; local_irq_disable(); while (work < quota) { struct sk_buff *skb; unsigned int qlen; while ((skb = __skb_dequeue(&sd->process_queue))) { local_irq_enable(); __netif_receive_skb(skb); local_irq_disable(); input_queue_head_incr(sd); if (++work >= quota) { local_irq_enable(); return work; } } rps_lock(sd); qlen = skb_queue_len(&sd->input_pkt_queue); if (qlen) skb_queue_splice_tail_init(&sd->input_pkt_queue, &sd->process_queue); if (qlen < quota - work) { /* * Inline a custom version of __napi_complete(). * Only the current cpu owns and manipulates this napi, * and NAPI_STATE_SCHED is the only possible flag set on backlog. * We can use a plain write instead of clear_bit(), * and we don't need an smp_mb() memory barrier.
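 *
 * In other words, this open-codes the equivalent of
 *
 *	list_del(&napi->poll_list);
 *	smp_mb__before_clear_bit();
 *	clear_bit(NAPI_STATE_SCHED, &napi->state);
 *
 * (compare __napi_complete() below) without the barrier and atomic op.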
*/ list_del(&napi->poll_list); napi->state = 0; quota = work + qlen; } rps_unlock(sd); } local_irq_enable(); return work; } /** * __napi_schedule - schedule for receive * @n: entry to schedule * * The entry's receive function will be scheduled to run */ void __napi_schedule(struct napi_struct *n) { unsigned long flags; local_irq_save(flags); ____napi_schedule(&__get_cpu_var(softnet_data), n); local_irq_restore(flags); } EXPORT_SYMBOL(__napi_schedule); void __napi_complete(struct napi_struct *n) { BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); BUG_ON(n->gro_list); list_del(&n->poll_list); smp_mb__before_clear_bit(); clear_bit(NAPI_STATE_SCHED, &n->state); } EXPORT_SYMBOL(__napi_complete); void napi_complete(struct napi_struct *n) { unsigned long flags; /* * don't let napi dequeue from the cpu poll list * just in case it's running on a different cpu */ if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) return; napi_gro_flush(n); local_irq_save(flags); __napi_complete(n); local_irq_restore(flags); } EXPORT_SYMBOL(napi_complete); void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { INIT_LIST_HEAD(&napi->poll_list); napi->gro_count = 0; napi->gro_list = NULL; napi->skb = NULL; napi->poll = poll; napi->weight = weight; list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL spin_lock_init(&napi->poll_lock); napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, &napi->state); } EXPORT_SYMBOL(netif_napi_add); void netif_napi_del(struct napi_struct *napi) { struct sk_buff *skb, *next; list_del_init(&napi->dev_list); napi_free_frags(napi); for (skb = napi->gro_list; skb; skb = next) { next = skb->next; skb->next = NULL; kfree_skb(skb); } napi->gro_list = NULL; napi->gro_count = 0; } EXPORT_SYMBOL(netif_napi_del); static void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = &__get_cpu_var(softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; void *have; local_irq_disable(); while (!list_empty(&sd->poll_list)) { struct napi_struct *n; int work, weight; /* If the softirq window is exhausted then punt. * Allow this to run for 2 jiffies, which allows * an average latency of 1.5/HZ. */ if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) goto softnet_break; local_irq_enable(); /* Even though interrupts have been re-enabled, this * access is safe because interrupts can only add new * entries to the tail of this list, and only ->poll() * calls can remove this head entry from the list. */ n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); have = netpoll_poll_lock(n); weight = n->weight; /* This NAPI_STATE_SCHED test is for avoiding a race * with netpoll's poll_napi(). Only the entity which * obtains the lock and sees NAPI_STATE_SCHED set will * actually make the ->poll() call. Therefore we avoid * accidentally calling ->poll() when NAPI is not scheduled. */ work = 0; if (test_bit(NAPI_STATE_SCHED, &n->state)) { work = n->poll(n, weight); trace_napi_poll(n); } WARN_ON_ONCE(work > weight); budget -= work; local_irq_disable(); /* Drivers must not modify the NAPI state if they * consume the entire weight. In such cases this code * still "owns" the NAPI instance and therefore can * move the instance around on the list at-will.
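 *
 * A hypothetical driver poll() honouring this contract might look
 * like the following sketch (my_clean_rx() is an assumed helper):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = my_clean_rx(napi, budget);
 *
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}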
*/ if (unlikely(work == weight)) { if (unlikely(napi_disable_pending(n))) { local_irq_enable(); napi_complete(n); local_irq_disable(); } else list_move_tail(&n->poll_list, &sd->poll_list); } netpoll_poll_unlock(have); } out: net_rps_action_and_irq_enable(sd); #ifdef CONFIG_NET_DMA /* * There may not be any more sk_buffs coming right now, so push * any pending DMA copies to hardware */ dma_issue_pending_all(); #endif return; softnet_break: sd->time_squeeze++; __raise_softirq_irqoff(NET_RX_SOFTIRQ); goto out; } static gifconf_func_t *gifconf_list[NPROTO]; /** * register_gifconf - register a SIOCGIF handler * @family: Address family * @gifconf: Function handler * * Register protocol dependent address dumping routines. The handler * that is passed must not be freed or reused until it has been replaced * by another handler. */ int register_gifconf(unsigned int family, gifconf_func_t *gifconf) { if (family >= NPROTO) return -EINVAL; gifconf_list[family] = gifconf; return 0; } EXPORT_SYMBOL(register_gifconf); /* * Map an interface index to its name (SIOCGIFNAME) */ /* * We need this ioctl for efficient implementation of the * if_indextoname() function required by the IPv6 API. Without * it, we would have to search all the interfaces to find a * match. --pb */ static int dev_ifname(struct net *net, struct ifreq __user *arg) { struct net_device *dev; struct ifreq ifr; /* * Fetch the caller's info block. */ if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); if (!dev) { rcu_read_unlock(); return -ENODEV; } strcpy(ifr.ifr_name, dev->name); rcu_read_unlock(); if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) return -EFAULT; return 0; } /* * Perform a SIOCGIFCONF call. This structure will change * size eventually, and there is nothing I can do about it. * Thus we will need a 'compatibility mode'. */ static int dev_ifconf(struct net *net, char __user *arg) { struct ifconf ifc; struct net_device *dev; char __user *pos; int len; int total; int i; /* * Fetch the caller's info block. */ if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) return -EFAULT; pos = ifc.ifc_buf; len = ifc.ifc_len; /* * Loop over the interfaces, and write an info block for each. */ total = 0; for_each_netdev(net, dev) { for (i = 0; i < NPROTO; i++) { if (gifconf_list[i]) { int done; if (!pos) done = gifconf_list[i](dev, NULL, 0); else done = gifconf_list[i](dev, pos + total, len - total); if (done < 0) return -EFAULT; total += done; } } } /* * All done. Write the updated control block back to the caller. */ ifc.ifc_len = total; /* * Both BSD and Solaris return 0 here, so we do too. */ return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; } #ifdef CONFIG_PROC_FS /* * This is invoked by the /proc filesystem handler to display a device * in detail. */ void *dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct net *net = seq_file_net(seq); loff_t off; struct net_device *dev; rcu_read_lock(); if (!*pos) return SEQ_START_TOKEN; off = 1; for_each_netdev_rcu(net, dev) if (off++ == *pos) return dev; return NULL; } void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net_device *dev = (v == SEQ_START_TOKEN) ? 
first_net_device(seq_file_net(seq)) : next_net_device((struct net_device *)v); ++*pos; return rcu_dereference(dev); } void dev_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", dev->name, stats->rx_bytes, stats->rx_packets, stats->rx_errors, stats->rx_dropped + stats->rx_missed_errors, stats->rx_fifo_errors, stats->rx_length_errors + stats->rx_over_errors + stats->rx_crc_errors + stats->rx_frame_errors, stats->rx_compressed, stats->multicast, stats->tx_bytes, stats->tx_packets, stats->tx_errors, stats->tx_dropped, stats->tx_fifo_errors, stats->collisions, stats->tx_carrier_errors + stats->tx_aborted_errors + stats->tx_window_errors + stats->tx_heartbeat_errors, stats->tx_compressed); } /* * Called from the PROCfs module. This now uses the new arbitrary sized * /proc/net interface to create /proc/net/dev */ static int dev_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Inter-| Receive " " | Transmit\n" " face |bytes packets errs drop fifo frame " "compressed multicast|bytes packets errs " "drop fifo colls carrier compressed\n"); else dev_seq_printf_stats(seq, v); return 0; } static struct softnet_data *softnet_get_online(loff_t *pos) { struct softnet_data *sd = NULL; while (*pos < nr_cpu_ids) if (cpu_online(*pos)) { sd = &per_cpu(softnet_data, *pos); break; } else ++*pos; return sd; } static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) { return softnet_get_online(pos); } static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return softnet_get_online(pos); } static void softnet_seq_stop(struct seq_file *seq, void *v) { } static int softnet_seq_show(struct seq_file *seq, void *v) { struct softnet_data *sd = v; seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", sd->processed, sd->dropped, sd->time_squeeze, 0, 0, 0, 0, 0, /* was fastroute */ sd->cpu_collision, sd->received_rps); return 0; } static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = dev_seq_show, }; static int dev_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &dev_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations dev_seq_fops = { .owner = THIS_MODULE, .open = dev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static const struct seq_operations softnet_seq_ops = { .start = softnet_seq_start, .next = softnet_seq_next, .stop = softnet_seq_stop, .show = softnet_seq_show, }; static int softnet_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &softnet_seq_ops); } static const struct file_operations softnet_seq_fops = { .owner = THIS_MODULE, .open = softnet_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *ptype_get_idx(loff_t pos) { struct packet_type *pt = NULL; loff_t i = 0; int t; list_for_each_entry_rcu(pt, &ptype_all, list) { if (i == pos) return pt; ++i; } for (t = 0; t < PTYPE_HASH_SIZE; t++) { list_for_each_entry_rcu(pt, &ptype_base[t], list) { if (i == pos) return pt; ++i; } } return NULL; } static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { 
rcu_read_lock(); return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; } static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct packet_type *pt; struct list_head *nxt; int hash; ++*pos; if (v == SEQ_START_TOKEN) return ptype_get_idx(0); pt = v; nxt = pt->list.next; if (pt->type == htons(ETH_P_ALL)) { if (nxt != &ptype_all) goto found; hash = 0; nxt = ptype_base[0].next; } else hash = ntohs(pt->type) & PTYPE_HASH_MASK; while (nxt == &ptype_base[hash]) { if (++hash >= PTYPE_HASH_SIZE) return NULL; nxt = ptype_base[hash].next; } found: return list_entry(nxt, struct packet_type, list); } static void ptype_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int ptype_seq_show(struct seq_file *seq, void *v) { struct packet_type *pt = v; if (v == SEQ_START_TOKEN) seq_puts(seq, "Type Device Function\n"); else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else seq_printf(seq, "%04x", ntohs(pt->type)); seq_printf(seq, " %-8s %pF\n", pt->dev ? pt->dev->name : "", pt->func); } return 0; } static const struct seq_operations ptype_seq_ops = { .start = ptype_seq_start, .next = ptype_seq_next, .stop = ptype_seq_stop, .show = ptype_seq_show, }; static int ptype_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &ptype_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations ptype_seq_fops = { .owner = THIS_MODULE, .open = ptype_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static int __net_init dev_proc_net_init(struct net *net) { int rc = -ENOMEM; if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) goto out; if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) goto out_dev; if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) goto out_softnet; if (wext_proc_init(net)) goto out_ptype; rc = 0; out: return rc; out_ptype: proc_net_remove(net, "ptype"); out_softnet: proc_net_remove(net, "softnet_stat"); out_dev: proc_net_remove(net, "dev"); goto out; } static void __net_exit dev_proc_net_exit(struct net *net) { wext_proc_exit(net); proc_net_remove(net, "ptype"); proc_net_remove(net, "softnet_stat"); proc_net_remove(net, "dev"); } static struct pernet_operations __net_initdata dev_proc_ops = { .init = dev_proc_net_init, .exit = dev_proc_net_exit, }; static int __init dev_proc_init(void) { return register_pernet_subsys(&dev_proc_ops); } #else #define dev_proc_init() 0 #endif /* CONFIG_PROC_FS */ /** * netdev_set_master - set up master/slave pair * @slave: slave device * @master: new master device * * Changes the master device of the slave. Pass %NULL to break the * bonding. The caller must hold the RTNL semaphore. On a failure * a negative errno code is returned. On success the reference counts * are adjusted, %RTM_NEWLINK is sent to the routing socket and the * function returns zero. 
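 *
 * Hypothetical usage by a bonding-style driver (slave_dev and
 * bond_dev are assumed names):
 *
 *	err = netdev_set_master(slave_dev, bond_dev);	(enslave)
 *	...
 *	netdev_set_master(slave_dev, NULL);		(release)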
*/ int netdev_set_master(struct net_device *slave, struct net_device *master) { struct net_device *old = slave->master; ASSERT_RTNL(); if (master) { if (old) return -EBUSY; dev_hold(master); } slave->master = master; if (old) { synchronize_net(); dev_put(old); } if (master) slave->flags |= IFF_SLAVE; else slave->flags &= ~IFF_SLAVE; rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); return 0; } EXPORT_SYMBOL(netdev_set_master); static void dev_change_rx_flags(struct net_device *dev, int flags) { const struct net_device_ops *ops = dev->netdev_ops; if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) ops->ndo_change_rx_flags(dev, flags); } static int __dev_set_promiscuity(struct net_device *dev, int inc) { unsigned short old_flags = dev->flags; uid_t uid; gid_t gid; ASSERT_RTNL(); dev->flags |= IFF_PROMISC; dev->promiscuity += inc; if (dev->promiscuity == 0) { /* * Avoid overflow. * If inc causes overflow, untouch promisc and return error. */ if (inc < 0) dev->flags &= ~IFF_PROMISC; else { dev->promiscuity -= inc; printk(KERN_WARNING "%s: promiscuity touches roof, " "set promiscuity failed, promiscuity feature " "of device might be broken.\n", dev->name); return -EOVERFLOW; } } if (dev->flags != old_flags) { printk(KERN_INFO "device %s %s promiscuous mode\n", dev->name, (dev->flags & IFF_PROMISC) ? "entered" : "left"); if (audit_enabled) { current_uid_gid(&uid, &gid); audit_log(current->audit_context, GFP_ATOMIC, AUDIT_ANOM_PROMISCUOUS, "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", dev->name, (dev->flags & IFF_PROMISC), (old_flags & IFF_PROMISC), audit_get_loginuid(current), uid, gid, audit_get_sessionid(current)); } dev_change_rx_flags(dev, IFF_PROMISC); } return 0; } /** * dev_set_promiscuity - update promiscuity count on a device * @dev: device * @inc: modifier * * Add or remove promiscuity from a device. While the count in the device * remains above zero the interface remains promiscuous. Once it hits zero * the device reverts to normal filtering operation. A negative inc * value is used to drop promiscuity on the device. * Return 0 if successful or a negative errno code on error. */ int dev_set_promiscuity(struct net_device *dev, int inc) { unsigned short old_flags = dev->flags; int err; err = __dev_set_promiscuity(dev, inc); if (err < 0) return err; if (dev->flags != old_flags) dev_set_rx_mode(dev); return err; } EXPORT_SYMBOL(dev_set_promiscuity); /** * dev_set_allmulti - update allmulti count on a device * @dev: device * @inc: modifier * * Add or remove reception of all multicast frames to a device. While the * count in the device remains above zero the interface remains listening * to all multicast frames. Once it hits zero the device reverts to normal * filtering operation. A negative @inc value is used to drop the counter * when releasing a resource needing all multicasts. * Return 0 if successful or a negative errno code on error. */ int dev_set_allmulti(struct net_device *dev, int inc) { unsigned short old_flags = dev->flags; ASSERT_RTNL(); dev->flags |= IFF_ALLMULTI; dev->allmulti += inc; if (dev->allmulti == 0) { /* * Avoid overflow. * If inc causes overflow, untouch allmulti and return error.
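 *
 * That is: hitting zero via a negative inc is the legitimate
 * "last user gone" case and simply clears IFF_ALLMULTI below;
 * hitting zero via a positive inc means the counter wrapped, so
 * the increment is undone and -EOVERFLOW is returned.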
*/ if (inc < 0) dev->flags &= ~IFF_ALLMULTI; else { dev->allmulti -= inc; printk(KERN_WARNING "%s: allmulti touches roof, " "set allmulti failed, allmulti feature of " "device might be broken.\n", dev->name); return -EOVERFLOW; } } if (dev->flags ^ old_flags) { dev_change_rx_flags(dev, IFF_ALLMULTI); dev_set_rx_mode(dev); } return 0; } EXPORT_SYMBOL(dev_set_allmulti); /* * Upload unicast and multicast address lists to device and * configure RX filtering. When the device doesn't support unicast * filtering it is put in promiscuous mode while unicast addresses * are present. */ void __dev_set_rx_mode(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; /* dev_open will call this function so the list will stay sane. */ if (!(dev->flags&IFF_UP)) return; if (!netif_device_present(dev)) return; if (ops->ndo_set_rx_mode) ops->ndo_set_rx_mode(dev); else { /* Unicast address changes may only happen under the rtnl, * therefore calling __dev_set_promiscuity here is safe. */ if (!netdev_uc_empty(dev) && !dev->uc_promisc) { __dev_set_promiscuity(dev, 1); dev->uc_promisc = 1; } else if (netdev_uc_empty(dev) && dev->uc_promisc) { __dev_set_promiscuity(dev, -1); dev->uc_promisc = 0; } if (ops->ndo_set_multicast_list) ops->ndo_set_multicast_list(dev); } } void dev_set_rx_mode(struct net_device *dev) { netif_addr_lock_bh(dev); __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); } /** * dev_get_flags - get flags reported to userspace * @dev: device * * Get the combination of flag bits exported through APIs to userspace. */ unsigned dev_get_flags(const struct net_device *dev) { unsigned flags; flags = (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI | IFF_RUNNING | IFF_LOWER_UP | IFF_DORMANT)) | (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); if (netif_running(dev)) { if (netif_oper_up(dev)) flags |= IFF_RUNNING; if (netif_carrier_ok(dev)) flags |= IFF_LOWER_UP; if (netif_dormant(dev)) flags |= IFF_DORMANT; } return flags; } EXPORT_SYMBOL(dev_get_flags); int __dev_change_flags(struct net_device *dev, unsigned int flags) { int old_flags = dev->flags; int ret; ASSERT_RTNL(); /* * Set the flags on our device. */ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | IFF_AUTOMEDIA)) | (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | IFF_ALLMULTI)); /* * Load in the correct multicast list now the flags have changed. */ if ((old_flags ^ flags) & IFF_MULTICAST) dev_change_rx_flags(dev, IFF_MULTICAST); dev_set_rx_mode(dev); /* * Have we downed the interface? We handle IFF_UP ourselves * according to user attempts to set it, rather than blindly * setting it. */ ret = 0; if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); if (!ret) dev_set_rx_mode(dev); } if ((flags ^ dev->gflags) & IFF_PROMISC) { int inc = (flags & IFF_PROMISC) ? 1 : -1; dev->gflags ^= IFF_PROMISC; dev_set_promiscuity(dev, inc); } /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI * is important. Some (broken) drivers set IFF_PROMISC when * IFF_ALLMULTI is requested, without asking us and without * reporting it. */ if ((flags ^ dev->gflags) & IFF_ALLMULTI) { int inc = (flags & IFF_ALLMULTI) ?
1 : -1; dev->gflags ^= IFF_ALLMULTI; dev_set_allmulti(dev, inc); } return ret; } void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) { unsigned int changes = dev->flags ^ old_flags; if (changes & IFF_UP) { if (dev->flags & IFF_UP) call_netdevice_notifiers(NETDEV_UP, dev); else call_netdevice_notifiers(NETDEV_DOWN, dev); } if (dev->flags & IFF_UP && (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) call_netdevice_notifiers(NETDEV_CHANGE, dev); } /** * dev_change_flags - change device settings * @dev: device * @flags: device state flags * * Change settings on device based state flags. The flags are * in the userspace exported format. */ int dev_change_flags(struct net_device *dev, unsigned flags) { int ret, changes; int old_flags = dev->flags; ret = __dev_change_flags(dev, flags); if (ret < 0) return ret; changes = old_flags ^ dev->flags; if (changes) rtmsg_ifinfo(RTM_NEWLINK, dev, changes); __dev_notify_flags(dev, old_flags); return ret; } EXPORT_SYMBOL(dev_change_flags); /** * dev_set_mtu - Change maximum transfer unit * @dev: device * @new_mtu: new transfer unit * * Change the maximum transfer size of the network device. */ int dev_set_mtu(struct net_device *dev, int new_mtu) { const struct net_device_ops *ops = dev->netdev_ops; int err; if (new_mtu == dev->mtu) return 0; /* MTU must be positive. */ if (new_mtu < 0) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; err = 0; if (ops->ndo_change_mtu) err = ops->ndo_change_mtu(dev, new_mtu); else dev->mtu = new_mtu; if (!err && dev->flags & IFF_UP) call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); return err; } EXPORT_SYMBOL(dev_set_mtu); /** * dev_set_mac_address - Change Media Access Control Address * @dev: device * @sa: new address * * Change the hardware (MAC) address of the device */ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) { const struct net_device_ops *ops = dev->netdev_ops; int err; if (!ops->ndo_set_mac_address) return -EOPNOTSUPP; if (sa->sa_family != dev->type) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; err = ops->ndo_set_mac_address(dev, sa); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return err; } EXPORT_SYMBOL(dev_set_mac_address); /* * Perform the SIOCxIFxxx calls, inside rcu_read_lock() */ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) { int err; struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name); if (!dev) return -ENODEV; switch (cmd) { case SIOCGIFFLAGS: /* Get interface flags */ ifr->ifr_flags = (short) dev_get_flags(dev); return 0; case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */ ifr->ifr_metric = 0; return 0; case SIOCGIFMTU: /* Get the MTU of a device */ ifr->ifr_mtu = dev->mtu; return 0; case SIOCGIFHWADDR: if (!dev->addr_len) memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); else memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); ifr->ifr_hwaddr.sa_family = dev->type; return 0; case SIOCGIFSLAVE: err = -EINVAL; break; case SIOCGIFMAP: ifr->ifr_map.mem_start = dev->mem_start; ifr->ifr_map.mem_end = dev->mem_end; ifr->ifr_map.base_addr = dev->base_addr; ifr->ifr_map.irq = dev->irq; ifr->ifr_map.dma = dev->dma; ifr->ifr_map.port = dev->if_port; return 0; case SIOCGIFINDEX: ifr->ifr_ifindex = dev->ifindex; return 0; case SIOCGIFTXQLEN: ifr->ifr_qlen = dev->tx_queue_len; return 0; default: /* dev_ioctl() should ensure this case * is never reached */ 
WARN_ON(1); err = -EINVAL; break; } return err; } /* * Perform the SIOCxIFxxx calls, inside rtnl_lock() */ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) { int err; struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); const struct net_device_ops *ops; if (!dev) return -ENODEV; ops = dev->netdev_ops; switch (cmd) { case SIOCSIFFLAGS: /* Set interface flags */ return dev_change_flags(dev, ifr->ifr_flags); case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */ return -EOPNOTSUPP; case SIOCSIFMTU: /* Set the MTU of a device */ return dev_set_mtu(dev, ifr->ifr_mtu); case SIOCSIFHWADDR: return dev_set_mac_address(dev, &ifr->ifr_hwaddr); case SIOCSIFHWBROADCAST: if (ifr->ifr_hwaddr.sa_family != dev->type) return -EINVAL; memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return 0; case SIOCSIFMAP: if (ops->ndo_set_config) { if (!netif_device_present(dev)) return -ENODEV; return ops->ndo_set_config(dev, &ifr->ifr_map); } return -EOPNOTSUPP; case SIOCADDMULTI: if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCDELMULTI: if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCSIFTXQLEN: if (ifr->ifr_qlen < 0) return -EINVAL; dev->tx_queue_len = ifr->ifr_qlen; return 0; case SIOCSIFNAME: ifr->ifr_newname[IFNAMSIZ-1] = '\0'; return dev_change_name(dev, ifr->ifr_newname); /* * Unknown or private ioctl */ default: if ((cmd >= SIOCDEVPRIVATE && cmd <= SIOCDEVPRIVATE + 15) || cmd == SIOCBONDENSLAVE || cmd == SIOCBONDRELEASE || cmd == SIOCBONDSETHWADDR || cmd == SIOCBONDSLAVEINFOQUERY || cmd == SIOCBONDINFOQUERY || cmd == SIOCBONDCHANGEACTIVE || cmd == SIOCGMIIPHY || cmd == SIOCGMIIREG || cmd == SIOCSMIIREG || cmd == SIOCBRADDIF || cmd == SIOCBRDELIF || cmd == SIOCSHWTSTAMP || cmd == SIOCWANDEV) { err = -EOPNOTSUPP; if (ops->ndo_do_ioctl) { if (netif_device_present(dev)) err = ops->ndo_do_ioctl(dev, ifr, cmd); else err = -ENODEV; } } else err = -EINVAL; } return err; } /* * This function handles all "interface"-type I/O control requests. The actual * 'doing' part of this is dev_ifsioc above. */ /** * dev_ioctl - network device ioctl * @net: the applicable net namespace * @cmd: command to issue * @arg: pointer to a struct ifreq in user space * * Issue ioctl functions to devices. This is normally called by the * user space syscall interfaces but can sometimes be useful for * other purposes. The return value is the return from the syscall if * positive or a negative errno code on error. */ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) { struct ifreq ifr; int ret; char *colon; /* One special case: SIOCGIFCONF takes ifconf argument and requires shared lock, because it sleeps writing to user space. 
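 *
 * For reference, a hypothetical userspace caller of SIOCGIFCONF
 * (sketch only; fd is an assumed AF_INET socket):
 *
 *	char buf[4096];
 *	struct ifconf ifc = { .ifc_len = sizeof(buf), .ifc_buf = buf };
 *
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0)
 *		walk the ifc.ifc_len bytes of struct ifreq entries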
*/ if (cmd == SIOCGIFCONF) { rtnl_lock(); ret = dev_ifconf(net, (char __user *) arg); rtnl_unlock(); return ret; } if (cmd == SIOCGIFNAME) return dev_ifname(net, (struct ifreq __user *)arg); if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; ifr.ifr_name[IFNAMSIZ-1] = 0; colon = strchr(ifr.ifr_name, ':'); if (colon) *colon = 0; /* * See which interface the caller is talking about. */ switch (cmd) { /* * These ioctl calls: * - can be done by all. * - atomic and do not require locking. * - return a value */ case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFHWADDR: case SIOCGIFSLAVE: case SIOCGIFMAP: case SIOCGIFINDEX: case SIOCGIFTXQLEN: dev_load(net, ifr.ifr_name); rcu_read_lock(); ret = dev_ifsioc_locked(net, &ifr, cmd); rcu_read_unlock(); if (!ret) { if (colon) *colon = ':'; if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) ret = -EFAULT; } return ret; case SIOCETHTOOL: dev_load(net, ifr.ifr_name); rtnl_lock(); ret = dev_ethtool(net, &ifr); rtnl_unlock(); if (!ret) { if (colon) *colon = ':'; if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) ret = -EFAULT; } return ret; /* * These ioctl calls: * - require superuser power. * - require strict serialization. * - return a value */ case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSIFNAME: if (!capable(CAP_NET_ADMIN)) return -EPERM; dev_load(net, ifr.ifr_name); rtnl_lock(); ret = dev_ifsioc(net, &ifr, cmd); rtnl_unlock(); if (!ret) { if (colon) *colon = ':'; if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) ret = -EFAULT; } return ret; /* * These ioctl calls: * - require superuser power. * - require strict serialization. * - do not return a value */ case SIOCSIFFLAGS: case SIOCSIFMETRIC: case SIOCSIFMTU: case SIOCSIFMAP: case SIOCSIFHWADDR: case SIOCSIFSLAVE: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCSIFHWBROADCAST: case SIOCSIFTXQLEN: case SIOCSMIIREG: case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSHWTSTAMP: if (!capable(CAP_NET_ADMIN)) return -EPERM; /* fall through */ case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: dev_load(net, ifr.ifr_name); rtnl_lock(); ret = dev_ifsioc(net, &ifr, cmd); rtnl_unlock(); return ret; case SIOCGIFMEM: /* Get the per device memory space. We can add this but * currently do not support it */ case SIOCSIFMEM: /* Set the per device memory buffer space. * Not applicable in our case */ case SIOCSIFLINK: return -EINVAL; /* * Unknown or private ioctl. */ default: if (cmd == SIOCWANDEV || (cmd >= SIOCDEVPRIVATE && cmd <= SIOCDEVPRIVATE + 15)) { dev_load(net, ifr.ifr_name); rtnl_lock(); ret = dev_ifsioc(net, &ifr, cmd); rtnl_unlock(); if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))) ret = -EFAULT; return ret; } /* Take care of Wireless Extensions */ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) return wext_handle_ioctl(net, &ifr, cmd, arg); return -EINVAL; } } /** * dev_new_index - allocate an ifindex * @net: the applicable net namespace * * Returns a suitable unique value for a new device interface * number. The caller must hold the rtnl semaphore or the * dev_base_lock to be sure it remains unique. 
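 *
 * Note the wraparound handling below: once ++ifindex overflows to a
 * non-positive value it restarts at 1, and the loop keeps probing
 * until an index with no registered device is found.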
*/ static int dev_new_index(struct net *net) { static int ifindex; for (;;) { if (++ifindex <= 0) ifindex = 1; if (!__dev_get_by_index(net, ifindex)) return ifindex; } } /* Delayed registration/unregistration */ static LIST_HEAD(net_todo_list); static void net_set_todo(struct net_device *dev) { list_add_tail(&dev->todo_list, &net_todo_list); } static void rollback_registered_many(struct list_head *head) { struct net_device *dev, *tmp; BUG_ON(dev_boot_phase); ASSERT_RTNL(); list_for_each_entry_safe(dev, tmp, head, unreg_list) { /* Some devices call without registering * for initialization unwind. Remove those * devices and proceed with the remaining. */ if (dev->reg_state == NETREG_UNINITIALIZED) { pr_debug("unregister_netdevice: device %s/%p never " "was registered\n", dev->name, dev); WARN_ON(1); list_del(&dev->unreg_list); continue; } BUG_ON(dev->reg_state != NETREG_REGISTERED); } /* If device is running, close it first. */ dev_close_many(head); list_for_each_entry(dev, head, unreg_list) { /* And unlink it from device chain. */ unlist_netdevice(dev); dev->reg_state = NETREG_UNREGISTERING; } synchronize_net(); list_for_each_entry(dev, head, unreg_list) { /* Shutdown queueing discipline. */ dev_shutdown(dev); /* Notify protocols that we are about to destroy * this device. They should clean all the things. */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); if (!dev->rtnl_link_ops || dev->rtnl_link_state == RTNL_LINK_INITIALIZED) rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); /* * Flush the unicast and multicast chains */ dev_uc_flush(dev); dev_mc_flush(dev); if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); /* Notifier chain MUST detach us from master device. */ WARN_ON(dev->master); /* Remove entries from kobject tree */ netdev_unregister_kobject(dev); } /* Process any work delayed until the end of the batch */ dev = list_first_entry(head, struct net_device, unreg_list); call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); rcu_barrier(); list_for_each_entry(dev, head, unreg_list) dev_put(dev); } static void rollback_registered(struct net_device *dev) { LIST_HEAD(single); list_add(&dev->unreg_list, &single); rollback_registered_many(&single); list_del(&single); } unsigned long netdev_fix_features(unsigned long features, const char *name) { /* Fix illegal SG+CSUM combinations. */ if ((features & NETIF_F_SG) && !(features & NETIF_F_ALL_CSUM)) { if (name) printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no " "checksum feature.\n", name); features &= ~NETIF_F_SG; } /* TSO requires that SG is present as well. */ if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { if (name) printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no " "SG feature.\n", name); features &= ~NETIF_F_TSO; } if (features & NETIF_F_UFO) { /* maybe split UFO into V4 and V6? */ if (!((features & NETIF_F_GEN_CSUM) || (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { if (name) printk(KERN_ERR "%s: Dropping NETIF_F_UFO " "since no checksum offload features.\n", name); features &= ~NETIF_F_UFO; } if (!(features & NETIF_F_SG)) { if (name) printk(KERN_ERR "%s: Dropping NETIF_F_UFO " "since no NETIF_F_SG feature.\n", name); features &= ~NETIF_F_UFO; } } return features; } EXPORT_SYMBOL(netdev_fix_features); /** * netif_stacked_transfer_operstate - transfer operstate * @rootdev: the root or lower level device to transfer state from * @dev: the device to transfer operstate to * * Transfer operational state from root to device.
This is normally * called when a stacking relationship exists between the root * device and the device (a leaf device). */ void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev) { if (rootdev->operstate == IF_OPER_DORMANT) netif_dormant_on(dev); else netif_dormant_off(dev); if (netif_carrier_ok(rootdev)) { if (!netif_carrier_ok(dev)) netif_carrier_on(dev); } else { if (netif_carrier_ok(dev)) netif_carrier_off(dev); } } EXPORT_SYMBOL(netif_stacked_transfer_operstate); #ifdef CONFIG_RPS static int netif_alloc_rx_queues(struct net_device *dev) { unsigned int i, count = dev->num_rx_queues; struct netdev_rx_queue *rx; BUG_ON(count < 1); rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); if (!rx) { pr_err("netdev: Unable to allocate %u rx queues.\n", count); return -ENOMEM; } dev->_rx = rx; for (i = 0; i < count; i++) rx[i].dev = dev; return 0; } #endif static void netdev_init_one_queue(struct net_device *dev, struct netdev_queue *queue, void *_unused) { /* Initialize queue lock */ spin_lock_init(&queue->_xmit_lock); netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); queue->xmit_lock_owner = -1; netdev_queue_numa_node_write(queue, NUMA_NO_NODE); queue->dev = dev; } static int netif_alloc_netdev_queues(struct net_device *dev) { unsigned int count = dev->num_tx_queues; struct netdev_queue *tx; BUG_ON(count < 1); tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); if (!tx) { pr_err("netdev: Unable to allocate %u tx queues.\n", count); return -ENOMEM; } dev->_tx = tx; netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); spin_lock_init(&dev->tx_global_lock); return 0; } /** * register_netdevice - register a network device * @dev: device to register * * Take a completed network device structure and add it to the kernel * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier * chain. 0 is returned on success. A negative errno code is returned * on a failure to set up the device, or if the name is a duplicate. * * Callers must hold the rtnl semaphore. You may want * register_netdev() instead of this. * * BUGS: * The locking appears insufficient to guarantee two parallel registers * will not get the same name. */ int register_netdevice(struct net_device *dev) { int ret; struct net *net = dev_net(dev); BUG_ON(dev_boot_phase); ASSERT_RTNL(); might_sleep(); /* When net_device's are persistent, this will be fatal.
*/ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); BUG_ON(!net); spin_lock_init(&dev->addr_list_lock); netdev_set_addr_lockdep_class(dev); dev->iflink = -1; /* Init, if this function is available */ if (dev->netdev_ops->ndo_init) { ret = dev->netdev_ops->ndo_init(dev); if (ret) { if (ret > 0) ret = -EIO; goto out; } } ret = dev_get_valid_name(dev, dev->name, 0); if (ret) goto err_uninit; dev->ifindex = dev_new_index(net); if (dev->iflink == -1) dev->iflink = dev->ifindex; /* Fix illegal checksum combinations */ if ((dev->features & NETIF_F_HW_CSUM) && (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n", dev->name); dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } if ((dev->features & NETIF_F_NO_CSUM) && (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n", dev->name); dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); } dev->features = netdev_fix_features(dev->features, dev->name); /* Enable software GSO if SG is supported. */ if (dev->features & NETIF_F_SG) dev->features |= NETIF_F_GSO; /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, * vlan_dev_init() will do the dev->features check, so these features * are enabled only if supported by underlying device. */ dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA); ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); ret = notifier_to_errno(ret); if (ret) goto err_uninit; ret = netdev_register_kobject(dev); if (ret) goto err_uninit; dev->reg_state = NETREG_REGISTERED; /* * Default initial state at registry is that the * device is present. */ set_bit(__LINK_STATE_PRESENT, &dev->state); dev_init_scheduler(dev); dev_hold(dev); list_netdevice(dev); /* Notify protocols that a new device appeared. */ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); ret = notifier_to_errno(ret); if (ret) { rollback_registered(dev); dev->reg_state = NETREG_UNREGISTERED; } /* * Prevent userspace races by waiting until the network * device is fully setup before sending notifications. */ if (!dev->rtnl_link_ops || dev->rtnl_link_state == RTNL_LINK_INITIALIZED) rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); out: return ret; err_uninit: if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); goto out; } EXPORT_SYMBOL(register_netdevice); /** * init_dummy_netdev - init a dummy network device for NAPI * @dev: device to init * * This takes a network device structure and initializes the minimum * amount of fields so it can be used to schedule NAPI polls without * registering a full blown interface. This is to be used by drivers * that need to tie several hardware interfaces to a single NAPI * poll scheduler due to HW limitations. */ int init_dummy_netdev(struct net_device *dev) { /* Clear everything. Note we don't initialize spinlocks * as they aren't supposed to be taken by any of the * NAPI code and this dummy netdev is supposed to be * only ever used for NAPI polls */ memset(dev, 0, sizeof(struct net_device)); /* make sure we BUG if trying to hit standard * register/unregister code path */ dev->reg_state = NETREG_DUMMY; /* NAPI wants this */ INIT_LIST_HEAD(&dev->napi_list); /* a dummy interface is started by default */ set_bit(__LINK_STATE_PRESENT, &dev->state); set_bit(__LINK_STATE_START, &dev->state); /* Note: We don't allocate pcpu_refcnt for dummy devices, * because users of this 'device' don't need to change * its refcount.
*/ return 0; } EXPORT_SYMBOL_GPL(init_dummy_netdev); /** * register_netdev - register a network device * @dev: device to register * * Take a completed network device structure and add it to the kernel * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier * chain. 0 is returned on success. A negative errno code is returned * on a failure to set up the device, or if the name is a duplicate. * * This is a wrapper around register_netdevice that takes the rtnl semaphore * and expands the device name if you passed a format string to * alloc_netdev. */ int register_netdev(struct net_device *dev) { int err; rtnl_lock(); /* * If the name is a format string the caller wants us to do a * name allocation. */ if (strchr(dev->name, '%')) { err = dev_alloc_name(dev, dev->name); if (err < 0) goto out; } err = register_netdevice(dev); out: rtnl_unlock(); return err; } EXPORT_SYMBOL(register_netdev); int netdev_refcnt_read(const struct net_device *dev) { int i, refcnt = 0; for_each_possible_cpu(i) refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); return refcnt; } EXPORT_SYMBOL(netdev_refcnt_read); /* * netdev_wait_allrefs - wait until all references are gone. * * This is called when unregistering network devices. * * Any protocol or device that holds a reference should register * for netdevice notification, and cleanup and put back the * reference if they receive an UNREGISTER event. * We can get stuck here if buggy protocols don't correctly * call dev_put. */ static void netdev_wait_allrefs(struct net_device *dev) { unsigned long rebroadcast_time, warning_time; int refcnt; linkwatch_forget_dev(dev); rebroadcast_time = warning_time = jiffies; refcnt = netdev_refcnt_read(dev); while (refcnt != 0) { if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { rtnl_lock(); /* Rebroadcast unregister notification */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users * should have already handled it the first time */ if (test_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { /* We must not have linkwatch events * pending on unregister. If this * happens, we simply run the queue * unscheduled, resulting in a noop * for this device. */ linkwatch_run_queue(); } __rtnl_unlock(); rebroadcast_time = jiffies; } msleep(250); refcnt = netdev_refcnt_read(dev); if (time_after(jiffies, warning_time + 10 * HZ)) { printk(KERN_EMERG "unregister_netdevice: " "waiting for %s to become free. Usage " "count = %d\n", dev->name, refcnt); warning_time = jiffies; } } } /* The sequence is: * * rtnl_lock(); * ... * register_netdevice(x1); * register_netdevice(x2); * ... * unregister_netdevice(y1); * unregister_netdevice(y2); * ... * rtnl_unlock(); * free_netdev(y1); * free_netdev(y2); * * We are invoked by rtnl_unlock(). * This allows us to deal with problems: * 1) We can delete sysfs objects which invoke hotplug * without deadlocking with linkwatch via keventd. * 2) Since we run with the RTNL semaphore not held, we can sleep * safely in order to wait for the netdev refcnt to drop to zero. * * We must not return until all unregister events added during * the interval the lock was held have been completed.
*/ void netdev_run_todo(void) { struct list_head list; /* Snapshot list, allow later requests */ list_replace_init(&net_todo_list, &list); __rtnl_unlock(); while (!list_empty(&list)) { struct net_device *dev = list_first_entry(&list, struct net_device, todo_list); list_del(&dev->todo_list); if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { printk(KERN_ERR "network todo '%s' but state %d\n", dev->name, dev->reg_state); dump_stack(); continue; } dev->reg_state = NETREG_UNREGISTERED; on_each_cpu(flush_backlog, dev, 1); netdev_wait_allrefs(dev); /* paranoia */ BUG_ON(netdev_refcnt_read(dev)); WARN_ON(rcu_dereference_raw(dev->ip_ptr)); WARN_ON(rcu_dereference_raw(dev->ip6_ptr)); WARN_ON(dev->dn_ptr); if (dev->destructor) dev->destructor(dev); /* Free network device */ kobject_put(&dev->dev.kobj); } } /* Convert net_device_stats to rtnl_link_stats64. They have the same * fields in the same order, with only the type differing. */ static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, const struct net_device_stats *netdev_stats) { #if BITS_PER_LONG == 64 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); memcpy(stats64, netdev_stats, sizeof(*stats64)); #else size_t i, n = sizeof(*stats64) / sizeof(u64); const unsigned long *src = (const unsigned long *)netdev_stats; u64 *dst = (u64 *)stats64; BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) != sizeof(*stats64) / sizeof(u64)); for (i = 0; i < n; i++) dst[i] = src[i]; #endif } /** * dev_get_stats - get network device statistics * @dev: device to get statistics from * @storage: place to store stats * * Get network statistics from device. Return @storage. * The device driver may provide its own method by setting * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; * otherwise the internal statistics structure is used. */ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, struct rtnl_link_stats64 *storage) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_get_stats64) { memset(storage, 0, sizeof(*storage)); ops->ndo_get_stats64(dev, storage); } else if (ops->ndo_get_stats) { netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); } else { netdev_stats_to_stats64(storage, &dev->stats); } storage->rx_dropped += atomic_long_read(&dev->rx_dropped); return storage; } EXPORT_SYMBOL(dev_get_stats); struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) { struct netdev_queue *queue = dev_ingress_queue(dev); #ifdef CONFIG_NET_CLS_ACT if (queue) return queue; queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) return NULL; netdev_init_one_queue(dev, queue, NULL); queue->qdisc = &noop_qdisc; queue->qdisc_sleeping = &noop_qdisc; rcu_assign_pointer(dev->ingress_queue, queue); #endif return queue; } /** * alloc_netdev_mqs - allocate network device * @sizeof_priv: size of private data to allocate space for * @name: device name format string * @setup: callback to initialize device * @txqs: the number of TX subqueues to allocate * @rxqs: the number of RX subqueues to allocate * * Allocates a struct net_device with private data area for driver use * and performs basic initialization. Also allocates subqueue structs * for each queue on the device.
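 *
 * Hypothetical driver-side usage (sketch only; struct my_priv,
 * my_setup and the queue counts are assumed):
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
 *			       my_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;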
*/ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, void (*setup)(struct net_device *), unsigned int txqs, unsigned int rxqs) { struct net_device *dev; size_t alloc_size; struct net_device *p; BUG_ON(strlen(name) >= sizeof(dev->name)); if (txqs < 1) { pr_err("alloc_netdev: Unable to allocate device " "with zero queues.\n"); return NULL; } #ifdef CONFIG_RPS if (rxqs < 1) { pr_err("alloc_netdev: Unable to allocate device " "with zero RX queues.\n"); return NULL; } #endif alloc_size = sizeof(struct net_device); if (sizeof_priv) { /* ensure 32-byte alignment of private area */ alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); alloc_size += sizeof_priv; } /* ensure 32-byte alignment of whole construct */ alloc_size += NETDEV_ALIGN - 1; p = kzalloc(alloc_size, GFP_KERNEL); if (!p) { printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n"); return NULL; } dev = PTR_ALIGN(p, NETDEV_ALIGN); dev->padded = (char *)dev - (char *)p; dev->pcpu_refcnt = alloc_percpu(int); if (!dev->pcpu_refcnt) goto free_p; if (dev_addr_init(dev)) goto free_pcpu; dev_mc_init(dev); dev_uc_init(dev); dev_net_set(dev, &init_net); dev->gso_max_size = GSO_MAX_SIZE; INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); dev->ethtool_ntuple_list.count = 0; INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); INIT_LIST_HEAD(&dev->link_watch_list); dev->priv_flags = IFF_XMIT_DST_RELEASE; setup(dev); dev->num_tx_queues = txqs; dev->real_num_tx_queues = txqs; if (netif_alloc_netdev_queues(dev)) goto free_all; #ifdef CONFIG_RPS dev->num_rx_queues = rxqs; dev->real_num_rx_queues = rxqs; if (netif_alloc_rx_queues(dev)) goto free_all; #endif strcpy(dev->name, name); return dev; free_all: free_netdev(dev); return NULL; free_pcpu: free_percpu(dev->pcpu_refcnt); kfree(dev->_tx); #ifdef CONFIG_RPS kfree(dev->_rx); #endif free_p: kfree(p); return NULL; } EXPORT_SYMBOL(alloc_netdev_mqs); /** * free_netdev - free network device * @dev: device * * This function does the last stage of destroying an allocated device * interface. The reference to the device object is released. * If this is the last reference then it will be freed. */ void free_netdev(struct net_device *dev) { struct napi_struct *p, *n; release_net(dev_net(dev)); kfree(dev->_tx); #ifdef CONFIG_RPS kfree(dev->_rx); #endif kfree(rcu_dereference_raw(dev->ingress_queue)); /* Flush device addresses */ dev_addr_flush(dev); /* Clear ethtool n-tuple list */ ethtool_ntuple_flush(dev); list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) netif_napi_del(p); free_percpu(dev->pcpu_refcnt); dev->pcpu_refcnt = NULL; /* Compatibility with error handling in drivers */ if (dev->reg_state == NETREG_UNINITIALIZED) { kfree((char *)dev - dev->padded); return; } BUG_ON(dev->reg_state != NETREG_UNREGISTERED); dev->reg_state = NETREG_RELEASED; /* will free via device release */ put_device(&dev->dev); } EXPORT_SYMBOL(free_netdev); /** * synchronize_net - Synchronize with packet receive processing * * Wait for packets currently being received to be done. * Does not block later packets from starting. */ void synchronize_net(void) { might_sleep(); synchronize_rcu(); } EXPORT_SYMBOL(synchronize_net); /** * unregister_netdevice_queue - remove device from the kernel * @dev: device * @head: list * * This function shuts down a device interface and removes it * from the kernel tables. * If head not NULL, device is queued to be unregistered later. * * Callers must hold the rtnl semaphore. You may want * unregister_netdev() instead of this. 
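 *
 * Hypothetical batched teardown (sketch only; dev1 and dev2 are
 * assumed to be registered devices):
 *
 *	LIST_HEAD(list);
 *
 *	unregister_netdevice_queue(dev1, &list);
 *	unregister_netdevice_queue(dev2, &list);
 *	unregister_netdevice_many(&list);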
*/ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) { ASSERT_RTNL(); if (head) { list_move_tail(&dev->unreg_list, head); } else { rollback_registered(dev); /* Finish processing unregister after unlock */ net_set_todo(dev); } } EXPORT_SYMBOL(unregister_netdevice_queue); /** * unregister_netdevice_many - unregister many devices * @head: list of devices */ void unregister_netdevice_many(struct list_head *head) { struct net_device *dev; if (!list_empty(head)) { rollback_registered_many(head); list_for_each_entry(dev, head, unreg_list) net_set_todo(dev); } } EXPORT_SYMBOL(unregister_netdevice_many); /** * unregister_netdev - remove device from the kernel * @dev: device * * This function shuts down a device interface and removes it * from the kernel tables. * * This is just a wrapper for unregister_netdevice that takes * the rtnl semaphore. In general you want to use this and not * unregister_netdevice. */ void unregister_netdev(struct net_device *dev) { rtnl_lock(); unregister_netdevice(dev); rtnl_unlock(); } EXPORT_SYMBOL(unregister_netdev); /** * dev_change_net_namespace - move device to a different network namespace * @dev: device * @net: network namespace * @pat: If not NULL, name pattern to try if the current device name * is already taken in the destination network namespace. * * This function shuts down a device interface and moves it * to a new network namespace. On success 0 is returned, on * a failure a negative errno code is returned. * * Callers must hold the rtnl semaphore. */ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) { int err; ASSERT_RTNL(); /* Don't allow namespace local devices to be moved. */ err = -EINVAL; if (dev->features & NETIF_F_NETNS_LOCAL) goto out; /* Ensure the device has been registered */ err = -EINVAL; if (dev->reg_state != NETREG_REGISTERED) goto out; /* Get out if there is nothing to do */ err = 0; if (net_eq(dev_net(dev), net)) goto out; /* Pick the destination device name, and ensure * we can use it in the destination network namespace. */ err = -EEXIST; if (__dev_get_by_name(net, dev->name)) { /* We get here if we can't use the current device name */ if (!pat) goto out; if (dev_get_valid_name(dev, pat, 1)) goto out; } /* * And now a mini version of register_netdevice and unregister_netdevice. */ /* If device is running close it first. */ dev_close(dev); /* And unlink it from device chain */ err = -ENODEV; unlist_netdevice(dev); synchronize_net(); /* Shutdown queueing discipline. */ dev_shutdown(dev); /* Notify protocols that we are about to destroy this device. They should clean all the things. Note that dev->reg_state stays at NETREG_REGISTERED. This is wanted because this way 8021q and macvlan know the device is just moving and can keep their slaves up. */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); /* * Flush the unicast and multicast chains */ dev_uc_flush(dev); dev_mc_flush(dev); /* Actually switch the network namespace */ dev_net_set(dev, net); /* If there is an ifindex conflict assign a new one */ if (__dev_get_by_index(net, dev->ifindex)) { int iflink = (dev->iflink == dev->ifindex); dev->ifindex = dev_new_index(net); if (iflink) dev->iflink = dev->ifindex; } /* Fixup kobjects */ err = device_rename(&dev->dev, dev->name); WARN_ON(err); /* Add the device back in the hashes */ list_netdevice(dev); /* Notify protocols that a new device appeared.
*/ call_netdevice_notifiers(NETDEV_REGISTER, dev); /* * Prevent userspace races by waiting until the network * device is fully setup before sending notifications. */ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); synchronize_net(); err = 0; out: return err; } EXPORT_SYMBOL_GPL(dev_change_net_namespace); static int dev_cpu_callback(struct notifier_block *nfb, unsigned long action, void *ocpu) { struct sk_buff **list_skb; struct sk_buff *skb; unsigned int cpu, oldcpu = (unsigned long)ocpu; struct softnet_data *sd, *oldsd; if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) return NOTIFY_OK; local_irq_disable(); cpu = smp_processor_id(); sd = &per_cpu(softnet_data, cpu); oldsd = &per_cpu(softnet_data, oldcpu); /* Find end of our completion_queue. */ list_skb = &sd->completion_queue; while (*list_skb) list_skb = &(*list_skb)->next; /* Append completion queue from offline CPU. */ *list_skb = oldsd->completion_queue; oldsd->completion_queue = NULL; /* Append output queue from offline CPU. */ if (oldsd->output_queue) { *sd->output_queue_tailp = oldsd->output_queue; sd->output_queue_tailp = oldsd->output_queue_tailp; oldsd->output_queue = NULL; oldsd->output_queue_tailp = &oldsd->output_queue; } raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->process_queue))) { netif_rx(skb); input_queue_head_incr(oldsd); } while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx(skb); input_queue_head_incr(oldsd); } return NOTIFY_OK; } /** * netdev_increment_features - increment feature set by one * @all: current feature set * @one: new feature set * @mask: mask feature set * * Computes a new feature set after adding a device with feature set * @one to the master device with current feature set @all. Will not * enable anything that is off in @mask. Returns the new feature set. */ unsigned long netdev_increment_features(unsigned long all, unsigned long one, unsigned long mask) { /* If device needs checksumming, downgrade to it. */ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); else if (mask & NETIF_F_ALL_CSUM) { /* If one device supports v4/v6 checksumming, set for all. */ if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) && !(all & NETIF_F_GEN_CSUM)) { all &= ~NETIF_F_ALL_CSUM; all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); } /* If one device supports hw checksumming, set for all. 
*/ if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) { all &= ~NETIF_F_ALL_CSUM; all |= NETIF_F_HW_CSUM; } } one |= NETIF_F_ALL_CSUM; one |= all & NETIF_F_ONE_FOR_ALL; all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO; all |= one & mask & NETIF_F_ONE_FOR_ALL; return all; } EXPORT_SYMBOL(netdev_increment_features); static struct hlist_head *netdev_create_hash(void) { int i; struct hlist_head *hash; hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL); if (hash != NULL) for (i = 0; i < NETDEV_HASHENTRIES; i++) INIT_HLIST_HEAD(&hash[i]); return hash; } /* Initialize per network namespace state */ static int __net_init netdev_init(struct net *net) { INIT_LIST_HEAD(&net->dev_base_head); net->dev_name_head = netdev_create_hash(); if (net->dev_name_head == NULL) goto err_name; net->dev_index_head = netdev_create_hash(); if (net->dev_index_head == NULL) goto err_idx; return 0; err_idx: kfree(net->dev_name_head); err_name: return -ENOMEM; } /** * netdev_drivername - network driver for the device * @dev: network device * @buffer: buffer for resulting name * @len: size of buffer * * Determine network driver for device. */ char *netdev_drivername(const struct net_device *dev, char *buffer, int len) { const struct device_driver *driver; const struct device *parent; if (len <= 0 || !buffer) return buffer; buffer[0] = 0; parent = dev->dev.parent; if (!parent) return buffer; driver = parent->driver; if (driver && driver->name) strlcpy(buffer, driver->name, len); return buffer; } static int __netdev_printk(const char *level, const struct net_device *dev, struct va_format *vaf) { int r; if (dev && dev->dev.parent) r = dev_printk(level, dev->dev.parent, "%s: %pV", netdev_name(dev), vaf); else if (dev) r = printk("%s%s: %pV", level, netdev_name(dev), vaf); else r = printk("%s(NULL net_device): %pV", level, vaf); return r; } int netdev_printk(const char *level, const struct net_device *dev, const char *format, ...) { struct va_format vaf; va_list args; int r; va_start(args, format); vaf.fmt = format; vaf.va = &args; r = __netdev_printk(level, dev, &vaf); va_end(args); return r; } EXPORT_SYMBOL(netdev_printk); #define define_netdev_printk_level(func, level) \ int func(const struct net_device *dev, const char *fmt, ...) \ { \ int r; \ struct va_format vaf; \ va_list args; \ \ va_start(args, fmt); \ \ vaf.fmt = fmt; \ vaf.va = &args; \ \ r = __netdev_printk(level, dev, &vaf); \ va_end(args); \ \ return r; \ } \ EXPORT_SYMBOL(func); define_netdev_printk_level(netdev_emerg, KERN_EMERG); define_netdev_printk_level(netdev_alert, KERN_ALERT); define_netdev_printk_level(netdev_crit, KERN_CRIT); define_netdev_printk_level(netdev_err, KERN_ERR); define_netdev_printk_level(netdev_warn, KERN_WARNING); define_netdev_printk_level(netdev_notice, KERN_NOTICE); define_netdev_printk_level(netdev_info, KERN_INFO); static void __net_exit netdev_exit(struct net *net) { kfree(net->dev_name_head); kfree(net->dev_index_head); } static struct pernet_operations __net_initdata netdev_net_ops = { .init = netdev_init, .exit = netdev_exit, }; static void __net_exit default_device_exit(struct net *net) { struct net_device *dev, *aux; /* * Push all migratable network devices back to the * initial network namespace */ rtnl_lock(); for_each_netdev_safe(net, dev, aux) { int err; char fb_name[IFNAMSIZ]; /* Ignore unmoveable devices (i.e. 
loopback) */ if (dev->features & NETIF_F_NETNS_LOCAL) continue; /* Leave virtual devices for the generic cleanup */ if (dev->rtnl_link_ops) continue; /* Push remaining network devices to init_net */ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); err = dev_change_net_namespace(dev, &init_net, fb_name); if (err) { printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n", __func__, dev->name, err); BUG(); } } rtnl_unlock(); } static void __net_exit default_device_exit_batch(struct list_head *net_list) { /* At exit all network devices must be removed from a network * namespace. Do this in the reverse order of registration. * Do this across as many network namespaces as possible to * improve batching efficiency. */ struct net_device *dev; struct net *net; LIST_HEAD(dev_kill_list); rtnl_lock(); list_for_each_entry(net, net_list, exit_list) { for_each_netdev_reverse(net, dev) { if (dev->rtnl_link_ops) dev->rtnl_link_ops->dellink(dev, &dev_kill_list); else unregister_netdevice_queue(dev, &dev_kill_list); } } unregister_netdevice_many(&dev_kill_list); list_del(&dev_kill_list); rtnl_unlock(); } static struct pernet_operations __net_initdata default_device_ops = { .exit = default_device_exit, .exit_batch = default_device_exit_batch, }; /* * Initialize the DEV module. At boot time this walks the device list and * unhooks any devices that fail to initialise (normally hardware not * present) and leaves us with a valid list of present and active devices. * */ /* * This is called single threaded during boot, so no need * to take the rtnl semaphore. */ static int __init net_dev_init(void) { int i, rc = -ENOMEM; BUG_ON(!dev_boot_phase); if (dev_proc_init()) goto out; if (netdev_kobject_init()) goto out; INIT_LIST_HEAD(&ptype_all); for (i = 0; i < PTYPE_HASH_SIZE; i++) INIT_LIST_HEAD(&ptype_base[i]); if (register_pernet_subsys(&netdev_net_ops)) goto out; /* * Initialise the packet receive queues. */ for_each_possible_cpu(i) { struct softnet_data *sd = &per_cpu(softnet_data, i); memset(sd, 0, sizeof(*sd)); skb_queue_head_init(&sd->input_pkt_queue); skb_queue_head_init(&sd->process_queue); sd->completion_queue = NULL; INIT_LIST_HEAD(&sd->poll_list); sd->output_queue = NULL; sd->output_queue_tailp = &sd->output_queue; #ifdef CONFIG_RPS sd->csd.func = rps_trigger_softirq; sd->csd.info = sd; sd->csd.flags = 0; sd->cpu = i; #endif sd->backlog.poll = process_backlog; sd->backlog.weight = weight_p; sd->backlog.gro_list = NULL; sd->backlog.gro_count = 0; } dev_boot_phase = 0; /* The loopback device is special: if any other network device * is present in a network namespace, the loopback device must * be present. Since we now dynamically allocate and free the * loopback device, ensure this invariant is maintained by * keeping the loopback device as the first device on the * list of network devices, ensuring the loopback device * is the first device that appears and the last network device * that disappears. */ if (register_pernet_device(&loopback_net_ops)) goto out; if (register_pernet_device(&default_device_ops)) goto out; open_softirq(NET_TX_SOFTIRQ, net_tx_action); open_softirq(NET_RX_SOFTIRQ, net_rx_action); hotcpu_notifier(dev_cpu_callback, 0); dst_init(); dev_mcast_init(); rc = 0; out: return rc; } subsys_initcall(net_dev_init); static int __init initialize_hashrnd(void) { get_random_bytes(&hashrnd, sizeof(hashrnd)); return 0; } late_initcall_sync(initialize_hashrnd);
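/*
 * A minimal sketch (not part of the file above) of the driver side of the
 * dev_get_stats() contract: a driver can supply ndo_get_stats64 and fill in
 * the caller-provided rtnl_link_stats64. It assumes <linux/netdevice.h> and
 * the hook signature of this kernel generation; "my_priv" and its counters
 * are hypothetical names, and a real driver would typically aggregate
 * per-queue or per-cpu counters here.
 */
struct my_priv {				/* hypothetical per-device state */
	u64 rx_packets, rx_bytes;
	u64 tx_packets, tx_bytes;
};

static struct rtnl_link_stats64 *my_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *storage)
{
	struct my_priv *priv = netdev_priv(dev);

	/* dev_get_stats() has already zeroed *storage; fill in what we track */
	storage->rx_packets = priv->rx_packets;
	storage->rx_bytes = priv->rx_bytes;
	storage->tx_packets = priv->tx_packets;
	storage->tx_bytes = priv->tx_bytes;
	return storage;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_get_stats64 = my_get_stats64,
};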
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3438_1
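/*
 * A sketch of the usual driver life cycle around alloc_netdev_mqs() and
 * free_netdev() from the file above, assuming the standard helpers of this
 * kernel generation (<linux/netdevice.h>, <linux/etherdevice.h>); "my_setup"
 * and the sizes are placeholder choices. The error path relies on
 * free_netdev()'s documented handling of NETREG_UNINITIALIZED devices.
 */
static struct net_device *my_dev;

static void my_setup(struct net_device *dev)
{
	ether_setup(dev);	/* common initialization for ethernet-style devices */
}

static int __init my_driver_init(void)
{
	int err;

	/* 128 bytes of private data, "my%d" name template, 1 TX and 1 RX queue */
	my_dev = alloc_netdev_mqs(128, "my%d", my_setup, 1, 1);
	if (!my_dev)
		return -ENOMEM;

	err = register_netdev(my_dev);	/* takes the rtnl lock internally */
	if (err) {
		free_netdev(my_dev);	/* safe: device was never registered */
		return err;
	}
	return 0;
}

static void __exit my_driver_exit(void)
{
	unregister_netdev(my_dev);	/* refs drain via netdev_run_todo() */
	free_netdev(my_dev);
}

module_init(my_driver_init);
module_exit(my_driver_exit);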
crossvul-cpp_data_good_3591_0
/* * fs/cifs/dir.c * * vfs operations that deal with dentries * * Copyright (C) International Business Machines Corp., 2002,2009 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/file.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" static void renew_parental_timestamps(struct dentry *direntry) { /* BB check if there is a way to get the kernel to do this or if we really need this */ do { direntry->d_time = jiffies; direntry = direntry->d_parent; } while (!IS_ROOT(direntry)); } /* Note: caller must free return buffer */ char * build_path_from_dentry(struct dentry *direntry) { struct dentry *temp; int namelen; int dfsplen; char *full_path; char dirsep; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); unsigned seq; dirsep = CIFS_DIR_SEP(cifs_sb); if (tcon->Flags & SMB_SHARE_IS_IN_DFS) dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); else dfsplen = 0; cifs_bp_rename_retry: namelen = dfsplen; seq = read_seqbegin(&rename_lock); rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { namelen += (1 + temp->d_name.len); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); rcu_read_unlock(); return NULL; } } rcu_read_unlock(); full_path = kmalloc(namelen+1, GFP_KERNEL); if (full_path == NULL) return full_path; full_path[namelen] = 0; /* trailing null */ rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { spin_lock(&temp->d_lock); namelen -= 1 + temp->d_name.len; if (namelen < 0) { spin_unlock(&temp->d_lock); break; } else { full_path[namelen] = dirsep; strncpy(full_path + namelen + 1, temp->d_name.name, temp->d_name.len); cFYI(0, "name: %s", full_path + namelen); } spin_unlock(&temp->d_lock); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); rcu_read_unlock(); kfree(full_path); return NULL; } } rcu_read_unlock(); if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { cFYI(1, "did not end path lookup where expected. 
namelen=%d " "dfsplen=%d", namelen, dfsplen); /* presumably this is only possible if racing with a rename of one of the parent directories (we can not lock the dentries above us to prevent this, but retrying should be harmless) */ kfree(full_path); goto cifs_bp_rename_retry; } /* DIR_SEP already set for byte 0 / vs \ but not for subsequent slashes in prepath which currently must be entered the right way - not sure if there is an alternative since the '\' is a valid posix character so we can not switch those safely to '/' if any are found in the middle of the prepath */ /* BB test paths to Windows with '/' in the midst of prepath */ if (dfsplen) { strncpy(full_path, tcon->treeName, dfsplen); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { int i; for (i = 0; i < dfsplen; i++) { if (full_path[i] == '\\') full_path[i] = '/'; } } } return full_path; } /* Inode operations in similar order to how they appear in Linux file fs.h */ int cifs_create(struct inode *inode, struct dentry *direntry, int mode, struct nameidata *nd) { int rc = -ENOENT; int xid; int create_options = CREATE_NOT_DIR; __u32 oplock = 0; int oflags; /* * BB below access is probably too much for mknod to request * but we have to do query and setpathinfo so requesting * less could fail (unless we want to request getatr and setatr * permissions (only). At least for POSIX we do not have to * request so much. */ int desiredAccess = GENERIC_READ | GENERIC_WRITE; __u16 fileHandle; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; char *full_path = NULL; FILE_ALL_INFO *buf = NULL; struct inode *newinode = NULL; int disposition = FILE_OVERWRITE_IF; xid = GetXid(); cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { FreeXid(xid); return PTR_ERR(tlink); } tcon = tlink_tcon(tlink); if (enable_oplocks) oplock = REQ_OPLOCK; if (nd) oflags = nd->intent.open.file->f_flags; else oflags = O_RDONLY | O_CREAT; full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto cifs_create_out; } if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = cifs_posix_open(full_path, &newinode, inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); /* EIO could indicate that (posix open) operation is not supported, despite what server claimed in capability negotiation. EREMOTE indicates DFS junction, which is not handled in posix open */ if (rc == 0) { if (newinode == NULL) /* query inode info */ goto cifs_create_get_file_info; else /* success, no need to query */ goto cifs_create_set_dentry; } else if ((rc != -EIO) && (rc != -EREMOTE) && (rc != -EOPNOTSUPP) && (rc != -EINVAL)) goto cifs_create_out; /* else fallthrough to retry, using older open call, this is case where server does not support this SMB level, and falsely claims capability (also get here for DFS case which should be rare for path not covered on files) */ } if (nd) { /* if the file is going to stay open, then we need to set the desired access properly */ desiredAccess = 0; if (OPEN_FMODE(oflags) & FMODE_READ) desiredAccess |= GENERIC_READ; /* is this too little? 
*/ if (OPEN_FMODE(oflags) & FMODE_WRITE) desiredAccess |= GENERIC_WRITE; if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) disposition = FILE_CREATE; else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) disposition = FILE_OVERWRITE_IF; else if ((oflags & O_CREAT) == O_CREAT) disposition = FILE_OPEN_IF; else cFYI(1, "Create flag not set in create function"); } /* BB add processing to set equivalent of mode - e.g. via CreateX with ACLs */ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { rc = -ENOMEM; goto cifs_create_out; } /* * if we're not using unix extensions, see if we need to set * ATTR_READONLY on the create call */ if (!tcon->unix_ext && (mode & S_IWUGO) == 0) create_options |= CREATE_OPTION_READONLY; if (backup_cred(cifs_sb)) create_options |= CREATE_OPEN_BACKUP_INTENT; if (tcon->ses->capabilities & CAP_NT_SMBS) rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); else rc = -EIO; /* no NT SMB support fall into legacy open below */ if (rc == -EIO) { /* old server, retry the open legacy style */ rc = SMBLegacyOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } if (rc) { cFYI(1, "cifs_create returned 0x%x", rc); goto cifs_create_out; } /* If Open reported that we actually created a file then we now have to set the mode if possible */ if ((tcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) { struct cifs_unix_set_info_args args = { .mode = mode, .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = 0, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = (__u64) current_fsuid(); if (inode->i_mode & S_ISGID) args.gid = (__u64) inode->i_gid; else args.gid = (__u64) current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } CIFSSMBUnixSetFileInfo(xid, tcon, &args, fileHandle, current->tgid); } else { /* BB implement mode setting via Windows security descriptors e.g. 
*/ /* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/ /* Could set r/o dos attribute if mode & 0222 == 0 */ } cifs_create_get_file_info: /* server might mask mode so we have to query for it */ if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); else { rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, &fileHandle); if (newinode) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) newinode->i_mode = mode; if ((oplock & CIFS_CREATE_ACTION) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) { newinode->i_uid = current_fsuid(); if (inode->i_mode & S_ISGID) newinode->i_gid = inode->i_gid; else newinode->i_gid = current_fsgid(); } } } cifs_create_set_dentry: if (rc == 0) d_instantiate(direntry, newinode); else cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); if (newinode && nd) { struct cifsFileInfo *pfile_info; struct file *filp; filp = lookup_instantiate_filp(nd, direntry, generic_file_open); if (IS_ERR(filp)) { rc = PTR_ERR(filp); CIFSSMBClose(xid, tcon, fileHandle); goto cifs_create_out; } pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock); if (pfile_info == NULL) { fput(filp); CIFSSMBClose(xid, tcon, fileHandle); rc = -ENOMEM; } } else { CIFSSMBClose(xid, tcon, fileHandle); } cifs_create_out: kfree(buf); kfree(full_path); cifs_put_tlink(tlink); FreeXid(xid); return rc; } int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, dev_t device_number) { int rc = -EPERM; int xid; int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *pTcon; struct cifs_io_parms io_parms; char *full_path = NULL; struct inode *newinode = NULL; int oplock = 0; u16 fileHandle; FILE_ALL_INFO *buf = NULL; unsigned int bytes_written; struct win_dev *pdev; if (!old_valid_dev(device_number)) return -EINVAL; cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); pTcon = tlink_tcon(tlink); xid = GetXid(); full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto mknod_out; } if (pTcon->unix_ext) { struct cifs_unix_set_info_args args = { .mode = mode & ~current_umask(), .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = device_number, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = (__u64) current_fsuid(); args.gid = (__u64) current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc) goto mknod_out; rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); if (rc == 0) d_instantiate(direntry, newinode); goto mknod_out; } if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) goto mknod_out; cFYI(1, "sfu compat create special file"); buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { kfree(full_path); rc = -ENOMEM; FreeXid(xid); return rc; } if (backup_cred(cifs_sb)) create_options |= CREATE_OPEN_BACKUP_INTENT; rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE, GENERIC_WRITE, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc) goto mknod_out; /* BB Do not bother to decode buf since no local inode yet to put * timestamps in, but we can reuse it safely */ pdev = (struct win_dev *)buf; io_parms.netfid = fileHandle; io_parms.pid = current->tgid; io_parms.tcon = 
pTcon; io_parms.offset = 0; io_parms.length = sizeof(struct win_dev); if (S_ISCHR(mode)) { memcpy(pdev->type, "IntxCHR", 8); pdev->major = cpu_to_le64(MAJOR(device_number)); pdev->minor = cpu_to_le64(MINOR(device_number)); rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, (char *)pdev, NULL, 0); } else if (S_ISBLK(mode)) { memcpy(pdev->type, "IntxBLK", 8); pdev->major = cpu_to_le64(MAJOR(device_number)); pdev->minor = cpu_to_le64(MINOR(device_number)); rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, (char *)pdev, NULL, 0); } /* else if (S_ISFIFO) */ CIFSSMBClose(xid, pTcon, fileHandle); d_drop(direntry); /* FIXME: add code here to set EAs */ mknod_out: kfree(full_path); kfree(buf); FreeXid(xid); cifs_put_tlink(tlink); return rc; } struct dentry * cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, struct nameidata *nd) { int xid; int rc = 0; /* to get around spurious gcc warning, set to zero here */ __u32 oplock = enable_oplocks ? REQ_OPLOCK : 0; __u16 fileHandle = 0; bool posix_open = false; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *pTcon; struct cifsFileInfo *cfile; struct inode *newInode = NULL; char *full_path = NULL; struct file *filp; xid = GetXid(); cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p", parent_dir_inode, direntry->d_name.name, direntry); /* check whether path exists */ cifs_sb = CIFS_SB(parent_dir_inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { FreeXid(xid); return (struct dentry *)tlink; } pTcon = tlink_tcon(tlink); /* * Don't allow the separator character in a path component. * The VFS will not allow "/", but "\" is allowed by posix. */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { int i; for (i = 0; i < direntry->d_name.len; i++) if (direntry->d_name.name[i] == '\\') { cFYI(1, "Invalid file name"); rc = -EINVAL; goto lookup_out; } } /* * O_EXCL: optimize away the lookup, but don't hash the dentry. Let * the VFS handle the create. */ if (nd && (nd->flags & LOOKUP_EXCL)) { d_instantiate(direntry, NULL); rc = 0; goto lookup_out; } /* can not grab the rename sem here since it would deadlock in the cases (beginning of sys_rename itself) in which we already have the sb rename sem */ full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto lookup_out; } if (direntry->d_inode != NULL) { cFYI(1, "non-NULL inode in lookup"); } else { cFYI(1, "NULL inode in lookup"); } cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode); /* Posix open is only called (at lookup time) for file create now. * For opens (rather than creates), because we do not know if it * is a file or directory yet, and current Samba no longer allows * us to do posix open on dirs, we could end up wasting an open call * on what turns out to be a dir. For file opens, we wait to call posix * open till cifs_open. It could be added here (lookup) in the future * but the performance tradeoff of the extra network request when EISDIR * or EACCES is returned would have to be weighed against the 50% * reduction in network traffic in the other paths. 
*/ if (pTcon->unix_ext) { if (nd && !(nd->flags & LOOKUP_DIRECTORY) && (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && (nd->intent.open.file->f_flags & O_CREAT)) { rc = cifs_posix_open(full_path, &newInode, parent_dir_inode->i_sb, nd->intent.open.create_mode, nd->intent.open.file->f_flags, &oplock, &fileHandle, xid); /* * The check below works around a bug in POSIX * open in samba versions 3.3.1 and earlier where * open could incorrectly fail with invalid parameter. * If either that or op not supported returned, follow * the normal lookup. */ switch (rc) { case 0: /* * The server may allow us to open things like * FIFOs, but the client isn't set up to deal * with that. If it's not a regular file, just * close it and proceed as if it were a normal * lookup. */ if (newInode && !S_ISREG(newInode->i_mode)) { CIFSSMBClose(xid, pTcon, fileHandle); break; } case -ENOENT: posix_open = true; case -EOPNOTSUPP: break; default: pTcon->broken_posix_open = true; } } if (!posix_open) rc = cifs_get_inode_info_unix(&newInode, full_path, parent_dir_inode->i_sb, xid); } else rc = cifs_get_inode_info(&newInode, full_path, NULL, parent_dir_inode->i_sb, xid, NULL); if ((rc == 0) && (newInode != NULL)) { d_add(direntry, newInode); if (posix_open) { filp = lookup_instantiate_filp(nd, direntry, generic_file_open); if (IS_ERR(filp)) { rc = PTR_ERR(filp); CIFSSMBClose(xid, pTcon, fileHandle); goto lookup_out; } cfile = cifs_new_fileinfo(fileHandle, filp, tlink, oplock); if (cfile == NULL) { fput(filp); CIFSSMBClose(xid, pTcon, fileHandle); rc = -ENOMEM; goto lookup_out; } } /* since paths are not looked up by component - the parent directories are presumed to be good here */ renew_parental_timestamps(direntry); } else if (rc == -ENOENT) { rc = 0; direntry->d_time = jiffies; d_add(direntry, NULL); /* if it was once a directory (but how can we tell?) we could do shrink_dcache_parent(direntry); */ } else if (rc != -EACCES) { cERROR(1, "Unexpected lookup error %d", rc); /* We special case check for Access Denied - since that is a common return code */ } lookup_out: kfree(full_path); cifs_put_tlink(tlink); FreeXid(xid); return ERR_PTR(rc); } static int cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) { if (nd && (nd->flags & LOOKUP_RCU)) return -ECHILD; if (direntry->d_inode) { if (cifs_revalidate_dentry(direntry)) return 0; else { /* * Forcibly invalidate automounting directory inodes * (remote DFS directories) so to have them * instantiated again for automount */ if (IS_AUTOMOUNT(direntry->d_inode)) return 0; return 1; } } /* * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ if (!nd) return 0; /* * Drop the negative dentry, in order to make sure to use the * case sensitive name which is specified by user if this is * for creation. 
*/ if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled) return 0; return 1; } /* static int cifs_d_delete(struct dentry *direntry) { int rc = 0; cFYI(1, "In cifs d_delete, name = %s", direntry->d_name.name); return rc; } */ const struct dentry_operations cifs_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_automount = cifs_dfs_d_automount, /* d_delete: cifs_d_delete, */ /* not needed except for debugging */ }; static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode, struct qstr *q) { struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls; unsigned long hash; int i; hash = init_name_hash(); for (i = 0; i < q->len; i++) hash = partial_name_hash(nls_tolower(codepage, q->name[i]), hash); q->hash = end_name_hash(hash); return 0; } static int cifs_ci_compare(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls; if ((name->len == len) && (nls_strnicmp(codepage, name->name, str, len) == 0)) return 0; return 1; } const struct dentry_operations cifs_ci_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_hash = cifs_ci_hash, .d_compare = cifs_ci_compare, .d_automount = cifs_dfs_d_automount, };
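/*
 * A small userspace sketch of the measure-then-fill pattern that
 * build_path_from_dentry() above uses: one pass to compute the length, one
 * backwards pass to fill the buffer. The "node" struct is a stand-in for
 * dentries; the kernel version additionally retries under rename_lock in
 * case a concurrent rename invalidates the walk.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {			/* stand-in for a dentry */
	const char *name;
	struct node *parent;	/* NULL for the root */
};

static char *build_path(const struct node *leaf)
{
	const struct node *t;
	size_t len = 0;
	char *path;

	for (t = leaf; t->parent; t = t->parent)	/* pass 1: measure */
		len += 1 + strlen(t->name);		/* '/' plus component */

	path = malloc(len + 1);
	if (!path)
		return NULL;
	path[len] = '\0';

	for (t = leaf; t->parent; t = t->parent) {	/* pass 2: fill backwards */
		len -= strlen(t->name);
		memcpy(path + len, t->name, strlen(t->name));
		path[--len] = '/';
	}
	return path;
}

int main(void)
{
	struct node root = { "", NULL };
	struct node a = { "a", &root }, b = { "b", &a };
	char *p = build_path(&b);

	if (p)
		printf("%s\n", p);	/* prints "/a/b" */
	free(p);
	return 0;
}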
./CrossVul/dataset_final_sorted/CWE-264/c/good_3591_0
crossvul-cpp_data_bad_5605_1
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/user_namespace.h> #include <linux/proc_fs.h> #include <linux/highuid.h> #include <linux/cred.h> #include <linux/securebits.h> #include <linux/keyctl.h> #include <linux/key-type.h> #include <keys/user-type.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/ctype.h> #include <linux/projid.h> static struct kmem_cache *user_ns_cachep __read_mostly; static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, struct uid_gid_map *map); static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns) { /* Start with the same capabilities as init but useless for doing * anything as the capabilities are bound to the new user namespace. */ cred->securebits = SECUREBITS_DEFAULT; cred->cap_inheritable = CAP_EMPTY_SET; cred->cap_permitted = CAP_FULL_SET; cred->cap_effective = CAP_FULL_SET; cred->cap_bset = CAP_FULL_SET; #ifdef CONFIG_KEYS key_put(cred->request_key_auth); cred->request_key_auth = NULL; #endif /* tgcred will be cleared in our caller because CLONE_THREAD won't be set */ cred->user_ns = user_ns; } /* * Create a new user namespace, deriving the creator from the user in the * passed credentials, and replacing that user with the new root user for the * new namespace. * * This is called by copy_creds(), which will finish setting the target task's * credentials. */ int create_user_ns(struct cred *new) { struct user_namespace *ns, *parent_ns = new->user_ns; kuid_t owner = new->euid; kgid_t group = new->egid; int ret; /* The creator needs a mapping in the parent user namespace * or else we won't be able to reasonably tell userspace who * created a user_namespace. */ if (!kuid_has_mapping(parent_ns, owner) || !kgid_has_mapping(parent_ns, group)) return -EPERM; ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL); if (!ns) return -ENOMEM; ret = proc_alloc_inum(&ns->proc_inum); if (ret) { kmem_cache_free(user_ns_cachep, ns); return ret; } atomic_set(&ns->count, 1); /* Leave the new->user_ns reference with the new user namespace.
*/ ns->parent = parent_ns; ns->owner = owner; ns->group = group; set_cred_user_ns(new, ns); return 0; } int unshare_userns(unsigned long unshare_flags, struct cred **new_cred) { struct cred *cred; if (!(unshare_flags & CLONE_NEWUSER)) return 0; cred = prepare_creds(); if (!cred) return -ENOMEM; *new_cred = cred; return create_user_ns(cred); } void free_user_ns(struct user_namespace *ns) { struct user_namespace *parent; do { parent = ns->parent; proc_free_inum(ns->proc_inum); kmem_cache_free(user_ns_cachep, ns); ns = parent; } while (atomic_dec_and_test(&parent->count)); } EXPORT_SYMBOL(free_user_ns); static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) { unsigned idx, extents; u32 first, last, id2; id2 = id + count - 1; /* Find the matching extent */ extents = map->nr_extents; smp_read_barrier_depends(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].first; last = first + map->extent[idx].count - 1; if (id >= first && id <= last && (id2 >= first && id2 <= last)) break; } /* Map the id or note failure */ if (idx < extents) id = (id - first) + map->extent[idx].lower_first; else id = (u32) -1; return id; } static u32 map_id_down(struct uid_gid_map *map, u32 id) { unsigned idx, extents; u32 first, last; /* Find the matching extent */ extents = map->nr_extents; smp_read_barrier_depends(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].first; last = first + map->extent[idx].count - 1; if (id >= first && id <= last) break; } /* Map the id or note failure */ if (idx < extents) id = (id - first) + map->extent[idx].lower_first; else id = (u32) -1; return id; } static u32 map_id_up(struct uid_gid_map *map, u32 id) { unsigned idx, extents; u32 first, last; /* Find the matching extent */ extents = map->nr_extents; smp_read_barrier_depends(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].lower_first; last = first + map->extent[idx].count - 1; if (id >= first && id <= last) break; } /* Map the id or note failure */ if (idx < extents) id = (id - first) + map->extent[idx].first; else id = (u32) -1; return id; } /** * make_kuid - Map a user-namespace uid pair into a kuid. * @ns: User namespace that the uid is in * @uid: User identifier * * Maps a user-namespace uid pair into a kernel internal kuid, * and returns that kuid. * * When there is no mapping defined for the user-namespace uid * pair INVALID_UID is returned. Callers are expected to test * for and handle INVALID_UID being returned. INVALID_UID * may be tested for using uid_valid(). */ kuid_t make_kuid(struct user_namespace *ns, uid_t uid) { /* Map the uid to a global kernel uid */ return KUIDT_INIT(map_id_down(&ns->uid_map, uid)); } EXPORT_SYMBOL(make_kuid); /** * from_kuid - Create a uid from a kuid user-namespace pair. * @targ: The user namespace we want a uid in. * @kuid: The kernel internal uid to start with. * * Map @kuid into the user-namespace specified by @targ and * return the resulting uid. * * There is always a mapping into the initial user_namespace. * * If @kuid has no mapping in @targ (uid_t)-1 is returned. */ uid_t from_kuid(struct user_namespace *targ, kuid_t kuid) { /* Map the uid from a global kernel uid */ return map_id_up(&targ->uid_map, __kuid_val(kuid)); } EXPORT_SYMBOL(from_kuid); /** * from_kuid_munged - Create a uid from a kuid user-namespace pair. * @targ: The user namespace we want a uid in. * @kuid: The kernel internal uid to start with. * * Map @kuid into the user-namespace specified by @targ and * return the resulting uid.
* * There is always a mapping into the initial user_namespace. * * Unlike from_kuid, from_kuid_munged never fails and always * returns a valid uid. This makes from_kuid_munged appropriate * for use in syscalls like stat and getuid where failing the * system call and failing to provide a valid uid are not * options. * * If @kuid has no mapping in @targ overflowuid is returned. */ uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid) { uid_t uid; uid = from_kuid(targ, kuid); if (uid == (uid_t) -1) uid = overflowuid; return uid; } EXPORT_SYMBOL(from_kuid_munged); /** * make_kgid - Map a user-namespace gid pair into a kgid. * @ns: User namespace that the gid is in * @gid: group identifier * * Maps a user-namespace gid pair into a kernel internal kgid, * and returns that kgid. * * When there is no mapping defined for the user-namespace gid * pair INVALID_GID is returned. Callers are expected to test * for and handle INVALID_GID being returned. INVALID_GID may be * tested for using gid_valid(). */ kgid_t make_kgid(struct user_namespace *ns, gid_t gid) { /* Map the gid to a global kernel gid */ return KGIDT_INIT(map_id_down(&ns->gid_map, gid)); } EXPORT_SYMBOL(make_kgid); /** * from_kgid - Create a gid from a kgid user-namespace pair. * @targ: The user namespace we want a gid in. * @kgid: The kernel internal gid to start with. * * Map @kgid into the user-namespace specified by @targ and * return the resulting gid. * * There is always a mapping into the initial user_namespace. * * If @kgid has no mapping in @targ (gid_t)-1 is returned. */ gid_t from_kgid(struct user_namespace *targ, kgid_t kgid) { /* Map the gid from a global kernel gid */ return map_id_up(&targ->gid_map, __kgid_val(kgid)); } EXPORT_SYMBOL(from_kgid); /** * from_kgid_munged - Create a gid from a kgid user-namespace pair. * @targ: The user namespace we want a gid in. * @kgid: The kernel internal gid to start with. * * Map @kgid into the user-namespace specified by @targ and * return the resulting gid. * * There is always a mapping into the initial user_namespace. * * Unlike from_kgid, from_kgid_munged never fails and always * returns a valid gid. This makes from_kgid_munged appropriate * for use in syscalls like stat and getgid where failing the * system call and failing to provide a valid gid are not options. * * If @kgid has no mapping in @targ overflowgid is returned. */ gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid) { gid_t gid; gid = from_kgid(targ, kgid); if (gid == (gid_t) -1) gid = overflowgid; return gid; } EXPORT_SYMBOL(from_kgid_munged); /** * make_kprojid - Map a user-namespace projid pair into a kprojid. * @ns: User namespace that the projid is in * @projid: Project identifier * * Maps a user-namespace projid pair into a kernel internal kprojid, * and returns that kprojid. * * When there is no mapping defined for the user-namespace projid * pair INVALID_PROJID is returned. Callers are expected to test * for and handle INVALID_PROJID being returned. INVALID_PROJID * may be tested for using projid_valid(). */ kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid) { /* Map the projid to a global kernel projid */ return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid)); } EXPORT_SYMBOL(make_kprojid); /** * from_kprojid - Create a projid from a kprojid user-namespace pair. * @targ: The user namespace we want a projid in. * @kprojid: The kernel internal project identifier to start with. * * Map @kprojid into the user-namespace specified by @targ and * return the resulting projid.
* * There is always a mapping into the initial user_namespace. * * If @kprojid has no mapping in @targ (projid_t)-1 is returned. */ projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid) { /* Map the projid from a global kernel projid */ return map_id_up(&targ->projid_map, __kprojid_val(kprojid)); } EXPORT_SYMBOL(from_kprojid); /** * from_kprojid_munged - Create a projid from a kprojid user-namespace pair. * @targ: The user namespace we want a projid in. * @kprojid: The kernel internal projid to start with. * * Map @kprojid into the user-namespace specified by @targ and * return the resulting projid. * * There is always a mapping into the initial user_namespace. * * Unlike from_kprojid, from_kprojid_munged never fails and always * returns a valid projid. This makes from_kprojid_munged * appropriate for use in syscalls like stat where * failing the system call and failing to provide a valid projid are * not options. * * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned. */ projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid) { projid_t projid; projid = from_kprojid(targ, kprojid); if (projid == (projid_t) -1) projid = OVERFLOW_PROJID; return projid; } EXPORT_SYMBOL(from_kprojid_munged); static int uid_m_show(struct seq_file *seq, void *v) { struct user_namespace *ns = seq->private; struct uid_gid_extent *extent = v; struct user_namespace *lower_ns; uid_t lower; lower_ns = seq_user_ns(seq); if ((lower_ns == ns) && lower_ns->parent) lower_ns = lower_ns->parent; lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first)); seq_printf(seq, "%10u %10u %10u\n", extent->first, lower, extent->count); return 0; } static int gid_m_show(struct seq_file *seq, void *v) { struct user_namespace *ns = seq->private; struct uid_gid_extent *extent = v; struct user_namespace *lower_ns; gid_t lower; lower_ns = seq_user_ns(seq); if ((lower_ns == ns) && lower_ns->parent) lower_ns = lower_ns->parent; lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first)); seq_printf(seq, "%10u %10u %10u\n", extent->first, lower, extent->count); return 0; } static int projid_m_show(struct seq_file *seq, void *v) { struct user_namespace *ns = seq->private; struct uid_gid_extent *extent = v; struct user_namespace *lower_ns; projid_t lower; lower_ns = seq_user_ns(seq); if ((lower_ns == ns) && lower_ns->parent) lower_ns = lower_ns->parent; lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first)); seq_printf(seq, "%10u %10u %10u\n", extent->first, lower, extent->count); return 0; } static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map) { struct uid_gid_extent *extent = NULL; loff_t pos = *ppos; if (pos < map->nr_extents) extent = &map->extent[pos]; return extent; } static void *uid_m_start(struct seq_file *seq, loff_t *ppos) { struct user_namespace *ns = seq->private; return m_start(seq, ppos, &ns->uid_map); } static void *gid_m_start(struct seq_file *seq, loff_t *ppos) { struct user_namespace *ns = seq->private; return m_start(seq, ppos, &ns->gid_map); } static void *projid_m_start(struct seq_file *seq, loff_t *ppos) { struct user_namespace *ns = seq->private; return m_start(seq, ppos, &ns->projid_map); } static void *m_next(struct seq_file *seq, void *v, loff_t *pos) { (*pos)++; return seq->op->start(seq, pos); } static void m_stop(struct seq_file *seq, void *v) { return; } struct seq_operations proc_uid_seq_operations = { .start = uid_m_start, .stop = m_stop, .next = m_next, .show = uid_m_show, }; struct seq_operations
proc_gid_seq_operations = { .start = gid_m_start, .stop = m_stop, .next = m_next, .show = gid_m_show, }; struct seq_operations proc_projid_seq_operations = { .start = projid_m_start, .stop = m_stop, .next = m_next, .show = projid_m_show, }; static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent *extent) { u32 upper_first, lower_first, upper_last, lower_last; unsigned idx; upper_first = extent->first; lower_first = extent->lower_first; upper_last = upper_first + extent->count - 1; lower_last = lower_first + extent->count - 1; for (idx = 0; idx < new_map->nr_extents; idx++) { u32 prev_upper_first, prev_lower_first; u32 prev_upper_last, prev_lower_last; struct uid_gid_extent *prev; prev = &new_map->extent[idx]; prev_upper_first = prev->first; prev_lower_first = prev->lower_first; prev_upper_last = prev_upper_first + prev->count - 1; prev_lower_last = prev_lower_first + prev->count - 1; /* Does the upper range intersect a previous extent? */ if ((prev_upper_first <= upper_last) && (prev_upper_last >= upper_first)) return true; /* Does the lower range intersect a previous extent? */ if ((prev_lower_first <= lower_last) && (prev_lower_last >= lower_first)) return true; } return false; } static DEFINE_MUTEX(id_map_mutex); static ssize_t map_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos, int cap_setid, struct uid_gid_map *map, struct uid_gid_map *parent_map) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct uid_gid_map new_map; unsigned idx; struct uid_gid_extent *extent = NULL; unsigned long page = 0; char *kbuf, *pos, *next_line; ssize_t ret = -EINVAL; /* * The id_map_mutex serializes all writes to any given map. * * Any map is only ever written once. * * An id map fits within 1 cache line on most architectures. * * On read nothing needs to be done unless you are on an * architecture with a crazy cache coherency model like alpha. * * There is a one time data dependency between reading the * count of the extents and the values of the extents. The * desired behavior is to see the values of the extents that * were written before the count of the extents. * * To achieve this smp_wmb() is used to guarantee the write * order and smp_read_barrier_depends() guarantees that we * don't have crazy architectures returning stale data. * */ mutex_lock(&id_map_mutex); ret = -EPERM; /* Only allow one successful write to the map */ if (map->nr_extents != 0) goto out; /* Require the appropriate privilege, CAP_SETUID or CAP_SETGID, * over the user namespace in order to set the id mapping.
*/ if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) goto out; /* Get a buffer */ ret = -ENOMEM; page = __get_free_page(GFP_TEMPORARY); kbuf = (char *) page; if (!page) goto out; /* Only allow <= page size writes at the beginning of the file */ ret = -EINVAL; if ((*ppos != 0) || (count >= PAGE_SIZE)) goto out; /* Slurp in the user data */ ret = -EFAULT; if (copy_from_user(kbuf, buf, count)) goto out; kbuf[count] = '\0'; /* Parse the user data */ ret = -EINVAL; pos = kbuf; new_map.nr_extents = 0; for (;pos; pos = next_line) { extent = &new_map.extent[new_map.nr_extents]; /* Find the end of line and ensure I don't look past it */ next_line = strchr(pos, '\n'); if (next_line) { *next_line = '\0'; next_line++; if (*next_line == '\0') next_line = NULL; } pos = skip_spaces(pos); extent->first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->lower_first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->count = simple_strtoul(pos, &pos, 10); if (*pos && !isspace(*pos)) goto out; /* Verify there is no trailing junk on the line */ pos = skip_spaces(pos); if (*pos != '\0') goto out; /* Verify we have been given valid starting values */ if ((extent->first == (u32) -1) || (extent->lower_first == (u32) -1 )) goto out; /* Verify count is not zero and does not cause the extent to wrap */ if ((extent->first + extent->count) <= extent->first) goto out; if ((extent->lower_first + extent->count) <= extent->lower_first) goto out; /* Do the ranges in extent overlap any previous extents? */ if (mappings_overlap(&new_map, extent)) goto out; new_map.nr_extents++; /* Fail if the file contains too many extents */ if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) && (next_line != NULL)) goto out; } /* Be very certain the new map actually exists */ if (new_map.nr_extents == 0) goto out; ret = -EPERM; /* Validate that the user is allowed to use the user ids being mapped to. */ if (!new_idmap_permitted(ns, cap_setid, &new_map)) goto out; /* Map the lower ids from the parent user namespace to the * kernel global id space. */ for (idx = 0; idx < new_map.nr_extents; idx++) { u32 lower_first; extent = &new_map.extent[idx]; lower_first = map_id_range_down(parent_map, extent->lower_first, extent->count); /* Fail if we can not map the specified extent to * the kernel global id space.
*/ if (lower_first == (u32) -1) goto out; extent->lower_first = lower_first; } /* Install the map */ memcpy(map->extent, new_map.extent, new_map.nr_extents*sizeof(new_map.extent[0])); smp_wmb(); map->nr_extents = new_map.nr_extents; *ppos = count; ret = count; out: mutex_unlock(&id_map_mutex); if (page) free_page(page); return ret; } ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct user_namespace *seq_ns = seq_user_ns(seq); if (!ns->parent) return -EPERM; if ((seq_ns != ns) && (seq_ns != ns->parent)) return -EPERM; return map_write(file, buf, size, ppos, CAP_SETUID, &ns->uid_map, &ns->parent->uid_map); } ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct user_namespace *seq_ns = seq_user_ns(seq); if (!ns->parent) return -EPERM; if ((seq_ns != ns) && (seq_ns != ns->parent)) return -EPERM; return map_write(file, buf, size, ppos, CAP_SETGID, &ns->gid_map, &ns->parent->gid_map); } ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct user_namespace *seq_ns = seq_user_ns(seq); if (!ns->parent) return -EPERM; if ((seq_ns != ns) && (seq_ns != ns->parent)) return -EPERM; /* Anyone can set any valid project id no capability needed */ return map_write(file, buf, size, ppos, -1, &ns->projid_map, &ns->parent->projid_map); } static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, struct uid_gid_map *new_map) { /* Allow mapping to your own filesystem ids */ if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) { u32 id = new_map->extent[0].lower_first; if (cap_setid == CAP_SETUID) { kuid_t uid = make_kuid(ns->parent, id); if (uid_eq(uid, current_fsuid())) return true; } else if (cap_setid == CAP_SETGID) { kgid_t gid = make_kgid(ns->parent, id); if (gid_eq(gid, current_fsgid())) return true; } } /* Allow anyone to set a mapping that doesn't require privilege */ if (!cap_valid(cap_setid)) return true; /* Allow the specified ids if we have the appropriate capability * (CAP_SETUID or CAP_SETGID) over the parent user namespace. */ if (ns_capable(ns->parent, cap_setid)) return true; return false; } static void *userns_get(struct task_struct *task) { struct user_namespace *user_ns; rcu_read_lock(); user_ns = get_user_ns(__task_cred(task)->user_ns); rcu_read_unlock(); return user_ns; } static void userns_put(void *ns) { put_user_ns(ns); } static int userns_install(struct nsproxy *nsproxy, void *ns) { struct user_namespace *user_ns = ns; struct cred *cred; /* Don't allow gaining capabilities by reentering * the same user namespace. 
*/ if (user_ns == current_user_ns()) return -EINVAL; /* Threaded processes may not enter a different user namespace */ if (atomic_read(&current->mm->mm_users) > 1) return -EINVAL; if (!ns_capable(user_ns, CAP_SYS_ADMIN)) return -EPERM; cred = prepare_creds(); if (!cred) return -ENOMEM; put_user_ns(cred->user_ns); set_cred_user_ns(cred, get_user_ns(user_ns)); return commit_creds(cred); } static unsigned int userns_inum(void *ns) { struct user_namespace *user_ns = ns; return user_ns->proc_inum; } const struct proc_ns_operations userns_operations = { .name = "user", .type = CLONE_NEWUSER, .get = userns_get, .put = userns_put, .install = userns_install, .inum = userns_inum, }; static __init int user_namespaces_init(void) { user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); return 0; } module_init(user_namespaces_init);
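/*
 * A userspace sketch of feeding map_write() above: each line of
 * /proc/<pid>/uid_map (and gid_map/projid_map) is "first lower-first count",
 * the file accepts exactly one successful write, and the writer needs the
 * matching capability over the namespace. The pid, file name, and id range
 * below are placeholder values.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

static int write_id_map(pid_t pid, const char *map_file, const char *mapping)
{
	char path[64];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/proc/%d/%s", (int)pid, map_file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* map_write() only accepts a single write, at offset 0, under a page */
	if (write(fd, mapping, strlen(mapping)) != (ssize_t)strlen(mapping))
		ret = -1;
	close(fd);
	return ret;
}

/* e.g.: map uid 0 in the child's namespace to uid 100000 outside, 65536 ids:
 *	write_id_map(child_pid, "uid_map", "0 100000 65536\n");
 */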
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5605_1
crossvul-cpp_data_bad_1475_0
/* lxcfs * * Copyright © 2014,2015 Canonical, Inc * Author: Serge Hallyn <serge.hallyn@ubuntu.com> * * See COPYING file for details. */ /* * TODO XXX * sanitize paths for '..', cgmanager's not doing that for us any more - * does fuse help us? * Surely there are more paths we'll need to sanitize - look back through * cgmanager's sources. */ #define FUSE_USE_VERSION 26 #include <stdio.h> #include <dirent.h> #include <fcntl.h> #include <fuse.h> #include <unistd.h> #include <errno.h> #include <stdbool.h> #include <time.h> #include <string.h> #include <stdlib.h> #include <libgen.h> #include <sched.h> #include <linux/sched.h> #include <sys/socket.h> #include <sys/mount.h> #include <wait.h> #ifdef FORTRAVIS #define GLIB_DISABLE_DEPRECATION_WARNINGS #include <glib-object.h> #endif #include "cgfs.h" #include "config.h" // for VERSION enum { LXC_TYPE_CGDIR, LXC_TYPE_CGFILE, LXC_TYPE_PROC_MEMINFO, LXC_TYPE_PROC_CPUINFO, LXC_TYPE_PROC_UPTIME, LXC_TYPE_PROC_STAT, LXC_TYPE_PROC_DISKSTATS, }; struct file_info { char *controller; char *cgroup; char *file; int type; char *buf; // unused as of yet int buflen; int size; //actual data size int cached; }; /* reserve buffer size, for cpuall in /proc/stat */ #define BUF_RESERVE_SIZE 256 /* * append pid to *src. * src: a pointer to a char* in which to append the pid. * sz: the number of characters printed so far, minus trailing \0. * asz: the allocated size so far * pid: the pid to append */ static void must_strcat_pid(char **src, size_t *sz, size_t *asz, pid_t pid) { char *d = *src; char tmp[30]; sprintf(tmp, "%d\n", (int)pid); if (!d) { do { d = malloc(BUF_RESERVE_SIZE); } while (!d); *src = d; *asz = BUF_RESERVE_SIZE; } else if (strlen(tmp) + *sz + 1 >= *asz) { do { d = realloc(d, *asz + BUF_RESERVE_SIZE); } while (!d); *src = d; *asz += BUF_RESERVE_SIZE; } memcpy(d+*sz, tmp, strlen(tmp)); *sz += strlen(tmp); d[*sz] = '\0'; } static int wait_for_pid(pid_t pid) { int status, ret; again: ret = waitpid(pid, &status, 0); if (ret == -1) { if (errno == EINTR) goto again; return -1; } if (ret != pid) goto again; if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) return -1; return 0; } /* * Given an open file * to /proc/pid/{u,g}id_map, and an id * valid in the caller's namespace, return the id mapped into * pid's namespace. * Returns the mapped id, or -1 on error. */ unsigned int convert_id_to_ns(FILE *idfile, unsigned int in_id) { unsigned int nsuid, // base id for a range in the idfile's namespace hostuid, // base id for a range in the caller's namespace count; // number of ids in this range char line[400]; int ret; fseek(idfile, 0L, SEEK_SET); while (fgets(line, 400, idfile)) { ret = sscanf(line, "%u %u %u\n", &nsuid, &hostuid, &count); if (ret != 3) continue; if (hostuid + count < hostuid || nsuid + count < nsuid) { /* * uids wrapped around - unexpected as this is a procfile, * so just bail.
*/ fprintf(stderr, "pid wraparound at entry %u %u %u in %s\n", nsuid, hostuid, count, line); return -1; } if (hostuid <= in_id && hostuid+count > in_id) { /* * now since hostuid <= in_id < hostuid+count, and * hostuid+count and nsuid+count do not wrap around, * we know that nsuid+(in_id-hostuid), which must be * less than nsuid+(count), must not wrap around */ return (in_id - hostuid) + nsuid; } } // no answer found return -1; } /* * for is_privileged_over, * specify whether we require the calling uid to be root in his * namespace */ #define NS_ROOT_REQD true #define NS_ROOT_OPT false #define PROCLEN 100 static bool is_privileged_over(pid_t pid, uid_t uid, uid_t victim, bool req_ns_root) { char fpath[PROCLEN]; int ret; bool answer = false; uid_t nsuid; if (victim == -1 || uid == -1) return false; /* * If the request is one not requiring root in the namespace, * then having the same uid suffices. (i.e. uid 1000 has write * access to files owned by uid 1000) */ if (!req_ns_root && uid == victim) return true; ret = snprintf(fpath, PROCLEN, "/proc/%d/uid_map", pid); if (ret < 0 || ret >= PROCLEN) return false; FILE *f = fopen(fpath, "r"); if (!f) return false; /* if caller's not root in his namespace, reject */ nsuid = convert_id_to_ns(f, uid); if (nsuid) goto out; /* * If victim is not mapped into caller's ns, reject. * XXX I'm not sure this check is needed given that fuse * will be sending requests where the vfs has converted */ nsuid = convert_id_to_ns(f, victim); if (nsuid == -1) goto out; answer = true; out: fclose(f); return answer; } static bool perms_include(int fmode, mode_t req_mode) { mode_t r; switch (req_mode & O_ACCMODE) { case O_RDONLY: r = S_IROTH; break; case O_WRONLY: r = S_IWOTH; break; case O_RDWR: r = S_IROTH | S_IWOTH; break; default: return false; } return ((fmode & r) == r); } /* * taskcg is a/b/c * querycg is /a/b/c/d/e * we return 'd' */ static char *get_next_cgroup_dir(const char *taskcg, const char *querycg) { char *start, *end; if (strlen(taskcg) <= strlen(querycg)) { fprintf(stderr, "%s: I was fed bad input\n", __func__); return NULL; } if (strcmp(querycg, "/") == 0) start = strdup(taskcg + 1); else start = strdup(taskcg + strlen(querycg) + 1); if (!start) return NULL; end = strchr(start, '/'); if (end) *end = '\0'; return start; } static void stripnewline(char *x) { size_t l = strlen(x); if (l && x[l-1] == '\n') x[l-1] = '\0'; } static char *get_pid_cgroup(pid_t pid, const char *contrl) { char fnam[PROCLEN]; FILE *f; char *answer = NULL; char *line = NULL; size_t len = 0; int ret; const char *h = find_mounted_controller(contrl); if (!h) return NULL; ret = snprintf(fnam, PROCLEN, "/proc/%d/cgroup", pid); if (ret < 0 || ret >= PROCLEN) return NULL; if (!(f = fopen(fnam, "r"))) return NULL; while (getline(&line, &len, f) != -1) { char *c1, *c2; if (!line[0]) continue; c1 = strchr(line, ':'); if (!c1) goto out; c1++; c2 = strchr(c1, ':'); if (!c2) goto out; *c2 = '\0'; if (strcmp(c1, h) != 0) continue; c2++; stripnewline(c2); do { answer = strdup(c2); } while (!answer); break; } out: fclose(f); free(line); return answer; } /* * check whether a fuse context may access a cgroup dir or file * * If file is not null, it is a cgroup file to check under cg. * If file is null, then we are checking perms on cg itself. * * For files we can check the mode of the list_keys result. * For cgroups, we must make assumptions based on the files under the * cgroup, because cgmanager doesn't tell us ownership/perms of cgroups * yet.
*/ static bool fc_may_access(struct fuse_context *fc, const char *contrl, const char *cg, const char *file, mode_t mode) { struct cgfs_files *k = NULL; bool ret = false; if (!file) file = "tasks"; if (*file == '/') file++; k = cgfs_get_key(contrl, cg, file); if (!k) return false; if (is_privileged_over(fc->pid, fc->uid, k->uid, NS_ROOT_OPT)) { if (perms_include(k->mode >> 6, mode)) { ret = true; goto out; } } if (fc->gid == k->gid) { if (perms_include(k->mode >> 3, mode)) { ret = true; goto out; } } ret = perms_include(k->mode, mode); out: free_key(k); return ret; } #define INITSCOPE "/init.scope" static void prune_init_slice(char *cg) { char *point; point = cg + strlen(cg) - strlen(INITSCOPE); if (point < cg) return; if (strcmp(point, INITSCOPE) == 0) { if (point == cg) *(point+1) = '\0'; else *point = '\0'; } } /* * If caller is in /a/b/c/d, he may only act on things under cg=/a/b/c/d. * If caller is in /a, he may act on /a/b, but not on /b. * if the answer is false and nextcg is not NULL, then *nextcg will point * to a string containing the next cgroup directory under cg, which must be * freed by the caller. */ static bool caller_is_in_ancestor(pid_t pid, const char *contrl, const char *cg, char **nextcg) { bool answer = false; char *c2 = get_pid_cgroup(pid, contrl); char *linecmp; if (!c2) return false; prune_init_slice(c2); /* * callers pass in '/' for root cgroup, otherwise they pass * in a cgroup without leading '/' */ linecmp = *cg == '/' ? c2 : c2+1; if (strncmp(linecmp, cg, strlen(linecmp)) != 0) { if (nextcg) { *nextcg = get_next_cgroup_dir(linecmp, cg); } goto out; } answer = true; out: free(c2); return answer; } /* * If caller is in /a/b/c, he may see that /a exists, but not /b or /a/c. */ static bool caller_may_see_dir(pid_t pid, const char *contrl, const char *cg) { bool answer = false; char *c2, *task_cg; size_t target_len, task_len; if (strcmp(cg, "/") == 0) return true; c2 = get_pid_cgroup(pid, contrl); if (!c2) return false; task_cg = c2 + 1; target_len = strlen(cg); task_len = strlen(task_cg); if (strcmp(cg, task_cg) == 0) { answer = true; goto out; } if (target_len < task_len) { /* looking up a parent dir */ if (strncmp(task_cg, cg, target_len) == 0 && task_cg[target_len] == '/') answer = true; goto out; } if (target_len > task_len) { /* looking up a child dir */ if (strncmp(task_cg, cg, task_len) == 0 && cg[task_len] == '/') answer = true; goto out; } out: free(c2); return answer; } /* * given /cgroup/freezer/a/b, return "freezer". * the returned char* should NOT be freed. 
*/ static char *pick_controller_from_path(struct fuse_context *fc, const char *path) { const char *p1; char *contr, *slash; if (strlen(path) < 9) return NULL; if (*(path+7) != '/') return NULL; p1 = path+8; contr = strdupa(p1); if (!contr) return NULL; slash = strstr(contr, "/"); if (slash) *slash = '\0'; int i; for (i = 0; i < num_hierarchies; i++) { if (hierarchies[i] && strcmp(hierarchies[i], contr) == 0) return hierarchies[i]; } return NULL; } /* * Find the start of cgroup in /cgroup/controller/the/cgroup/path * Note that the returned value may include files (keynames) etc */ static const char *find_cgroup_in_path(const char *path) { const char *p1; if (strlen(path) < 9) return NULL; p1 = strstr(path+8, "/"); if (!p1) return NULL; return p1+1; } /* * dir should be freed, file not */ static void get_cgdir_and_path(const char *cg, char **dir, char **file) { char *p; do { *dir = strdup(cg); } while (!*dir); *file = strrchr(cg, '/'); if (!*file) { *file = NULL; return; } p = strrchr(*dir, '/'); *p = '\0'; } /* * FUSE ops for /cgroup */ static int cg_getattr(const char *path, struct stat *sb) { struct timespec now; struct fuse_context *fc = fuse_get_context(); char * cgdir = NULL; char *fpath = NULL, *path1, *path2; struct cgfs_files *k = NULL; const char *cgroup; const char *controller = NULL; int ret = -ENOENT; if (!fc) return -EIO; memset(sb, 0, sizeof(struct stat)); if (clock_gettime(CLOCK_REALTIME, &now) < 0) return -EINVAL; sb->st_uid = sb->st_gid = 0; sb->st_atim = sb->st_mtim = sb->st_ctim = now; sb->st_size = 0; if (strcmp(path, "/cgroup") == 0) { sb->st_mode = S_IFDIR | 00755; sb->st_nlink = 2; return 0; } controller = pick_controller_from_path(fc, path); if (!controller) return -EIO; cgroup = find_cgroup_in_path(path); if (!cgroup) { /* this is just /cgroup/controller, return it as a dir */ sb->st_mode = S_IFDIR | 00755; sb->st_nlink = 2; return 0; } get_cgdir_and_path(cgroup, &cgdir, &fpath); if (!fpath) { path1 = "/"; path2 = cgdir; } else { path1 = cgdir; path2 = fpath; } /* check that cgcopy is either a child cgroup of cgdir, or listed in its keys. * Then check that caller's cgroup is under path if fpath is a child * cgroup, or cgdir if fpath is a file */ if (is_child_cgroup(controller, path1, path2)) { if (!caller_may_see_dir(fc->pid, controller, cgroup)) { ret = -ENOENT; goto out; } if (!caller_is_in_ancestor(fc->pid, controller, cgroup, NULL)) { /* this is just /cgroup/controller, return it as a dir */ sb->st_mode = S_IFDIR | 00555; sb->st_nlink = 2; ret = 0; goto out; } if (!fc_may_access(fc, controller, cgroup, NULL, O_RDONLY)) { ret = -EACCES; goto out; } // get uid, gid, from '/tasks' file and make up a mode // That is a hack, until cgmanager gains a GetCgroupPerms fn. 
sb->st_mode = S_IFDIR | 00755; k = cgfs_get_key(controller, cgroup, "tasks"); if (!k) { sb->st_uid = sb->st_gid = 0; } else { sb->st_uid = k->uid; sb->st_gid = k->gid; } free_key(k); sb->st_nlink = 2; ret = 0; goto out; } if ((k = cgfs_get_key(controller, path1, path2)) != NULL) { sb->st_mode = S_IFREG | k->mode; sb->st_nlink = 1; sb->st_uid = k->uid; sb->st_gid = k->gid; sb->st_size = 0; free_key(k); if (!caller_is_in_ancestor(fc->pid, controller, path1, NULL)) { ret = -ENOENT; goto out; } if (!fc_may_access(fc, controller, path1, path2, O_RDONLY)) { ret = -EACCES; goto out; } ret = 0; } out: free(cgdir); return ret; } static int cg_opendir(const char *path, struct fuse_file_info *fi) { struct fuse_context *fc = fuse_get_context(); const char *cgroup; struct file_info *dir_info; char *controller = NULL; if (!fc) return -EIO; if (strcmp(path, "/cgroup") == 0) { cgroup = NULL; controller = NULL; } else { // return list of keys for the controller, and list of child cgroups controller = pick_controller_from_path(fc, path); if (!controller) return -EIO; cgroup = find_cgroup_in_path(path); if (!cgroup) { /* this is just /cgroup/controller, return its contents */ cgroup = "/"; } } if (cgroup) { if (!caller_may_see_dir(fc->pid, controller, cgroup)) return -ENOENT; if (!fc_may_access(fc, controller, cgroup, NULL, O_RDONLY)) return -EACCES; } /* we'll free this at cg_releasedir */ dir_info = malloc(sizeof(*dir_info)); if (!dir_info) return -ENOMEM; dir_info->controller = must_copy_string(controller); dir_info->cgroup = must_copy_string(cgroup); dir_info->type = LXC_TYPE_CGDIR; dir_info->buf = NULL; dir_info->file = NULL; dir_info->buflen = 0; fi->fh = (unsigned long)dir_info; return 0; } static int cg_readdir(const char *path, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi) { struct file_info *d = (struct file_info *)fi->fh; struct cgfs_files **list = NULL; int i, ret; char *nextcg = NULL; struct fuse_context *fc = fuse_get_context(); char **clist = NULL; if (d->type != LXC_TYPE_CGDIR) { fprintf(stderr, "Internal error: file cache info used in readdir\n"); return -EIO; } if (!d->cgroup && !d->controller) { // ls /var/lib/lxcfs/cgroup - just show list of controllers int i; for (i = 0; i < num_hierarchies; i++) { if (hierarchies[i] && filler(buf, hierarchies[i], NULL, 0) != 0) { return -EIO; } } return 0; } if (!cgfs_list_keys(d->controller, d->cgroup, &list)) { // not a valid cgroup ret = -EINVAL; goto out; } if (!caller_is_in_ancestor(fc->pid, d->controller, d->cgroup, &nextcg)) { if (nextcg) { int ret; ret = filler(buf, nextcg, NULL, 0); free(nextcg); if (ret != 0) { ret = -EIO; goto out; } } ret = 0; goto out; } for (i = 0; list[i]; i++) { if (filler(buf, list[i]->name, NULL, 0) != 0) { ret = -EIO; goto out; } } // now get the list of child cgroups if (!cgfs_list_children(d->controller, d->cgroup, &clist)) { ret = 0; goto out; } for (i = 0; clist[i]; i++) { if (filler(buf, clist[i], NULL, 0) != 0) { ret = -EIO; goto out; } } ret = 0; out: free_keys(list); if (clist) { for (i = 0; clist[i]; i++) free(clist[i]); free(clist); } return ret; } static void do_release_file_info(struct file_info *f) { if (!f) return; free(f->controller); free(f->cgroup); free(f->file); free(f->buf); free(f); } static int cg_releasedir(const char *path, struct fuse_file_info *fi) { struct file_info *d = (struct file_info *)fi->fh; do_release_file_info(d); return 0; } static int cg_open(const char *path, struct fuse_file_info *fi) { const char *cgroup; char *fpath = NULL, *path1, *path2, * cgdir 
= NULL, *controller; struct cgfs_files *k = NULL; struct file_info *file_info; struct fuse_context *fc = fuse_get_context(); int ret; if (!fc) return -EIO; controller = pick_controller_from_path(fc, path); if (!controller) return -EIO; cgroup = find_cgroup_in_path(path); if (!cgroup) return -EINVAL; get_cgdir_and_path(cgroup, &cgdir, &fpath); if (!fpath) { path1 = "/"; path2 = cgdir; } else { path1 = cgdir; path2 = fpath; } k = cgfs_get_key(controller, path1, path2); if (!k) { ret = -EINVAL; goto out; } free_key(k); if (!caller_may_see_dir(fc->pid, controller, path1)) { ret = -ENOENT; goto out; } if (!fc_may_access(fc, controller, path1, path2, fi->flags)) { // should never get here ret = -EACCES; goto out; } /* we'll free this at cg_release */ file_info = malloc(sizeof(*file_info)); if (!file_info) { ret = -ENOMEM; goto out; } file_info->controller = must_copy_string(controller); file_info->cgroup = must_copy_string(path1); file_info->file = must_copy_string(path2); file_info->type = LXC_TYPE_CGFILE; file_info->buf = NULL; file_info->buflen = 0; fi->fh = (unsigned long)file_info; ret = 0; out: free(cgdir); return ret; } static int cg_release(const char *path, struct fuse_file_info *fi) { struct file_info *f = (struct file_info *)fi->fh; do_release_file_info(f); return 0; } static int msgrecv(int sockfd, void *buf, size_t len) { struct timeval tv; fd_set rfds; FD_ZERO(&rfds); FD_SET(sockfd, &rfds); tv.tv_sec = 2; tv.tv_usec = 0; if (select(sockfd+1, &rfds, NULL, NULL, &tv) <= 0) return -1; return recv(sockfd, buf, len, MSG_DONTWAIT); } #define SEND_CREDS_OK 0 #define SEND_CREDS_NOTSK 1 #define SEND_CREDS_FAIL 2 static int send_creds(int sock, struct ucred *cred, char v, bool pingfirst) { struct msghdr msg = { 0 }; struct iovec iov; struct cmsghdr *cmsg; char cmsgbuf[CMSG_SPACE(sizeof(*cred))]; char buf[1]; buf[0] = 'p'; if (pingfirst) { if (msgrecv(sock, buf, 1) != 1) { fprintf(stderr, "%s: Error getting reply from server over socketpair\n", __func__); return SEND_CREDS_FAIL; } } msg.msg_control = cmsgbuf; msg.msg_controllen = sizeof(cmsgbuf); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_len = CMSG_LEN(sizeof(struct ucred)); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_CREDENTIALS; memcpy(CMSG_DATA(cmsg), cred, sizeof(*cred)); msg.msg_name = NULL; msg.msg_namelen = 0; buf[0] = v; iov.iov_base = buf; iov.iov_len = sizeof(buf); msg.msg_iov = &iov; msg.msg_iovlen = 1; if (sendmsg(sock, &msg, 0) < 0) { fprintf(stderr, "%s: failed at sendmsg: %s\n", __func__, strerror(errno)); if (errno == 3) return SEND_CREDS_NOTSK; return SEND_CREDS_FAIL; } return SEND_CREDS_OK; } static bool recv_creds(int sock, struct ucred *cred, char *v) { struct msghdr msg = { 0 }; struct iovec iov; struct cmsghdr *cmsg; char cmsgbuf[CMSG_SPACE(sizeof(*cred))]; char buf[1]; int ret; int optval = 1; struct timeval tv; fd_set rfds; *v = '1'; cred->pid = -1; cred->uid = -1; cred->gid = -1; if (setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &optval, sizeof(optval)) == -1) { fprintf(stderr, "Failed to set passcred: %s\n", strerror(errno)); return false; } buf[0] = '1'; if (write(sock, buf, 1) != 1) { fprintf(stderr, "Failed to start write on scm fd: %s\n", strerror(errno)); return false; } msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = cmsgbuf; msg.msg_controllen = sizeof(cmsgbuf); iov.iov_base = buf; iov.iov_len = sizeof(buf); msg.msg_iov = &iov; msg.msg_iovlen = 1; FD_ZERO(&rfds); FD_SET(sock, &rfds); tv.tv_sec = 2; tv.tv_usec = 0; if (select(sock+1, &rfds, NULL, NULL, &tv) <= 0) { fprintf(stderr, "Failed to 
select for scm_cred: %s\n", strerror(errno)); return false; } ret = recvmsg(sock, &msg, MSG_DONTWAIT); if (ret < 0) { fprintf(stderr, "Failed to receive scm_cred: %s\n", strerror(errno)); return false; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg && cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)) && cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_CREDENTIALS) { memcpy(cred, CMSG_DATA(cmsg), sizeof(*cred)); } *v = buf[0]; return true; } /* * pid_to_ns - reads pids from a ucred over a socket, then writes the * int value back over the socket. This shifts the pid from the * sender's pidns into tpid's pidns. */ static void pid_to_ns(int sock, pid_t tpid) { char v = '0'; struct ucred cred; while (recv_creds(sock, &cred, &v)) { if (v == '1') _exit(0); if (write(sock, &cred.pid, sizeof(pid_t)) != sizeof(pid_t)) _exit(1); } _exit(0); } /* * pid_to_ns_wrapper: when you setns into a pidns, you yourself remain * in your old pidns. Only children which you fork will be in the target * pidns. So the pid_to_ns_wrapper does the setns, then forks a child to * actually convert pids */ static void pid_to_ns_wrapper(int sock, pid_t tpid) { int newnsfd = -1, ret, cpipe[2]; char fnam[100]; pid_t cpid; struct timeval tv; fd_set s; char v; ret = snprintf(fnam, sizeof(fnam), "/proc/%d/ns/pid", tpid); if (ret < 0 || ret >= sizeof(fnam)) _exit(1); newnsfd = open(fnam, O_RDONLY); if (newnsfd < 0) _exit(1); if (setns(newnsfd, 0) < 0) _exit(1); close(newnsfd); if (pipe(cpipe) < 0) _exit(1); loop: cpid = fork(); if (cpid < 0) _exit(1); if (!cpid) { char b = '1'; close(cpipe[0]); if (write(cpipe[1], &b, sizeof(char)) < 0) { fprintf(stderr, "%s (child): erorr on write: %s\n", __func__, strerror(errno)); } close(cpipe[1]); pid_to_ns(sock, tpid); } // give the child 1 second to be done forking and // write it's ack FD_ZERO(&s); FD_SET(cpipe[0], &s); tv.tv_sec = 1; tv.tv_usec = 0; ret = select(cpipe[0]+1, &s, NULL, NULL, &tv); if (ret <= 0) goto again; ret = read(cpipe[0], &v, 1); if (ret != sizeof(char) || v != '1') { goto again; } if (!wait_for_pid(cpid)) _exit(1); _exit(0); again: kill(cpid, SIGKILL); wait_for_pid(cpid); goto loop; } /* * To read cgroup files with a particular pid, we will setns into the child * pidns, open a pipe, fork a child - which will be the first to really be in * the child ns - which does the cgfs_get_value and writes the data to the pipe. 
*/ static bool do_read_pids(pid_t tpid, const char *contrl, const char *cg, const char *file, char **d) { int sock[2] = {-1, -1}; char *tmpdata = NULL; int ret; pid_t qpid, cpid = -1; bool answer = false; char v = '0'; struct ucred cred; struct timeval tv; size_t sz = 0, asz = 0; fd_set s; if (!cgfs_get_value(contrl, cg, file, &tmpdata)) return false; /* * Now we read the pids from returned data one by one, pass * them into a child in the target namespace, read back the * translated pids, and put them into our to-return data */ if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sock) < 0) { perror("socketpair"); free(tmpdata); return false; } cpid = fork(); if (cpid == -1) goto out; if (!cpid) // child pid_to_ns_wrapper(sock[1], tpid); char *ptr = tmpdata; cred.uid = 0; cred.gid = 0; while (sscanf(ptr, "%d\n", &qpid) == 1) { cred.pid = qpid; ret = send_creds(sock[0], &cred, v, true); if (ret == SEND_CREDS_NOTSK) goto next; if (ret == SEND_CREDS_FAIL) goto out; // read converted results FD_ZERO(&s); FD_SET(sock[0], &s); tv.tv_sec = 2; tv.tv_usec = 0; ret = select(sock[0]+1, &s, NULL, NULL, &tv); if (ret <= 0) { fprintf(stderr, "%s: select error waiting for pid from child: %s\n", __func__, strerror(errno)); goto out; } if (read(sock[0], &qpid, sizeof(qpid)) != sizeof(qpid)) { fprintf(stderr, "%s: error reading pid from child: %s\n", __func__, strerror(errno)); goto out; } must_strcat_pid(d, &sz, &asz, qpid); next: ptr = strchr(ptr, '\n'); if (!ptr) break; ptr++; } cred.pid = getpid(); v = '1'; if (send_creds(sock[0], &cred, v, true) != SEND_CREDS_OK) { // failed to ask child to exit fprintf(stderr, "%s: failed to ask child to exit: %s\n", __func__, strerror(errno)); goto out; } answer = true; out: free(tmpdata); if (cpid != -1) wait_for_pid(cpid); if (sock[0] != -1) { close(sock[0]); close(sock[1]); } return answer; } static int cg_read(const char *path, char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { struct fuse_context *fc = fuse_get_context(); struct file_info *f = (struct file_info *)fi->fh; struct cgfs_files *k = NULL; char *data = NULL; int ret, s; bool r; if (f->type != LXC_TYPE_CGFILE) { fprintf(stderr, "Internal error: directory cache info used in cg_read\n"); return -EIO; } if (offset) return 0; if (!fc) return -EIO; if (!f->controller) return -EINVAL; if ((k = cgfs_get_key(f->controller, f->cgroup, f->file)) == NULL) { return -EINVAL; } free_key(k); if (!fc_may_access(fc, f->controller, f->cgroup, f->file, O_RDONLY)) { // should never get here ret = -EACCES; goto out; } if (strcmp(f->file, "tasks") == 0 || strcmp(f->file, "/tasks") == 0 || strcmp(f->file, "/cgroup.procs") == 0 || strcmp(f->file, "cgroup.procs") == 0) // special case - we have to translate the pids r = do_read_pids(fc->pid, f->controller, f->cgroup, f->file, &data); else r = cgfs_get_value(f->controller, f->cgroup, f->file, &data); if (!r) { ret = -EINVAL; goto out; } if (!data) { ret = 0; goto out; } s = strlen(data); if (s > size) s = size; memcpy(buf, data, s); if (s > 0 && s < size && data[s-1] != '\n') buf[s++] = '\n'; ret = s; out: free(data); return ret; } static void pid_from_ns(int sock, pid_t tpid) { pid_t vpid; struct ucred cred; char v; struct timeval tv; fd_set s; int ret; cred.uid = 0; cred.gid = 0; while (1) { FD_ZERO(&s); FD_SET(sock, &s); tv.tv_sec = 2; tv.tv_usec = 0; ret = select(sock+1, &s, NULL, NULL, &tv); if (ret <= 0) { fprintf(stderr, "%s: bad select before read from parent: %s\n", __func__, strerror(errno)); _exit(1); } if ((ret = read(sock, &vpid, sizeof(pid_t))) != sizeof(pid_t)) 
{ fprintf(stderr, "%s: bad read from parent: %s\n", __func__, strerror(errno)); _exit(1); } if (vpid == -1) // done break; v = '0'; cred.pid = vpid; if (send_creds(sock, &cred, v, true) != SEND_CREDS_OK) { v = '1'; cred.pid = getpid(); if (send_creds(sock, &cred, v, false) != SEND_CREDS_OK) _exit(1); } } _exit(0); } static void pid_from_ns_wrapper(int sock, pid_t tpid) { int newnsfd = -1, ret, cpipe[2]; char fnam[100]; pid_t cpid; fd_set s; struct timeval tv; char v; ret = snprintf(fnam, sizeof(fnam), "/proc/%d/ns/pid", tpid); if (ret < 0 || ret >= sizeof(fnam)) _exit(1); newnsfd = open(fnam, O_RDONLY); if (newnsfd < 0) _exit(1); if (setns(newnsfd, 0) < 0) _exit(1); close(newnsfd); if (pipe(cpipe) < 0) _exit(1); loop: cpid = fork(); if (cpid < 0) _exit(1); if (!cpid) { char b = '1'; close(cpipe[0]); if (write(cpipe[1], &b, sizeof(char)) < 0) { fprintf(stderr, "%s (child): erorr on write: %s\n", __func__, strerror(errno)); } close(cpipe[1]); pid_from_ns(sock, tpid); } // give the child 1 second to be done forking and // write it's ack FD_ZERO(&s); FD_SET(cpipe[0], &s); tv.tv_sec = 1; tv.tv_usec = 0; ret = select(cpipe[0]+1, &s, NULL, NULL, &tv); if (ret <= 0) goto again; ret = read(cpipe[0], &v, 1); if (ret != sizeof(char) || v != '1') { goto again; } if (!wait_for_pid(cpid)) _exit(1); _exit(0); again: kill(cpid, SIGKILL); wait_for_pid(cpid); goto loop; } static bool do_write_pids(pid_t tpid, const char *contrl, const char *cg, const char *file, const char *buf) { int sock[2] = {-1, -1}; pid_t qpid, cpid = -1; FILE *pids_file = NULL; bool answer = false, fail = false; pids_file = open_pids_file(contrl, cg); if (!pids_file) return false; /* * write the pids to a socket, have helper in writer's pidns * call movepid for us */ if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sock) < 0) { perror("socketpair"); goto out; } cpid = fork(); if (cpid == -1) goto out; if (!cpid) { // child fclose(pids_file); pid_from_ns_wrapper(sock[1], tpid); } const char *ptr = buf; while (sscanf(ptr, "%d", &qpid) == 1) { struct ucred cred; char v; if (write(sock[0], &qpid, sizeof(qpid)) != sizeof(qpid)) { fprintf(stderr, "%s: error writing pid to child: %s\n", __func__, strerror(errno)); goto out; } if (recv_creds(sock[0], &cred, &v)) { if (v == '0') { if (fprintf(pids_file, "%d", (int) cred.pid) < 0) fail = true; } } ptr = strchr(ptr, '\n'); if (!ptr) break; ptr++; } /* All good, write the value */ qpid = -1; if (write(sock[0], &qpid ,sizeof(qpid)) != sizeof(qpid)) fprintf(stderr, "Warning: failed to ask child to exit\n"); if (!fail) answer = true; out: if (cpid != -1) wait_for_pid(cpid); if (sock[0] != -1) { close(sock[0]); close(sock[1]); } if (pids_file) { if (fclose(pids_file) != 0) answer = false; } return answer; } int cg_write(const char *path, const char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { struct fuse_context *fc = fuse_get_context(); char *localbuf = NULL; struct cgfs_files *k = NULL; struct file_info *f = (struct file_info *)fi->fh; bool r; if (f->type != LXC_TYPE_CGFILE) { fprintf(stderr, "Internal error: directory cache info used in cg_write\n"); return -EIO; } if (offset) return 0; if (!fc) return -EIO; localbuf = alloca(size+1); localbuf[size] = '\0'; memcpy(localbuf, buf, size); if ((k = cgfs_get_key(f->controller, f->cgroup, f->file)) == NULL) { size = -EINVAL; goto out; } if (!fc_may_access(fc, f->controller, f->cgroup, f->file, O_WRONLY)) { size = -EACCES; goto out; } if (strcmp(f->file, "tasks") == 0 || strcmp(f->file, "/tasks") == 0 || strcmp(f->file, "/cgroup.procs") == 0 || 
strcmp(f->file, "cgroup.procs") == 0) // special case - we have to translate the pids r = do_write_pids(fc->pid, f->controller, f->cgroup, f->file, localbuf); else r = cgfs_set_value(f->controller, f->cgroup, f->file, localbuf); if (!r) size = -EINVAL; out: free_key(k); return size; } int cg_chown(const char *path, uid_t uid, gid_t gid) { struct fuse_context *fc = fuse_get_context(); char *cgdir = NULL, *fpath = NULL, *path1, *path2, *controller; struct cgfs_files *k = NULL; const char *cgroup; int ret; if (!fc) return -EIO; if (strcmp(path, "/cgroup") == 0) return -EINVAL; controller = pick_controller_from_path(fc, path); if (!controller) return -EINVAL; cgroup = find_cgroup_in_path(path); if (!cgroup) /* this is just /cgroup/controller */ return -EINVAL; get_cgdir_and_path(cgroup, &cgdir, &fpath); if (!fpath) { path1 = "/"; path2 = cgdir; } else { path1 = cgdir; path2 = fpath; } if (is_child_cgroup(controller, path1, path2)) { // get uid, gid, from '/tasks' file and make up a mode // That is a hack, until cgmanager gains a GetCgroupPerms fn. k = cgfs_get_key(controller, cgroup, "tasks"); } else k = cgfs_get_key(controller, path1, path2); if (!k) { ret = -EINVAL; goto out; } /* * This being a fuse request, the uid and gid must be valid * in the caller's namespace. So we can just check to make * sure that the caller is root in his uid, and privileged * over the file's current owner. */ if (!is_privileged_over(fc->pid, fc->uid, k->uid, NS_ROOT_REQD)) { ret = -EACCES; goto out; } ret = cgfs_chown_file(controller, cgroup, uid, gid); out: free_key(k); free(cgdir); return ret; } int cg_chmod(const char *path, mode_t mode) { struct fuse_context *fc = fuse_get_context(); char * cgdir = NULL, *fpath = NULL, *path1, *path2, *controller; struct cgfs_files *k = NULL; const char *cgroup; int ret; if (!fc) return -EIO; if (strcmp(path, "/cgroup") == 0) return -EINVAL; controller = pick_controller_from_path(fc, path); if (!controller) return -EINVAL; cgroup = find_cgroup_in_path(path); if (!cgroup) /* this is just /cgroup/controller */ return -EINVAL; get_cgdir_and_path(cgroup, &cgdir, &fpath); if (!fpath) { path1 = "/"; path2 = cgdir; } else { path1 = cgdir; path2 = fpath; } if (is_child_cgroup(controller, path1, path2)) { // get uid, gid, from '/tasks' file and make up a mode // That is a hack, until cgmanager gains a GetCgroupPerms fn. k = cgfs_get_key(controller, cgroup, "tasks"); } else k = cgfs_get_key(controller, path1, path2); if (!k) { ret = -EINVAL; goto out; } /* * This being a fuse request, the uid and gid must be valid * in the caller's namespace. So we can just check to make * sure that the caller is root in his uid, and privileged * over the file's current owner. 
*/ if (!is_privileged_over(fc->pid, fc->uid, k->uid, NS_ROOT_OPT)) { ret = -EPERM; goto out; } if (!cgfs_chmod_file(controller, cgroup, mode)) { ret = -EINVAL; goto out; } ret = 0; out: free_key(k); free(cgdir); return ret; } int cg_mkdir(const char *path, mode_t mode) { struct fuse_context *fc = fuse_get_context(); char *fpath = NULL, *path1, *cgdir = NULL, *controller, *next = NULL; const char *cgroup; int ret; if (!fc) return -EIO; controller = pick_controller_from_path(fc, path); if (!controller) return -EINVAL; cgroup = find_cgroup_in_path(path); if (!cgroup) return -EINVAL; get_cgdir_and_path(cgroup, &cgdir, &fpath); if (!fpath) path1 = "/"; else path1 = cgdir; if (!caller_is_in_ancestor(fc->pid, controller, path1, &next)) { if (fpath && strcmp(next, fpath) == 0) ret = -EEXIST; else ret = -ENOENT; goto out; } if (!fc_may_access(fc, controller, path1, NULL, O_RDWR)) { ret = -EACCES; goto out; } if (!caller_is_in_ancestor(fc->pid, controller, path1, NULL)) { ret = -EACCES; goto out; } ret = cgfs_create(controller, cgroup, fc->uid, fc->gid); printf("cgfs_create returned %d for %s %s\n", ret, controller, cgroup); out: free(cgdir); free(next); return ret; } static int cg_rmdir(const char *path) { struct fuse_context *fc = fuse_get_context(); char *fpath = NULL, *cgdir = NULL, *controller, *next = NULL; const char *cgroup; int ret; if (!fc) return -EIO; controller = pick_controller_from_path(fc, path); if (!controller) return -EINVAL; cgroup = find_cgroup_in_path(path); if (!cgroup) return -EINVAL; get_cgdir_and_path(cgroup, &cgdir, &fpath); if (!fpath) { ret = -EINVAL; goto out; } if (!caller_is_in_ancestor(fc->pid, controller, cgroup, &next)) { if (!fpath || strcmp(next, fpath) == 0) ret = -EBUSY; else ret = -ENOENT; goto out; } if (!fc_may_access(fc, controller, cgdir, NULL, O_WRONLY)) { ret = -EACCES; goto out; } if (!caller_is_in_ancestor(fc->pid, controller, cgroup, NULL)) { ret = -EACCES; goto out; } if (!cgfs_remove(controller, cgroup)) { ret = -EINVAL; goto out; } ret = 0; out: free(cgdir); free(next); return ret; } static bool startswith(const char *line, const char *pref) { if (strncmp(line, pref, strlen(pref)) == 0) return true; return false; } static void get_mem_cached(char *memstat, unsigned long *v) { char *eol; *v = 0; while (*memstat) { if (startswith(memstat, "total_cache")) { sscanf(memstat + 11, "%lu", v); *v /= 1024; return; } eol = strchr(memstat, '\n'); if (!eol) return; memstat = eol+1; } } static void get_blkio_io_value(char *str, unsigned major, unsigned minor, char *iotype, unsigned long *v) { char *eol; char key[32]; memset(key, 0, 32); snprintf(key, 32, "%u:%u %s", major, minor, iotype); size_t len = strlen(key); *v = 0; while (*str) { if (startswith(str, key)) { sscanf(str + len, "%lu", v); return; } eol = strchr(str, '\n'); if (!eol) return; str = eol+1; } } static int read_file(const char *path, char *buf, size_t size, struct file_info *d) { size_t linelen = 0, total_len = 0, rv = 0; char *line = NULL; char *cache = d->buf; size_t cache_size = d->buflen; FILE *f = fopen(path, "r"); if (!f) return 0; while (getline(&line, &linelen, f) != -1) { size_t l = snprintf(cache, cache_size, "%s", line); if (l < 0) { perror("Error writing to cache"); rv = 0; goto err; } if (l >= cache_size) { fprintf(stderr, "Internal error: truncated write to cache\n"); rv = 0; goto err; } if (l < cache_size) { cache += l; cache_size -= l; total_len += l; } else { cache += cache_size; total_len += cache_size; cache_size = 0; break; } } d->size = total_len; if (total_len > size ) 
total_len = size; /* read from off 0 */ memcpy(buf, d->buf, total_len); rv = total_len; err: fclose(f); free(line); return rv; } /* * FUSE ops for /proc */ static unsigned long get_memlimit(const char *cgroup) { char *memlimit_str = NULL; unsigned long memlimit = -1; if (cgfs_get_value("memory", cgroup, "memory.limit_in_bytes", &memlimit_str)) memlimit = strtoul(memlimit_str, NULL, 10); free(memlimit_str); return memlimit; } static unsigned long get_min_memlimit(const char *cgroup) { char *copy = strdupa(cgroup); unsigned long memlimit = 0, retlimit; retlimit = get_memlimit(copy); while (strcmp(copy, "/") != 0) { copy = dirname(copy); memlimit = get_memlimit(copy); if (memlimit != -1 && memlimit < retlimit) retlimit = memlimit; }; return retlimit; } static int proc_meminfo_read(char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { struct fuse_context *fc = fuse_get_context(); struct file_info *d = (struct file_info *)fi->fh; char *cg; char *memusage_str = NULL, *memstat_str = NULL, *memswlimit_str = NULL, *memswusage_str = NULL; unsigned long memlimit = 0, memusage = 0, memswlimit = 0, memswusage = 0, cached = 0, hosttotal = 0; char *line = NULL; size_t linelen = 0, total_len = 0, rv = 0; char *cache = d->buf; size_t cache_size = d->buflen; FILE *f = NULL; if (offset){ if (offset > d->size) return -EINVAL; if (!d->cached) return 0; int left = d->size - offset; total_len = left > size ? size: left; memcpy(buf, cache + offset, total_len); return total_len; } cg = get_pid_cgroup(fc->pid, "memory"); if (!cg) return read_file("/proc/meminfo", buf, size, d); memlimit = get_min_memlimit(cg); if (!cgfs_get_value("memory", cg, "memory.usage_in_bytes", &memusage_str)) goto err; if (!cgfs_get_value("memory", cg, "memory.stat", &memstat_str)) goto err; // Following values are allowed to fail, because swapaccount might be turned // off for current kernel if(cgfs_get_value("memory", cg, "memory.memsw.limit_in_bytes", &memswlimit_str) && cgfs_get_value("memory", cg, "memory.memsw.usage_in_bytes", &memswusage_str)) { memswlimit = strtoul(memswlimit_str, NULL, 10); memswusage = strtoul(memswusage_str, NULL, 10); memswlimit /= 1024; memswusage /= 1024; } memusage = strtoul(memusage_str, NULL, 10); memlimit /= 1024; memusage /= 1024; get_mem_cached(memstat_str, &cached); f = fopen("/proc/meminfo", "r"); if (!f) goto err; while (getline(&line, &linelen, f) != -1) { size_t l; char *printme, lbuf[100]; memset(lbuf, 0, 100); if (startswith(line, "MemTotal:")) { sscanf(line+14, "%lu", &hosttotal); if (hosttotal < memlimit) memlimit = hosttotal; snprintf(lbuf, 100, "MemTotal: %8lu kB\n", memlimit); printme = lbuf; } else if (startswith(line, "MemFree:")) { snprintf(lbuf, 100, "MemFree: %8lu kB\n", memlimit - memusage); printme = lbuf; } else if (startswith(line, "MemAvailable:")) { snprintf(lbuf, 100, "MemAvailable: %8lu kB\n", memlimit - memusage); printme = lbuf; } else if (startswith(line, "SwapTotal:") && memswlimit > 0) { snprintf(lbuf, 100, "SwapTotal: %8lu kB\n", memswlimit - memlimit); printme = lbuf; } else if (startswith(line, "SwapFree:") && memswlimit > 0 && memswusage > 0) { snprintf(lbuf, 100, "SwapFree: %8lu kB\n", (memswlimit - memlimit) - (memswusage - memusage)); printme = lbuf; } else if (startswith(line, "Buffers:")) { snprintf(lbuf, 100, "Buffers: %8lu kB\n", 0UL); printme = lbuf; } else if (startswith(line, "Cached:")) { snprintf(lbuf, 100, "Cached: %8lu kB\n", cached); printme = lbuf; } else if (startswith(line, "SwapCached:")) { snprintf(lbuf, 100, "SwapCached: %8lu kB\n", 
0UL); printme = lbuf; } else printme = line; l = snprintf(cache, cache_size, "%s", printme); if (l < 0) { perror("Error writing to cache"); rv = 0; goto err; } if (l >= cache_size) { fprintf(stderr, "Internal error: truncated write to cache\n"); rv = 0; goto err; } cache += l; cache_size -= l; total_len += l; } d->cached = 1; d->size = total_len; if (total_len > size ) total_len = size; memcpy(buf, d->buf, total_len); rv = total_len; err: if (f) fclose(f); free(line); free(cg); free(memusage_str); free(memswlimit_str); free(memswusage_str); free(memstat_str); return rv; } /* * Read the cpuset.cpus for cg * Return the answer in a newly allocated string which must be freed */ static char *get_cpuset(const char *cg) { char *answer; if (!cgfs_get_value("cpuset", cg, "cpuset.cpus", &answer)) return NULL; return answer; } bool cpu_in_cpuset(int cpu, const char *cpuset); static bool cpuline_in_cpuset(const char *line, const char *cpuset) { int cpu; if (sscanf(line, "processor : %d", &cpu) != 1) return false; return cpu_in_cpuset(cpu, cpuset); } /* * check whether this is a '^processor" line in /proc/cpuinfo */ static bool is_processor_line(const char *line) { int cpu; if (sscanf(line, "processor : %d", &cpu) == 1) return true; return false; } static int proc_cpuinfo_read(char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { struct fuse_context *fc = fuse_get_context(); struct file_info *d = (struct file_info *)fi->fh; char *cg; char *cpuset = NULL; char *line = NULL; size_t linelen = 0, total_len = 0, rv = 0; bool am_printing = false; int curcpu = -1; char *cache = d->buf; size_t cache_size = d->buflen; FILE *f = NULL; if (offset){ if (offset > d->size) return -EINVAL; if (!d->cached) return 0; int left = d->size - offset; total_len = left > size ? 
size: left;
		memcpy(buf, cache + offset, total_len);
		return total_len;
	}

	cg = get_pid_cgroup(fc->pid, "cpuset");
	if (!cg)
		return read_file("/proc/cpuinfo", buf, size, d);

	cpuset = get_cpuset(cg);
	if (!cpuset)
		goto err;

	f = fopen("/proc/cpuinfo", "r");
	if (!f)
		goto err;

	while (getline(&line, &linelen, f) != -1) {
		size_t l;
		if (is_processor_line(line)) {
			am_printing = cpuline_in_cpuset(line, cpuset);
			if (am_printing) {
				curcpu++;
				l = snprintf(cache, cache_size, "processor : %d\n", curcpu);
				if (l < 0) {
					perror("Error writing to cache");
					rv = 0;
					goto err;
				}
				if (l >= cache_size) {
					fprintf(stderr, "Internal error: truncated write to cache\n");
					rv = 0;
					goto err;
				}
				if (l < cache_size) {
					cache += l;
					cache_size -= l;
					total_len += l;
				} else {
					cache += cache_size;
					total_len += cache_size;
					cache_size = 0;
					break;
				}
			}
			continue;
		}
		if (am_printing) {
			l = snprintf(cache, cache_size, "%s", line);
			if (l < 0) {
				perror("Error writing to cache");
				rv = 0;
				goto err;
			}
			if (l >= cache_size) {
				fprintf(stderr, "Internal error: truncated write to cache\n");
				rv = 0;
				goto err;
			}
			if (l < cache_size) {
				cache += l;
				cache_size -= l;
				total_len += l;
			} else {
				cache += cache_size;
				total_len += cache_size;
				cache_size = 0;
				break;
			}
		}
	}

	d->cached = 1;
	d->size = total_len;
	if (total_len > size)
		total_len = size;

	/* read from off 0 */
	memcpy(buf, d->buf, total_len);
	rv = total_len;
err:
	if (f)
		fclose(f);
	free(line);
	free(cpuset);
	free(cg);
	return rv;
}

static int proc_stat_read(char *buf, size_t size, off_t offset,
		struct fuse_file_info *fi)
{
	struct fuse_context *fc = fuse_get_context();
	struct file_info *d = (struct file_info *)fi->fh;
	char *cg;
	char *cpuset = NULL;
	char *line = NULL;
	size_t linelen = 0, total_len = 0, rv = 0;
	int curcpu = -1; /* cpu numbering starts at 0 */
	unsigned long user = 0, nice = 0, system = 0, idle = 0, iowait = 0,
		irq = 0, softirq = 0, steal = 0, guest = 0;
	unsigned long user_sum = 0, nice_sum = 0, system_sum = 0, idle_sum = 0,
		iowait_sum = 0, irq_sum = 0, softirq_sum = 0, steal_sum = 0,
		guest_sum = 0;
#define CPUALL_MAX_SIZE BUF_RESERVE_SIZE
	char cpuall[CPUALL_MAX_SIZE];
	/* reserve for cpu all */
	char *cache = d->buf + CPUALL_MAX_SIZE;
	size_t cache_size = d->buflen - CPUALL_MAX_SIZE;
	FILE *f = NULL;

	if (offset) {
		if (offset > d->size)
			return -EINVAL;
		if (!d->cached)
			return 0;
		int left = d->size - offset;
		total_len = left > size ?
size: left; memcpy(buf, d->buf + offset, total_len); return total_len; } cg = get_pid_cgroup(fc->pid, "cpuset"); if (!cg) return read_file("/proc/stat", buf, size, d); cpuset = get_cpuset(cg); if (!cpuset) goto err; f = fopen("/proc/stat", "r"); if (!f) goto err; //skip first line if (getline(&line, &linelen, f) < 0) { fprintf(stderr, "proc_stat_read read first line failed\n"); goto err; } while (getline(&line, &linelen, f) != -1) { size_t l; int cpu; char cpu_char[10]; /* That's a lot of cores */ char *c; if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1) { /* not a ^cpuN line containing a number N, just print it */ l = snprintf(cache, cache_size, "%s", line); if (l < 0) { perror("Error writing to cache"); rv = 0; goto err; } if (l >= cache_size) { fprintf(stderr, "Internal error: truncated write to cache\n"); rv = 0; goto err; } if (l < cache_size) { cache += l; cache_size -= l; total_len += l; continue; } else { //no more space, break it cache += cache_size; total_len += cache_size; cache_size = 0; break; } } if (sscanf(cpu_char, "%d", &cpu) != 1) continue; if (!cpu_in_cpuset(cpu, cpuset)) continue; curcpu ++; c = strchr(line, ' '); if (!c) continue; l = snprintf(cache, cache_size, "cpu%d%s", curcpu, c); if (l < 0) { perror("Error writing to cache"); rv = 0; goto err; } if (l >= cache_size) { fprintf(stderr, "Internal error: truncated write to cache\n"); rv = 0; goto err; } cache += l; cache_size -= l; total_len += l; if (sscanf(line, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu", &user, &nice, &system, &idle, &iowait, &irq, &softirq, &steal, &guest) != 9) continue; user_sum += user; nice_sum += nice; system_sum += system; idle_sum += idle; iowait_sum += iowait; irq_sum += irq; softirq_sum += softirq; steal_sum += steal; guest_sum += guest; } cache = d->buf; int cpuall_len = snprintf(cpuall, CPUALL_MAX_SIZE, "%s %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", "cpu ", user_sum, nice_sum, system_sum, idle_sum, iowait_sum, irq_sum, softirq_sum, steal_sum, guest_sum); if (cpuall_len > 0 && cpuall_len < CPUALL_MAX_SIZE){ memcpy(cache, cpuall, cpuall_len); cache += cpuall_len; } else{ /* shouldn't happen */ fprintf(stderr, "proc_stat_read copy cpuall failed, cpuall_len=%d\n", cpuall_len); cpuall_len = 0; } memmove(cache, d->buf + CPUALL_MAX_SIZE, total_len); total_len += cpuall_len; d->cached = 1; d->size = total_len; if (total_len > size ) total_len = size; memcpy(buf, d->buf, total_len); rv = total_len; err: if (f) fclose(f); free(line); free(cpuset); free(cg); return rv; } /* * How to guess what to present for uptime? * One thing we could do would be to take the date on the caller's * memory.usage_in_bytes file, which should equal the time of creation * of his cgroup. However, a task could be in a sub-cgroup of the * container. The same problem exists if we try to look at the ages * of processes in the caller's cgroup. * * So we'll fork a task that will enter the caller's pidns, mount a * fresh procfs, get the age of /proc/1, and pass that back over a pipe. * * For the second uptime #, we'll do as Stéphane had done, just copy * the number from /proc/uptime. Not sure how to best emulate 'idle' * time. Maybe someone can come up with a good algorithm and submit a * patch. Maybe something based on cpushare info? 
*/ /* return age of the reaper for $pid, taken from ctime of its procdir */ static long int get_pid1_time(pid_t pid) { char fnam[100]; int fd, cpipe[2], ret; struct stat sb; pid_t cpid; struct timeval tv; fd_set s; char v; if (unshare(CLONE_NEWNS)) return 0; if (mount(NULL, "/", NULL, MS_SLAVE|MS_REC, NULL)) { perror("rslave mount failed"); return 0; } ret = snprintf(fnam, sizeof(fnam), "/proc/%d/ns/pid", pid); if (ret < 0 || ret >= sizeof(fnam)) return 0; fd = open(fnam, O_RDONLY); if (fd < 0) { perror("get_pid1_time open of ns/pid"); return 0; } if (setns(fd, 0)) { perror("get_pid1_time setns 1"); close(fd); return 0; } close(fd); if (pipe(cpipe) < 0) exit(1); loop: cpid = fork(); if (cpid < 0) return 0; if (!cpid) { char b = '1'; close(cpipe[0]); if (write(cpipe[1], &b, sizeof(char)) < 0) { fprintf(stderr, "%s (child): erorr on write: %s\n", __func__, strerror(errno)); } close(cpipe[1]); umount2("/proc", MNT_DETACH); if (mount("proc", "/proc", "proc", 0, NULL)) { perror("get_pid1_time mount"); return 0; } ret = lstat("/proc/1", &sb); if (ret) { perror("get_pid1_time lstat"); return 0; } return time(NULL) - sb.st_ctime; } // give the child 1 second to be done forking and // write it's ack FD_ZERO(&s); FD_SET(cpipe[0], &s); tv.tv_sec = 1; tv.tv_usec = 0; ret = select(cpipe[0]+1, &s, NULL, NULL, &tv); if (ret <= 0) goto again; ret = read(cpipe[0], &v, 1); if (ret != sizeof(char) || v != '1') { goto again; } wait_for_pid(cpid); _exit(0); again: kill(cpid, SIGKILL); wait_for_pid(cpid); goto loop; } static long int getreaperage(pid_t qpid) { int pid, mypipe[2], ret; struct timeval tv; fd_set s; long int mtime, answer = 0; if (pipe(mypipe)) { return 0; } pid = fork(); if (!pid) { // child mtime = get_pid1_time(qpid); if (write(mypipe[1], &mtime, sizeof(mtime)) != sizeof(mtime)) fprintf(stderr, "Warning: bad write from getreaperage\n"); _exit(0); } close(mypipe[1]); FD_ZERO(&s); FD_SET(mypipe[0], &s); tv.tv_sec = 1; tv.tv_usec = 0; ret = select(mypipe[0]+1, &s, NULL, NULL, &tv); if (ret <= 0) { perror("select"); goto out; } if (!ret) { fprintf(stderr, "timed out\n"); goto out; } if (read(mypipe[0], &mtime, sizeof(mtime)) != sizeof(mtime)) { perror("read"); goto out; } answer = mtime; out: wait_for_pid(pid); close(mypipe[0]); return answer; } /* * fork a task which switches to @task's namespace and writes '1'. 
* over a unix sock so we can read the task's reaper's pid in our * namespace */ void write_task_init_pid_exit(int sock, pid_t target) { struct ucred cred; char fnam[100]; pid_t pid; char v; int fd, ret; ret = snprintf(fnam, sizeof(fnam), "/proc/%d/ns/pid", (int)target); if (ret < 0 || ret >= sizeof(fnam)) exit(1); fd = open(fnam, O_RDONLY); if (fd < 0) { perror("get_pid1_time open of ns/pid"); exit(1); } if (setns(fd, 0)) { perror("get_pid1_time setns 1"); close(fd); exit(1); } pid = fork(); if (pid < 0) exit(1); if (pid != 0) { wait_for_pid(pid); exit(0); } /* we are the child */ cred.uid = 0; cred.gid = 0; cred.pid = 1; v = '1'; send_creds(sock, &cred, v, true); exit(0); } static pid_t get_task_reaper_pid(pid_t task) { int sock[2]; pid_t pid; pid_t ret = -1; char v = '0'; struct ucred cred; if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sock) < 0) { perror("socketpair"); return -1; } pid = fork(); if (pid < 0) goto out; if (!pid) { close(sock[1]); write_task_init_pid_exit(sock[0], task); } if (!recv_creds(sock[1], &cred, &v)) goto out; ret = cred.pid; out: close(sock[0]); close(sock[1]); return ret; } static unsigned long get_reaper_busy(pid_t task) { pid_t init = get_task_reaper_pid(task); char *cgroup = NULL, *usage_str = NULL; unsigned long usage = 0; if (init == -1) return 0; cgroup = get_pid_cgroup(task, "cpuacct"); if (!cgroup) goto out; if (!cgfs_get_value("cpuacct", cgroup, "cpuacct.usage", &usage_str)) goto out; usage = strtoul(usage_str, NULL, 10); usage /= 100000000; out: free(cgroup); free(usage_str); return usage; } /* * We read /proc/uptime and reuse its second field. * For the first field, we use the mtime for the reaper for * the calling pid as returned by getreaperage */ static int proc_uptime_read(char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { struct fuse_context *fc = fuse_get_context(); struct file_info *d = (struct file_info *)fi->fh; long int reaperage = getreaperage(fc->pid);; unsigned long int busytime = get_reaper_busy(fc->pid), idletime; char *cache = d->buf; size_t total_len = 0; if (offset){ if (offset > d->size) return -EINVAL; if (!d->cached) return 0; int left = d->size - offset; total_len = left > size ? 
size: left; memcpy(buf, cache + offset, total_len); return total_len; } idletime = reaperage - busytime; if (idletime > reaperage) idletime = reaperage; total_len = snprintf(d->buf, d->size, "%ld.0 %lu.0\n", reaperage, idletime); if (total_len < 0){ perror("Error writing to cache"); return 0; } d->size = (int)total_len; d->cached = 1; if (total_len > size) total_len = size; memcpy(buf, d->buf, total_len); return total_len; } static int proc_diskstats_read(char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { char dev_name[72]; struct fuse_context *fc = fuse_get_context(); struct file_info *d = (struct file_info *)fi->fh; char *cg; char *io_serviced_str = NULL, *io_merged_str = NULL, *io_service_bytes_str = NULL, *io_wait_time_str = NULL, *io_service_time_str = NULL; unsigned long read = 0, write = 0; unsigned long read_merged = 0, write_merged = 0; unsigned long read_sectors = 0, write_sectors = 0; unsigned long read_ticks = 0, write_ticks = 0; unsigned long ios_pgr = 0, tot_ticks = 0, rq_ticks = 0; unsigned long rd_svctm = 0, wr_svctm = 0, rd_wait = 0, wr_wait = 0; char *cache = d->buf; size_t cache_size = d->buflen; char *line = NULL; size_t linelen = 0, total_len = 0, rv = 0; unsigned int major = 0, minor = 0; int i = 0; FILE *f = NULL; if (offset){ if (offset > d->size) return -EINVAL; if (!d->cached) return 0; int left = d->size - offset; total_len = left > size ? size: left; memcpy(buf, cache + offset, total_len); return total_len; } cg = get_pid_cgroup(fc->pid, "blkio"); if (!cg) return read_file("/proc/diskstats", buf, size, d); if (!cgfs_get_value("blkio", cg, "blkio.io_serviced", &io_serviced_str)) goto err; if (!cgfs_get_value("blkio", cg, "blkio.io_merged", &io_merged_str)) goto err; if (!cgfs_get_value("blkio", cg, "blkio.io_service_bytes", &io_service_bytes_str)) goto err; if (!cgfs_get_value("blkio", cg, "blkio.io_wait_time", &io_wait_time_str)) goto err; if (!cgfs_get_value("blkio", cg, "blkio.io_service_time", &io_service_time_str)) goto err; f = fopen("/proc/diskstats", "r"); if (!f) goto err; while (getline(&line, &linelen, f) != -1) { size_t l; char *printme, lbuf[256]; i = sscanf(line, "%u %u %71s", &major, &minor, dev_name); if(i == 3){ get_blkio_io_value(io_serviced_str, major, minor, "Read", &read); get_blkio_io_value(io_serviced_str, major, minor, "Write", &write); get_blkio_io_value(io_merged_str, major, minor, "Read", &read_merged); get_blkio_io_value(io_merged_str, major, minor, "Write", &write_merged); get_blkio_io_value(io_service_bytes_str, major, minor, "Read", &read_sectors); read_sectors = read_sectors/512; get_blkio_io_value(io_service_bytes_str, major, minor, "Write", &write_sectors); write_sectors = write_sectors/512; get_blkio_io_value(io_service_time_str, major, minor, "Read", &rd_svctm); rd_svctm = rd_svctm/1000000; get_blkio_io_value(io_wait_time_str, major, minor, "Read", &rd_wait); rd_wait = rd_wait/1000000; read_ticks = rd_svctm + rd_wait; get_blkio_io_value(io_service_time_str, major, minor, "Write", &wr_svctm); wr_svctm = wr_svctm/1000000; get_blkio_io_value(io_wait_time_str, major, minor, "Write", &wr_wait); wr_wait = wr_wait/1000000; write_ticks = wr_svctm + wr_wait; get_blkio_io_value(io_service_time_str, major, minor, "Total", &tot_ticks); tot_ticks = tot_ticks/1000000; }else{ continue; } memset(lbuf, 0, 256); if (read || write || read_merged || write_merged || read_sectors || write_sectors || read_ticks || write_ticks) { snprintf(lbuf, 256, "%u %u %s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", major, minor, dev_name, read, 
read_merged, read_sectors, read_ticks, write, write_merged, write_sectors, write_ticks, ios_pgr, tot_ticks, rq_ticks); printme = lbuf; } else continue; l = snprintf(cache, cache_size, "%s", printme); if (l < 0) { perror("Error writing to fuse buf"); rv = 0; goto err; } if (l >= cache_size) { fprintf(stderr, "Internal error: truncated write to cache\n"); rv = 0; goto err; } cache += l; cache_size -= l; total_len += l; } d->cached = 1; d->size = total_len; if (total_len > size ) total_len = size; memcpy(buf, d->buf, total_len); rv = total_len; err: free(cg); if (f) fclose(f); free(line); free(io_serviced_str); free(io_merged_str); free(io_service_bytes_str); free(io_wait_time_str); free(io_service_time_str); return rv; } static off_t get_procfile_size(const char *which) { FILE *f = fopen(which, "r"); char *line = NULL; size_t len = 0; ssize_t sz, answer = 0; if (!f) return 0; while ((sz = getline(&line, &len, f)) != -1) answer += sz; fclose (f); free(line); return answer; } static int proc_getattr(const char *path, struct stat *sb) { struct timespec now; memset(sb, 0, sizeof(struct stat)); if (clock_gettime(CLOCK_REALTIME, &now) < 0) return -EINVAL; sb->st_uid = sb->st_gid = 0; sb->st_atim = sb->st_mtim = sb->st_ctim = now; if (strcmp(path, "/proc") == 0) { sb->st_mode = S_IFDIR | 00555; sb->st_nlink = 2; return 0; } if (strcmp(path, "/proc/meminfo") == 0 || strcmp(path, "/proc/cpuinfo") == 0 || strcmp(path, "/proc/uptime") == 0 || strcmp(path, "/proc/stat") == 0 || strcmp(path, "/proc/diskstats") == 0) { sb->st_size = 0; sb->st_mode = S_IFREG | 00444; sb->st_nlink = 1; return 0; } return -ENOENT; } static int proc_readdir(const char *path, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi) { if (filler(buf, "cpuinfo", NULL, 0) != 0 || filler(buf, "meminfo", NULL, 0) != 0 || filler(buf, "stat", NULL, 0) != 0 || filler(buf, "uptime", NULL, 0) != 0 || filler(buf, "diskstats", NULL, 0) != 0) return -EINVAL; return 0; } static int proc_open(const char *path, struct fuse_file_info *fi) { int type = -1; struct file_info *info; if (strcmp(path, "/proc/meminfo") == 0) type = LXC_TYPE_PROC_MEMINFO; else if (strcmp(path, "/proc/cpuinfo") == 0) type = LXC_TYPE_PROC_CPUINFO; else if (strcmp(path, "/proc/uptime") == 0) type = LXC_TYPE_PROC_UPTIME; else if (strcmp(path, "/proc/stat") == 0) type = LXC_TYPE_PROC_STAT; else if (strcmp(path, "/proc/diskstats") == 0) type = LXC_TYPE_PROC_DISKSTATS; if (type == -1) return -ENOENT; info = malloc(sizeof(*info)); if (!info) return -ENOMEM; memset(info, 0, sizeof(*info)); info->type = type; info->buflen = get_procfile_size(path) + BUF_RESERVE_SIZE; do { info->buf = malloc(info->buflen); } while (!info->buf); memset(info->buf, 0, info->buflen); /* set actual size to buffer size */ info->size = info->buflen; fi->fh = (unsigned long)info; return 0; } static int proc_release(const char *path, struct fuse_file_info *fi) { struct file_info *f = (struct file_info *)fi->fh; do_release_file_info(f); return 0; } static int proc_read(const char *path, char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { struct file_info *f = (struct file_info *) fi->fh; switch (f->type) { case LXC_TYPE_PROC_MEMINFO: return proc_meminfo_read(buf, size, offset, fi); case LXC_TYPE_PROC_CPUINFO: return proc_cpuinfo_read(buf, size, offset, fi); case LXC_TYPE_PROC_UPTIME: return proc_uptime_read(buf, size, offset, fi); case LXC_TYPE_PROC_STAT: return proc_stat_read(buf, size, offset, fi); case LXC_TYPE_PROC_DISKSTATS: return proc_diskstats_read(buf, size, 
offset, fi); default: return -EINVAL; } } /* * FUSE ops for / * these just delegate to the /proc and /cgroup ops as * needed */ static int lxcfs_getattr(const char *path, struct stat *sb) { if (strcmp(path, "/") == 0) { sb->st_mode = S_IFDIR | 00755; sb->st_nlink = 2; return 0; } if (strncmp(path, "/cgroup", 7) == 0) { return cg_getattr(path, sb); } if (strncmp(path, "/proc", 5) == 0) { return proc_getattr(path, sb); } return -EINVAL; } static int lxcfs_opendir(const char *path, struct fuse_file_info *fi) { if (strcmp(path, "/") == 0) return 0; if (strncmp(path, "/cgroup", 7) == 0) { return cg_opendir(path, fi); } if (strcmp(path, "/proc") == 0) return 0; return -ENOENT; } static int lxcfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi) { if (strcmp(path, "/") == 0) { if (filler(buf, "proc", NULL, 0) != 0 || filler(buf, "cgroup", NULL, 0) != 0) return -EINVAL; return 0; } if (strncmp(path, "/cgroup", 7) == 0) return cg_readdir(path, buf, filler, offset, fi); if (strcmp(path, "/proc") == 0) return proc_readdir(path, buf, filler, offset, fi); return -EINVAL; } static int lxcfs_releasedir(const char *path, struct fuse_file_info *fi) { if (strcmp(path, "/") == 0) return 0; if (strncmp(path, "/cgroup", 7) == 0) { return cg_releasedir(path, fi); } if (strcmp(path, "/proc") == 0) return 0; return -EINVAL; } static int lxcfs_open(const char *path, struct fuse_file_info *fi) { if (strncmp(path, "/cgroup", 7) == 0) return cg_open(path, fi); if (strncmp(path, "/proc", 5) == 0) return proc_open(path, fi); return -EINVAL; } static int lxcfs_read(const char *path, char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { if (strncmp(path, "/cgroup", 7) == 0) return cg_read(path, buf, size, offset, fi); if (strncmp(path, "/proc", 5) == 0) return proc_read(path, buf, size, offset, fi); return -EINVAL; } int lxcfs_write(const char *path, const char *buf, size_t size, off_t offset, struct fuse_file_info *fi) { if (strncmp(path, "/cgroup", 7) == 0) { return cg_write(path, buf, size, offset, fi); } return -EINVAL; } static int lxcfs_flush(const char *path, struct fuse_file_info *fi) { return 0; } static int lxcfs_release(const char *path, struct fuse_file_info *fi) { if (strncmp(path, "/cgroup", 7) == 0) return cg_release(path, fi); if (strncmp(path, "/proc", 5) == 0) return proc_release(path, fi); return -EINVAL; } static int lxcfs_fsync(const char *path, int datasync, struct fuse_file_info *fi) { return 0; } int lxcfs_mkdir(const char *path, mode_t mode) { if (strncmp(path, "/cgroup", 7) == 0) return cg_mkdir(path, mode); return -EINVAL; } int lxcfs_chown(const char *path, uid_t uid, gid_t gid) { if (strncmp(path, "/cgroup", 7) == 0) return cg_chown(path, uid, gid); return -EINVAL; } /* * cat first does a truncate before doing ops->write. This doesn't * really make sense for cgroups. So just return 0 always but do * nothing. 
*/ int lxcfs_truncate(const char *path, off_t newsize) { if (strncmp(path, "/cgroup", 7) == 0) return 0; return -EINVAL; } int lxcfs_rmdir(const char *path) { if (strncmp(path, "/cgroup", 7) == 0) return cg_rmdir(path); return -EINVAL; } int lxcfs_chmod(const char *path, mode_t mode) { if (strncmp(path, "/cgroup", 7) == 0) return cg_chmod(path, mode); return -EINVAL; } const struct fuse_operations lxcfs_ops = { .getattr = lxcfs_getattr, .readlink = NULL, .getdir = NULL, .mknod = NULL, .mkdir = lxcfs_mkdir, .unlink = NULL, .rmdir = lxcfs_rmdir, .symlink = NULL, .rename = NULL, .link = NULL, .chmod = lxcfs_chmod, .chown = lxcfs_chown, .truncate = lxcfs_truncate, .utime = NULL, .open = lxcfs_open, .read = lxcfs_read, .release = lxcfs_release, .write = lxcfs_write, .statfs = NULL, .flush = lxcfs_flush, .fsync = lxcfs_fsync, .setxattr = NULL, .getxattr = NULL, .listxattr = NULL, .removexattr = NULL, .opendir = lxcfs_opendir, .readdir = lxcfs_readdir, .releasedir = lxcfs_releasedir, .fsyncdir = NULL, .init = NULL, .destroy = NULL, .access = NULL, .create = NULL, .ftruncate = NULL, .fgetattr = NULL, }; static void usage(const char *me) { fprintf(stderr, "Usage:\n"); fprintf(stderr, "\n"); fprintf(stderr, "%s mountpoint\n", me); fprintf(stderr, "%s -h\n", me); exit(1); } static bool is_help(char *w) { if (strcmp(w, "-h") == 0 || strcmp(w, "--help") == 0 || strcmp(w, "-help") == 0 || strcmp(w, "help") == 0) return true; return false; } void swallow_arg(int *argcp, char *argv[], char *which) { int i; for (i = 1; argv[i]; i++) { if (strcmp(argv[i], which) != 0) continue; for (; argv[i]; i++) { argv[i] = argv[i+1]; } (*argcp)--; return; } } void swallow_option(int *argcp, char *argv[], char *opt, char *v) { int i; for (i = 1; argv[i]; i++) { if (!argv[i+1]) continue; if (strcmp(argv[i], opt) != 0) continue; if (strcmp(argv[i+1], v) != 0) { fprintf(stderr, "Warning: unexpected fuse option %s\n", v); exit(1); } for (; argv[i+1]; i++) { argv[i] = argv[i+2]; } (*argcp) -= 2; return; } } int main(int argc, char *argv[]) { int ret = -1; /* * what we pass to fuse_main is: * argv[0] -s -f -o allow_other,directio argv[1] NULL */ int nargs = 5, cnt = 0; char *newargv[6]; #ifdef FORTRAVIS /* for travis which runs on 12.04 */ if (glib_check_version (2, 36, 0) != NULL) g_type_init (); #endif /* accomodate older init scripts */ swallow_arg(&argc, argv, "-s"); swallow_arg(&argc, argv, "-f"); swallow_option(&argc, argv, "-o", "allow_other"); if (argc == 2 && strcmp(argv[1], "--version") == 0) { fprintf(stderr, "%s\n", VERSION); exit(0); } if (argc != 2 || is_help(argv[1])) usage(argv[0]); newargv[cnt++] = argv[0]; newargv[cnt++] = "-f"; newargv[cnt++] = "-o"; newargv[cnt++] = "allow_other,direct_io,entry_timeout=0.5,attr_timeout=0.5"; newargv[cnt++] = argv[1]; newargv[cnt++] = NULL; if (!cgfs_setup_controllers()) goto out; ret = fuse_main(nargs, newargv, &lxcfs_ops, NULL); out: return ret; }
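/*
 * Illustrative sketch, not part of lxcfs: a standalone program showing the
 * /proc/<pid>/uid_map range translation that convert_id_to_ns() above
 * performs. The demo_ names and the command-line interface are hypothetical;
 * compile this as its own translation unit, e.g.
 *   cc -o mapdemo mapdemo.c && ./mapdemo /proc/self/uid_map 1000
 */
#include <stdio.h>
#include <stdlib.h>

/* Return the mapped id, or -1 (as unsigned) when no range covers in_id. */
static unsigned int demo_map_id(FILE *idfile, unsigned int in_id)
{
	unsigned int ns, host, count;
	char line[400];

	rewind(idfile);
	while (fgets(line, sizeof(line), idfile)) {
		/* each uid_map line is "id-inside-ns id-outside-ns length" */
		if (sscanf(line, "%u %u %u", &ns, &host, &count) != 3)
			continue;
		/* convert_id_to_ns() treats wrapping ranges as an error; skip them here */
		if (host + count < host || ns + count < ns)
			continue;
		if (host <= in_id && in_id < host + count)
			return ns + (in_id - host);
	}
	return (unsigned int)-1;
}

int main(int argc, char *argv[])
{
	FILE *f;
	unsigned int id, mapped;

	if (argc != 3) {
		fprintf(stderr, "usage: %s /proc/PID/uid_map ID\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "r");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	id = (unsigned int)strtoul(argv[2], NULL, 10);
	mapped = demo_map_id(f, id);
	fclose(f);
	if (mapped == (unsigned int)-1)
		printf("id %u is not mapped\n", id);
	else
		printf("id %u maps to %u in the target namespace\n", id, mapped);
	return 0;
}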
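/*
 * Illustrative sketch, separate from the surrounding files: the minimal
 * SCM_CREDENTIALS round trip underlying the send_creds()/recv_creds()
 * helpers in the lxcfs code above. An unprivileged process may pass its own
 * pid/uid/gid; passing another task's pid (as lxcfs does when translating
 * pids across namespaces) additionally requires CAP_SYS_ADMIN. All names
 * below are hypothetical; build this as its own program.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

int main(void)
{
	int sv[2];
	int one = 1;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0) {
		perror("socketpair");
		return 1;
	}
	/* the receiver must opt in, or the kernel strips the credential cmsg */
	if (setsockopt(sv[1], SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
		perror("setsockopt");
		return 1;
	}

	/* sender: attach our own credentials to a one-byte datagram */
	{
		struct ucred cred = {
			.pid = getpid(), .uid = getuid(), .gid = getgid()
		};
		char cmsgbuf[CMSG_SPACE(sizeof(cred))];
		char payload = 'x';
		struct iovec iov = { .iov_base = &payload, .iov_len = 1 };
		struct msghdr msg = { 0 };
		struct cmsghdr *cmsg;

		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = cmsgbuf;
		msg.msg_controllen = sizeof(cmsgbuf);
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_CREDENTIALS;
		cmsg->cmsg_len = CMSG_LEN(sizeof(cred));
		memcpy(CMSG_DATA(cmsg), &cred, sizeof(cred));
		if (sendmsg(sv[0], &msg, 0) < 0) {
			perror("sendmsg");
			return 1;
		}
	}

	/* receiver: the credentials in the cmsg were verified by the kernel */
	{
		struct ucred cred;
		char cmsgbuf[CMSG_SPACE(sizeof(cred))];
		char payload;
		struct iovec iov = { .iov_base = &payload, .iov_len = 1 };
		struct msghdr msg = { 0 };
		struct cmsghdr *cmsg;

		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = cmsgbuf;
		msg.msg_controllen = sizeof(cmsgbuf);
		if (recvmsg(sv[1], &msg, 0) < 0) {
			perror("recvmsg");
			return 1;
		}
		cmsg = CMSG_FIRSTHDR(&msg);
		if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_CREDENTIALS) {
			memcpy(&cred, CMSG_DATA(cmsg), sizeof(cred));
			printf("peer pid=%d uid=%u gid=%u\n",
			       (int)cred.pid, cred.uid, cred.gid);
		}
	}
	return 0;
}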
/* * linux/kernel/fork.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * 'fork.c' contains the help-routines for the 'fork' system call * (see also entry.S and others). * Fork is rather simple, once you get the hang of it, but the memory * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' */ #include <linux/slab.h> #include <linux/init.h> #include <linux/unistd.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/completion.h> #include <linux/personality.h> #include <linux/mempolicy.h> #include <linux/sem.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/iocontext.h> #include <linux/key.h> #include <linux/binfmts.h> #include <linux/mman.h> #include <linux/mmu_notifier.h> #include <linux/fs.h> #include <linux/nsproxy.h> #include <linux/capability.h> #include <linux/cpu.h> #include <linux/cgroup.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/seccomp.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/jiffies.h> #include <linux/futex.h> #include <linux/compat.h> #include <linux/kthread.h> #include <linux/task_io_accounting_ops.h> #include <linux/rcupdate.h> #include <linux/ptrace.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/memcontrol.h> #include <linux/ftrace.h> #include <linux/proc_fs.h> #include <linux/profile.h> #include <linux/rmap.h> #include <linux/ksm.h> #include <linux/acct.h> #include <linux/tsacct_kern.h> #include <linux/cn_proc.h> #include <linux/freezer.h> #include <linux/delayacct.h> #include <linux/taskstats_kern.h> #include <linux/random.h> #include <linux/tty.h> #include <linux/blkdev.h> #include <linux/fs_struct.h> #include <linux/magic.h> #include <linux/perf_event.h> #include <linux/posix-timers.h> #include <linux/user-return-notifier.h> #include <linux/oom.h> #include <linux/khugepaged.h> #include <linux/signalfd.h> #include <linux/uprobes.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <trace/events/sched.h> #define CREATE_TRACE_POINTS #include <trace/events/task.h> /* * Protected counters by write_lock_irq(&tasklist_lock) */ unsigned long total_forks; /* Handle normal Linux uptimes. */ int nr_threads; /* The idle threads do not count.. */ int max_threads; /* tunable limit on nr_threads */ DEFINE_PER_CPU(unsigned long, process_counts) = 0; __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ #ifdef CONFIG_PROVE_RCU int lockdep_tasklist_lock_is_held(void) { return lockdep_is_held(&tasklist_lock); } EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); #endif /* #ifdef CONFIG_PROVE_RCU */ int nr_processes(void) { int cpu; int total = 0; for_each_possible_cpu(cpu) total += per_cpu(process_counts, cpu); return total; } void __weak arch_release_task_struct(struct task_struct *tsk) { } #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR static struct kmem_cache *task_struct_cachep; static inline struct task_struct *alloc_task_struct_node(int node) { return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); } static inline void free_task_struct(struct task_struct *tsk) { kmem_cache_free(task_struct_cachep, tsk); } #endif void __weak arch_release_thread_info(struct thread_info *ti) { } #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR /* * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a * kmemcache based allocator. 
*/ # if THREAD_SIZE >= PAGE_SIZE static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED, THREAD_SIZE_ORDER); return page ? page_address(page) : NULL; } static inline void free_thread_info(struct thread_info *ti) { free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER); } # else static struct kmem_cache *thread_info_cache; static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node); } static void free_thread_info(struct thread_info *ti) { kmem_cache_free(thread_info_cache, ti); } void thread_info_cache_init(void) { thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, THREAD_SIZE, 0, NULL); BUG_ON(thread_info_cache == NULL); } # endif #endif /* SLAB cache for signal_struct structures (tsk->signal) */ static struct kmem_cache *signal_cachep; /* SLAB cache for sighand_struct structures (tsk->sighand) */ struct kmem_cache *sighand_cachep; /* SLAB cache for files_struct structures (tsk->files) */ struct kmem_cache *files_cachep; /* SLAB cache for fs_struct structures (tsk->fs) */ struct kmem_cache *fs_cachep; /* SLAB cache for vm_area_struct structures */ struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; static void account_kernel_stack(struct thread_info *ti, int account) { struct zone *zone = page_zone(virt_to_page(ti)); mod_zone_page_state(zone, NR_KERNEL_STACK, account); } void free_task(struct task_struct *tsk) { account_kernel_stack(tsk->stack, -1); arch_release_thread_info(tsk->stack); free_thread_info(tsk->stack); rt_mutex_debug_task_free(tsk); ftrace_graph_exit_task(tsk); put_seccomp_filter(tsk); arch_release_task_struct(tsk); free_task_struct(tsk); } EXPORT_SYMBOL(free_task); static inline void free_signal_struct(struct signal_struct *sig) { taskstats_tgid_free(sig); sched_autogroup_exit(sig); kmem_cache_free(signal_cachep, sig); } static inline void put_signal_struct(struct signal_struct *sig) { if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); security_task_free(tsk); exit_creds(tsk); delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); if (!profile_handoff_task(tsk)) free_task(tsk); } EXPORT_SYMBOL_GPL(__put_task_struct); void __init __weak arch_task_cache_init(void) { } void __init fork_init(unsigned long mempages) { #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR #ifndef ARCH_MIN_TASKALIGN #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES #endif /* create a slab on which task_structs can be allocated */ task_struct_cachep = kmem_cache_create("task_struct", sizeof(struct task_struct), ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); #endif /* do the arch specific task caches init */ arch_task_cache_init(); /* * The default maximum number of threads is set to a safe * value: the thread structures can take up at most half * of memory. 
*/ max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE); /* * we need to allow at least 20 threads to boot a system */ if (max_threads < 20) max_threads = 20; init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC]; } int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; return 0; } static struct task_struct *dup_task_struct(struct task_struct *orig) { struct task_struct *tsk; struct thread_info *ti; unsigned long *stackend; int node = tsk_fork_get_node(orig); int err; tsk = alloc_task_struct_node(node); if (!tsk) return NULL; ti = alloc_thread_info_node(tsk, node); if (!ti) goto free_tsk; err = arch_dup_task_struct(tsk, orig); if (err) goto free_ti; tsk->stack = ti; setup_thread_stack(tsk, orig); clear_user_return_notifier(tsk); clear_tsk_need_resched(tsk); stackend = end_of_stack(tsk); *stackend = STACK_END_MAGIC; /* for overflow detection */ #ifdef CONFIG_CC_STACKPROTECTOR tsk->stack_canary = get_random_int(); #endif /* * One for us, one for whoever does the "release_task()" (usually * parent) */ atomic_set(&tsk->usage, 2); #ifdef CONFIG_BLK_DEV_IO_TRACE tsk->btrace_seq = 0; #endif tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; account_kernel_stack(ti, 1); return tsk; free_ti: free_thread_info(ti); free_tsk: free_task_struct(tsk); return NULL; } #ifdef CONFIG_MMU static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { struct vm_area_struct *mpnt, *tmp, *prev, **pprev; struct rb_node **rb_link, *rb_parent; int retval; unsigned long charge; struct mempolicy *pol; uprobe_start_dup_mmap(); down_write(&oldmm->mmap_sem); flush_cache_dup_mm(oldmm); uprobe_dup_mmap(oldmm, mm); /* * Not linked in yet - no deadlock potential: */ down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); mm->locked_vm = 0; mm->mmap = NULL; mm->mmap_cache = NULL; mm->free_area_cache = oldmm->mmap_base; mm->cached_hole_size = ~0UL; mm->map_count = 0; cpumask_clear(mm_cpumask(mm)); mm->mm_rb = RB_ROOT; rb_link = &mm->mm_rb.rb_node; rb_parent = NULL; pprev = &mm->mmap; retval = ksm_fork(mm, oldmm); if (retval) goto out; retval = khugepaged_fork(mm, oldmm); if (retval) goto out; prev = NULL; for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { struct file *file; if (mpnt->vm_flags & VM_DONTCOPY) { vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, -vma_pages(mpnt)); continue; } charge = 0; if (mpnt->vm_flags & VM_ACCOUNT) { unsigned long len = vma_pages(mpnt); if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ goto fail_nomem; charge = len; } tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!tmp) goto fail_nomem; *tmp = *mpnt; INIT_LIST_HEAD(&tmp->anon_vma_chain); pol = mpol_dup(vma_policy(mpnt)); retval = PTR_ERR(pol); if (IS_ERR(pol)) goto fail_nomem_policy; vma_set_policy(tmp, pol); tmp->vm_mm = mm; if (anon_vma_fork(tmp, mpnt)) goto fail_nomem_anon_vma_fork; tmp->vm_flags &= ~VM_LOCKED; tmp->vm_next = tmp->vm_prev = NULL; file = tmp->vm_file; if (file) { struct inode *inode = file_inode(file); struct address_space *mapping = file->f_mapping; get_file(file); if (tmp->vm_flags & VM_DENYWRITE) atomic_dec(&inode->i_writecount); mutex_lock(&mapping->i_mmap_mutex); if (tmp->vm_flags & VM_SHARED) mapping->i_mmap_writable++; flush_dcache_mmap_lock(mapping); /* insert tmp into the share list, just after mpnt */ if (unlikely(tmp->vm_flags & VM_NONLINEAR)) vma_nonlinear_insert(tmp, 
&mapping->i_mmap_nonlinear); else vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); mutex_unlock(&mapping->i_mmap_mutex); } /* * Clear hugetlb-related page reserves for children. This only * affects MAP_PRIVATE mappings. Faults generated by the child * are not guaranteed to succeed, even if read-only */ if (is_vm_hugetlb_page(tmp)) reset_vma_resv_huge_pages(tmp); /* * Link in the new vma and copy the page table entries. */ *pprev = tmp; pprev = &tmp->vm_next; tmp->vm_prev = prev; prev = tmp; __vma_link_rb(mm, tmp, rb_link, rb_parent); rb_link = &tmp->vm_rb.rb_right; rb_parent = &tmp->vm_rb; mm->map_count++; retval = copy_page_range(mm, oldmm, mpnt); if (tmp->vm_ops && tmp->vm_ops->open) tmp->vm_ops->open(tmp); if (retval) goto out; } /* a new mm has just been created */ arch_dup_mmap(oldmm, mm); retval = 0; out: up_write(&mm->mmap_sem); flush_tlb_mm(oldmm); up_write(&oldmm->mmap_sem); uprobe_end_dup_mmap(); return retval; fail_nomem_anon_vma_fork: mpol_put(pol); fail_nomem_policy: kmem_cache_free(vm_area_cachep, tmp); fail_nomem: retval = -ENOMEM; vm_unacct_memory(charge); goto out; } static inline int mm_alloc_pgd(struct mm_struct *mm) { mm->pgd = pgd_alloc(mm); if (unlikely(!mm->pgd)) return -ENOMEM; return 0; } static inline void mm_free_pgd(struct mm_struct *mm) { pgd_free(mm, mm->pgd); } #else #define dup_mmap(mm, oldmm) (0) #define mm_alloc_pgd(mm) (0) #define mm_free_pgd(mm) #endif /* CONFIG_MMU */ __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; static int __init coredump_filter_setup(char *s) { default_dump_filter = (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & MMF_DUMP_FILTER_MASK; return 1; } __setup("coredump_filter=", coredump_filter_setup); #include <linux/init_task.h> static void mm_init_aio(struct mm_struct *mm) { #ifdef CONFIG_AIO spin_lock_init(&mm->ioctx_lock); INIT_HLIST_HEAD(&mm->ioctx_list); #endif } static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) { atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); init_rwsem(&mm->mmap_sem); INIT_LIST_HEAD(&mm->mmlist); mm->flags = (current->mm) ? (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; mm->core_state = NULL; mm->nr_ptes = 0; memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); mm->free_area_cache = TASK_UNMAPPED_BASE; mm->cached_hole_size = ~0UL; mm_init_aio(mm); mm_init_owner(mm, p); if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; mmu_notifier_mm_init(mm); return mm; } free_mm(mm); return NULL; } static void check_mm(struct mm_struct *mm) { int i; for (i = 0; i < NR_MM_COUNTERS; i++) { long x = atomic_long_read(&mm->rss_stat.count[i]); if (unlikely(x)) printk(KERN_ALERT "BUG: Bad rss-counter state " "mm:%p idx:%d val:%ld\n", mm, i, x); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE VM_BUG_ON(mm->pmd_huge_pte); #endif } /* * Allocate and initialize an mm_struct. */ struct mm_struct *mm_alloc(void) { struct mm_struct *mm; mm = allocate_mm(); if (!mm) return NULL; memset(mm, 0, sizeof(*mm)); mm_init_cpumask(mm); return mm_init(mm, current); } /* * Called when the last reference to the mm * is dropped: either by a lazy thread or by * mmput. Free the page directory and the mm. 
*/ void __mmdrop(struct mm_struct *mm) { BUG_ON(mm == &init_mm); mm_free_pgd(mm); destroy_context(mm); mmu_notifier_mm_destroy(mm); check_mm(mm); free_mm(mm); } EXPORT_SYMBOL_GPL(__mmdrop); /* * Decrement the use count and release all resources for an mm. */ void mmput(struct mm_struct *mm) { might_sleep(); if (atomic_dec_and_test(&mm->mm_users)) { uprobe_clear_state(mm); exit_aio(mm); ksm_exit(mm); khugepaged_exit(mm); /* must run before exit_mmap */ exit_mmap(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); list_del(&mm->mmlist); spin_unlock(&mmlist_lock); } if (mm->binfmt) module_put(mm->binfmt->module); mmdrop(mm); } } EXPORT_SYMBOL_GPL(mmput); void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) { if (new_exe_file) get_file(new_exe_file); if (mm->exe_file) fput(mm->exe_file); mm->exe_file = new_exe_file; } struct file *get_mm_exe_file(struct mm_struct *mm) { struct file *exe_file; /* We need mmap_sem to protect against races with removal of exe_file */ down_read(&mm->mmap_sem); exe_file = mm->exe_file; if (exe_file) get_file(exe_file); up_read(&mm->mmap_sem); return exe_file; } static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm) { /* It's safe to write the exe_file pointer without exe_file_lock because * this is called during fork when the task is not yet in /proc */ newmm->exe_file = get_mm_exe_file(oldmm); } /** * get_task_mm - acquire a reference to the task's mm * * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning * this kernel workthread has transiently adopted a user mm with use_mm, * to do its AIO) is not set and if so returns a reference to it, after * bumping up the use count. User must release the mm via mmput() * after use. Typically used by /proc and ptrace. */ struct mm_struct *get_task_mm(struct task_struct *task) { struct mm_struct *mm; task_lock(task); mm = task->mm; if (mm) { if (task->flags & PF_KTHREAD) mm = NULL; else atomic_inc(&mm->mm_users); } task_unlock(task); return mm; } EXPORT_SYMBOL_GPL(get_task_mm); struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) { struct mm_struct *mm; int err; err = mutex_lock_killable(&task->signal->cred_guard_mutex); if (err) return ERR_PTR(err); mm = get_task_mm(task); if (mm && mm != current->mm && !ptrace_may_access(task, mode)) { mmput(mm); mm = ERR_PTR(-EACCES); } mutex_unlock(&task->signal->cred_guard_mutex); return mm; } static void complete_vfork_done(struct task_struct *tsk) { struct completion *vfork; task_lock(tsk); vfork = tsk->vfork_done; if (likely(vfork)) { tsk->vfork_done = NULL; complete(vfork); } task_unlock(tsk); } static int wait_for_vfork_done(struct task_struct *child, struct completion *vfork) { int killed; freezer_do_not_count(); killed = wait_for_completion_killable(vfork); freezer_count(); if (killed) { task_lock(child); child->vfork_done = NULL; task_unlock(child); } put_task_struct(child); return killed; } /* Please note the differences between mmput and mm_release. * mmput is called whenever we stop holding onto a mm_struct, * error success whatever. * * mm_release is called after a mm_struct has been removed * from the current process. * * This difference is important for error handling, when we * only half set up a mm_struct for a new process and need to restore * the old one. Because we mmput the new mm_struct before * restoring the old one. . . 
* Eric Biederman 10 January 1998 */ void mm_release(struct task_struct *tsk, struct mm_struct *mm) { /* Get rid of any futexes when releasing the mm */ #ifdef CONFIG_FUTEX if (unlikely(tsk->robust_list)) { exit_robust_list(tsk); tsk->robust_list = NULL; } #ifdef CONFIG_COMPAT if (unlikely(tsk->compat_robust_list)) { compat_exit_robust_list(tsk); tsk->compat_robust_list = NULL; } #endif if (unlikely(!list_empty(&tsk->pi_state_list))) exit_pi_state_list(tsk); #endif uprobe_free_utask(tsk); /* Get rid of any cached register state */ deactivate_mm(tsk, mm); /* * If we're exiting normally, clear a user-space tid field if * requested. We leave this alone when dying by signal, to leave * the value intact in a core dump, and to save the unnecessary * trouble, say, a killed vfork parent shouldn't touch this mm. * Userland only wants this done for a sys_exit. */ if (tsk->clear_child_tid) { if (!(tsk->flags & PF_SIGNALED) && atomic_read(&mm->mm_users) > 1) { /* * We don't check the error code - if userspace has * not set up a proper pointer then tough luck. */ put_user(0, tsk->clear_child_tid); sys_futex(tsk->clear_child_tid, FUTEX_WAKE, 1, NULL, NULL, 0); } tsk->clear_child_tid = NULL; } /* * All done, finally we can wake up parent and return this mm to him. * Also kthread_stop() uses this completion for synchronization. */ if (tsk->vfork_done) complete_vfork_done(tsk); } /* * Allocate a new mm structure and copy contents from the * mm structure of the passed in task structure. */ struct mm_struct *dup_mm(struct task_struct *tsk) { struct mm_struct *mm, *oldmm = current->mm; int err; if (!oldmm) return NULL; mm = allocate_mm(); if (!mm) goto fail_nomem; memcpy(mm, oldmm, sizeof(*mm)); mm_init_cpumask(mm); #ifdef CONFIG_TRANSPARENT_HUGEPAGE mm->pmd_huge_pte = NULL; #endif #ifdef CONFIG_NUMA_BALANCING mm->first_nid = NUMA_PTE_SCAN_INIT; #endif if (!mm_init(mm, tsk)) goto fail_nomem; if (init_new_context(tsk, mm)) goto fail_nocontext; dup_mm_exe_file(oldmm, mm); err = dup_mmap(mm, oldmm); if (err) goto free_pt; mm->hiwater_rss = get_mm_rss(mm); mm->hiwater_vm = mm->total_vm; if (mm->binfmt && !try_module_get(mm->binfmt->module)) goto free_pt; return mm; free_pt: /* don't put binfmt in mmput, we haven't got module yet */ mm->binfmt = NULL; mmput(mm); fail_nomem: return NULL; fail_nocontext: /* * If init_new_context() failed, we cannot use mmput() to free the mm * because it calls destroy_context() */ mm_free_pgd(mm); free_mm(mm); return NULL; } static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) { struct mm_struct *mm, *oldmm; int retval; tsk->min_flt = tsk->maj_flt = 0; tsk->nvcsw = tsk->nivcsw = 0; #ifdef CONFIG_DETECT_HUNG_TASK tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; #endif tsk->mm = NULL; tsk->active_mm = NULL; /* * Are we cloning a kernel thread? * * We need to steal a active VM for that.. 
*/ oldmm = current->mm; if (!oldmm) return 0; if (clone_flags & CLONE_VM) { atomic_inc(&oldmm->mm_users); mm = oldmm; goto good_mm; } retval = -ENOMEM; mm = dup_mm(tsk); if (!mm) goto fail_nomem; good_mm: tsk->mm = mm; tsk->active_mm = mm; return 0; fail_nomem: return retval; } static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) { struct fs_struct *fs = current->fs; if (clone_flags & CLONE_FS) { /* tsk->fs is already what we want */ spin_lock(&fs->lock); if (fs->in_exec) { spin_unlock(&fs->lock); return -EAGAIN; } fs->users++; spin_unlock(&fs->lock); return 0; } tsk->fs = copy_fs_struct(fs); if (!tsk->fs) return -ENOMEM; return 0; } static int copy_files(unsigned long clone_flags, struct task_struct *tsk) { struct files_struct *oldf, *newf; int error = 0; /* * A background process may not have any files ... */ oldf = current->files; if (!oldf) goto out; if (clone_flags & CLONE_FILES) { atomic_inc(&oldf->count); goto out; } newf = dup_fd(oldf, &error); if (!newf) goto out; tsk->files = newf; error = 0; out: return error; } static int copy_io(unsigned long clone_flags, struct task_struct *tsk) { #ifdef CONFIG_BLOCK struct io_context *ioc = current->io_context; struct io_context *new_ioc; if (!ioc) return 0; /* * Share io context with parent, if CLONE_IO is set */ if (clone_flags & CLONE_IO) { ioc_task_link(ioc); tsk->io_context = ioc; } else if (ioprio_valid(ioc->ioprio)) { new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); if (unlikely(!new_ioc)) return -ENOMEM; new_ioc->ioprio = ioc->ioprio; put_io_context(new_ioc); } #endif return 0; } static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) { struct sighand_struct *sig; if (clone_flags & CLONE_SIGHAND) { atomic_inc(&current->sighand->count); return 0; } sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); rcu_assign_pointer(tsk->sighand, sig); if (!sig) return -ENOMEM; atomic_set(&sig->count, 1); memcpy(sig->action, current->sighand->action, sizeof(sig->action)); return 0; } void __cleanup_sighand(struct sighand_struct *sighand) { if (atomic_dec_and_test(&sighand->count)) { signalfd_cleanup(sighand); kmem_cache_free(sighand_cachep, sighand); } } /* * Initialize POSIX timer handling for a thread group. */ static void posix_cpu_timers_init_group(struct signal_struct *sig) { unsigned long cpu_limit; /* Thread group counters. */ thread_group_cputime_init(sig); cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); if (cpu_limit != RLIM_INFINITY) { sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); sig->cputimer.running = 1; } /* The timer lists. 
*/ INIT_LIST_HEAD(&sig->cpu_timers[0]); INIT_LIST_HEAD(&sig->cpu_timers[1]); INIT_LIST_HEAD(&sig->cpu_timers[2]); } static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; if (clone_flags & CLONE_THREAD) return 0; sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; if (!sig) return -ENOMEM; sig->nr_threads = 1; atomic_set(&sig->live, 1); atomic_set(&sig->sigcnt, 1); init_waitqueue_head(&sig->wait_chldexit); sig->curr_target = tsk; init_sigpending(&sig->shared_pending); INIT_LIST_HEAD(&sig->posix_timers); hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); sig->real_timer.function = it_real_fn; task_lock(current->group_leader); memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); posix_cpu_timers_init_group(sig); tty_audit_fork(sig); sched_autogroup_fork(sig); #ifdef CONFIG_CGROUPS init_rwsem(&sig->group_rwsem); #endif sig->oom_score_adj = current->signal->oom_score_adj; sig->oom_score_adj_min = current->signal->oom_score_adj_min; sig->has_child_subreaper = current->signal->has_child_subreaper || current->signal->is_child_subreaper; mutex_init(&sig->cred_guard_mutex); return 0; } static void copy_flags(unsigned long clone_flags, struct task_struct *p) { unsigned long new_flags = p->flags; new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); new_flags |= PF_FORKNOEXEC; p->flags = new_flags; } SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) { current->clear_child_tid = tidptr; return task_pid_vnr(current); } static void rt_mutex_init_task(struct task_struct *p) { raw_spin_lock_init(&p->pi_lock); #ifdef CONFIG_RT_MUTEXES plist_head_init(&p->pi_waiters); p->pi_blocked_on = NULL; #endif } #ifdef CONFIG_MM_OWNER void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { mm->owner = p; } #endif /* CONFIG_MM_OWNER */ /* * Initialize POSIX timer handling for a single task. */ static void posix_cpu_timers_init(struct task_struct *tsk) { tsk->cputime_expires.prof_exp = 0; tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; INIT_LIST_HEAD(&tsk->cpu_timers[0]); INIT_LIST_HEAD(&tsk->cpu_timers[1]); INIT_LIST_HEAD(&tsk->cpu_timers[2]); } /* * This creates a new process as a copy of the old one, * but does not actually start it yet. * * It copies the registers, and all the appropriate * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. */ static struct task_struct *copy_process(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *child_tidptr, struct pid *pid, int trace) { int retval; struct task_struct *p; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */ if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) return ERR_PTR(-EINVAL); /* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. */ if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) return ERR_PTR(-EINVAL); /* * Siblings of global init remain as zombies on exit since they are * not reaped by their parent (swapper). 
To solve this and to avoid * multi-rooted process trees, prevent global and container-inits * from creating siblings. */ if ((clone_flags & CLONE_PARENT) && current->signal->flags & SIGNAL_UNKILLABLE) return ERR_PTR(-EINVAL); /* * If the new process will be in a different pid namespace * don't allow the creation of threads. */ if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) && (task_active_pid_ns(current) != current->nsproxy->pid_ns)) return ERR_PTR(-EINVAL); retval = security_task_create(clone_flags); if (retval) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current); if (!p) goto fork_out; ftrace_graph_init_task(p); get_seccomp_filter(p); rt_mutex_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; if (atomic_read(&p->real_cred->user->processes) >= task_rlimit(p, RLIMIT_NPROC)) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && p->real_cred->user != INIT_USER) goto bad_fork_free; } current->flags &= ~PF_NPROC_EXCEEDED; retval = copy_creds(p, clone_flags); if (retval < 0) goto bad_fork_free; /* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. */ retval = -EAGAIN; if (nr_threads >= max_threads) goto bad_fork_cleanup_count; if (!try_module_get(task_thread_info(p)->exec_domain->module)) goto bad_fork_cleanup_count; p->did_exec = 0; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ copy_flags(clone_flags, p); INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); p->utime = p->stime = p->gtime = 0; p->utimescaled = p->stimescaled = 0; #ifndef CONFIG_VIRT_CPU_ACCOUNTING p->prev_cputime.utime = p->prev_cputime.stime = 0; #endif #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqlock_init(&p->vtime_seqlock); p->vtime_snap = 0; p->vtime_snap_whence = VTIME_SLEEPING; #endif #if defined(SPLIT_RSS_COUNTING) memset(&p->rss_stat, 0, sizeof(p->rss_stat)); #endif p->default_timer_slack_ns = current->timer_slack_ns; task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cpu_timers_init(p); do_posix_clock_monotonic_gettime(&p->start_time); p->real_start_time = p->start_time; monotonic_to_bootbased(&p->real_start_time); p->io_context = NULL; p->audit_context = NULL; if (clone_flags & CLONE_THREAD) threadgroup_change_begin(current); cgroup_fork(p); #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_cgroup; } mpol_fix_fork_child_flag(p); #endif #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; p->cpuset_slab_spread_rotor = NUMA_NO_NODE; seqcount_init(&p->mems_allowed_seq); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; p->hardirqs_enabled = 0; p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; p->hardirq_disable_event = 0; p->softirqs_enabled = 1; p->softirq_enable_ip = _THIS_IP_; p->softirq_enable_event = 0; p->softirq_disable_ip = 0; p->softirq_disable_event = 0; p->hardirq_context = 0; p->softirq_context = 0; #endif #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; p->lockdep_recursion = 0; #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif #ifdef CONFIG_MEMCG p->memcg_batch.do_batch = 0; p->memcg_batch.memcg = NULL; 
#endif /* Perform scheduler related setup. Assign this task to a CPU. */ sched_fork(p); retval = perf_event_init_task(p); if (retval) goto bad_fork_cleanup_policy; retval = audit_alloc(p); if (retval) goto bad_fork_cleanup_policy; /* copy all the process information */ retval = copy_semundo(clone_flags, p); if (retval) goto bad_fork_cleanup_audit; retval = copy_files(clone_flags, p); if (retval) goto bad_fork_cleanup_semundo; retval = copy_fs(clone_flags, p); if (retval) goto bad_fork_cleanup_files; retval = copy_sighand(clone_flags, p); if (retval) goto bad_fork_cleanup_fs; retval = copy_signal(clone_flags, p); if (retval) goto bad_fork_cleanup_sighand; retval = copy_mm(clone_flags, p); if (retval) goto bad_fork_cleanup_signal; retval = copy_namespaces(clone_flags, p); if (retval) goto bad_fork_cleanup_mm; retval = copy_io(clone_flags, p); if (retval) goto bad_fork_cleanup_namespaces; retval = copy_thread(clone_flags, stack_start, stack_size, p); if (retval) goto bad_fork_cleanup_io; if (pid != &init_struct_pid) { retval = -ENOMEM; pid = alloc_pid(p->nsproxy->pid_ns); if (!pid) goto bad_fork_cleanup_io; } p->pid = pid_nr(pid); p->tgid = p->pid; if (clone_flags & CLONE_THREAD) p->tgid = current->tgid; p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; #ifdef CONFIG_BLOCK p->plug = NULL; #endif #ifdef CONFIG_FUTEX p->robust_list = NULL; #ifdef CONFIG_COMPAT p->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&p->pi_state_list); p->pi_state_cache = NULL; #endif uprobe_copy_process(p); /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) p->sas_ss_sp = p->sas_ss_size = 0; /* * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. */ user_disable_single_step(p); clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); #endif clear_all_latency_tracing(p); /* ok, now we should be set up.. */ if (clone_flags & CLONE_THREAD) p->exit_signal = -1; else if (clone_flags & CLONE_PARENT) p->exit_signal = current->group_leader->exit_signal; else p->exit_signal = (clone_flags & CSIGNAL); p->pdeath_signal = 0; p->exit_state = 0; p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; /* * Ok, make it visible to the rest of the system. * We dont wake it up yet. */ p->group_leader = p; INIT_LIST_HEAD(&p->thread_group); p->task_works = NULL; /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; } spin_lock(&current->sighand->siglock); /* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the * fork. Restart if a signal comes in before we add the new process to * it's process group. * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). 
*/ recalc_sigpending(); if (signal_pending(current)) { spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; goto bad_fork_free_pid; } if (clone_flags & CLONE_THREAD) { current->signal->nr_threads++; atomic_inc(&current->signal->live); atomic_inc(&current->signal->sigcnt); p->group_leader = current->group_leader; list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); } if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); if (thread_group_leader(p)) { if (is_child_reaper(pid)) { ns_of_pid(pid)->child_reaper = p; p->signal->flags |= SIGNAL_UNKILLABLE; } p->signal->leader_pid = pid; p->signal->tty = tty_kref_get(current->signal->tty); attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); attach_pid(p, PIDTYPE_SID, task_session(current)); list_add_tail(&p->sibling, &p->real_parent->children); list_add_tail_rcu(&p->tasks, &init_task.tasks); __this_cpu_inc(process_counts); } attach_pid(p, PIDTYPE_PID, pid); nr_threads++; } total_forks++; spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); if (clone_flags & CLONE_THREAD) threadgroup_change_end(current); perf_event_fork(p); trace_task_newtask(p, clone_flags); return p; bad_fork_free_pid: if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_io: if (p->io_context) exit_io_context(p); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) mmput(p->mm); bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) free_signal_struct(p->signal); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: exit_fs(p); /* blocking */ bad_fork_cleanup_files: exit_files(p); /* blocking */ bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_policy: perf_event_free_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_cgroup: #endif if (clone_flags & CLONE_THREAD) threadgroup_change_end(current); cgroup_exit(p, 0); delayacct_tsk_free(p); module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); exit_creds(p); bad_fork_free: free_task(p); fork_out: return ERR_PTR(retval); } static inline void init_idle_pids(struct pid_link *links) { enum pid_type type; for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { INIT_HLIST_NODE(&links[type].node); /* not really needed */ links[type].pid = &init_struct_pid; } } struct task_struct * __cpuinit fork_idle(int cpu) { struct task_struct *task; task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0); if (!IS_ERR(task)) { init_idle_pids(task->pids); init_idle(task, cpu); } return task; } /* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Do some preliminary argument and permissions checking before we * actually start allocating stuff */ if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) { if (clone_flags & (CLONE_THREAD|CLONE_PARENT)) return -EINVAL; } /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. 
*/ if (!(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if ((clone_flags & CSIGNAL) != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(clone_flags, stack_start, stack_size, child_tidptr, NULL, trace); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event(trace, nr); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); } } else { nr = PTR_ERR(p); } return nr; } /* * Create a kernel thread. */ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, (unsigned long)arg, NULL, NULL); } #ifdef __ARCH_WANT_SYS_FORK SYSCALL_DEFINE0(fork) { #ifdef CONFIG_MMU return do_fork(SIGCHLD, 0, 0, NULL, NULL); #else /* can not support in nommu mode */ return(-EINVAL); #endif } #endif #ifdef __ARCH_WANT_SYS_VFORK SYSCALL_DEFINE0(vfork) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, NULL, NULL); } #endif #ifdef __ARCH_WANT_SYS_CLONE #ifdef CONFIG_CLONE_BACKWARDS SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, int, tls_val, int __user *, child_tidptr) #elif defined(CONFIG_CLONE_BACKWARDS2) SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val) #else SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val) #endif { long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr); asmlinkage_protect(5, ret, clone_flags, newsp, parent_tidptr, child_tidptr, tls_val); return ret; } #endif #ifndef ARCH_MIN_MMSTRUCT_ALIGN #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif static void sighand_ctor(void *data) { struct sighand_struct *sighand = data; spin_lock_init(&sighand->siglock); init_waitqueue_head(&sighand->signalfd_wqh); } void __init proc_caches_init(void) { sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| SLAB_NOTRACK, sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); files_cachep = kmem_cache_create("files_cache", sizeof(struct files_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); fs_cachep = kmem_cache_create("fs_cache", sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); /* * FIXME! The "sizeof(struct mm_struct)" currently includes the * whole struct cpumask for the OFFSTACK case. We could change * this to *only* allocate as much of it as required by the * maximum number of CPU's we can ever have. The cpumask_allocation * is at the end of the structure, exactly for that reason. 
*/ mm_cachep = kmem_cache_create("mm_struct", sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); mmap_init(); nsproxy_cache_init(); } /* * Check constraints on flags passed to the unshare system call. */ static int check_unshare_flags(unsigned long unshare_flags) { if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| CLONE_NEWUSER|CLONE_NEWPID)) return -EINVAL; /* * Not implemented, but pretend it works if there is nothing to * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND * needs to unshare vm. */ if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { /* FIXME: get_task_mm() increments ->mm_users */ if (atomic_read(&current->mm->mm_users) > 1) return -EINVAL; } return 0; } /* * Unshare the filesystem structure if it is being shared */ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) { struct fs_struct *fs = current->fs; if (!(unshare_flags & CLONE_FS) || !fs) return 0; /* don't need lock here; in the worst case we'll do useless copy */ if (fs->users == 1) return 0; *new_fsp = copy_fs_struct(fs); if (!*new_fsp) return -ENOMEM; return 0; } /* * Unshare file descriptor table if it is being shared */ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) { struct files_struct *fd = current->files; int error = 0; if ((unshare_flags & CLONE_FILES) && (fd && atomic_read(&fd->count) > 1)) { *new_fdp = dup_fd(fd, &error); if (!*new_fdp) return error; } return 0; } /* * unshare allows a process to 'unshare' part of the process * context which was originally shared using clone. copy_* * functions used by do_fork() cannot be used here directly * because they modify an inactive task_struct that is being * constructed. Here we are modifying the current, active, * task_struct. */ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) { struct fs_struct *fs, *new_fs = NULL; struct files_struct *fd, *new_fd = NULL; struct cred *new_cred = NULL; struct nsproxy *new_nsproxy = NULL; int do_sysvsem = 0; int err; /* * If unsharing a user namespace must also unshare the thread. */ if (unshare_flags & CLONE_NEWUSER) unshare_flags |= CLONE_THREAD | CLONE_FS; /* * If unsharing a pid namespace must also unshare the thread. */ if (unshare_flags & CLONE_NEWPID) unshare_flags |= CLONE_THREAD; /* * If unsharing a thread from a thread group, must also unshare vm. */ if (unshare_flags & CLONE_THREAD) unshare_flags |= CLONE_VM; /* * If unsharing vm, must also unshare signal handlers. */ if (unshare_flags & CLONE_VM) unshare_flags |= CLONE_SIGHAND; /* * If unsharing namespace, must also unshare filesystem information. */ if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; err = check_unshare_flags(unshare_flags); if (err) goto bad_unshare_out; /* * CLONE_NEWIPC must also detach from the undolist: after switching * to a new ipc namespace, the semaphore arrays from the old * namespace are unreachable. 
*/ if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) do_sysvsem = 1; err = unshare_fs(unshare_flags, &new_fs); if (err) goto bad_unshare_out; err = unshare_fd(unshare_flags, &new_fd); if (err) goto bad_unshare_cleanup_fs; err = unshare_userns(unshare_flags, &new_cred); if (err) goto bad_unshare_cleanup_fd; err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_cred, new_fs); if (err) goto bad_unshare_cleanup_cred; if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { if (do_sysvsem) { /* * CLONE_SYSVSEM is equivalent to sys_exit(). */ exit_sem(current); } if (new_nsproxy) switch_task_namespaces(current, new_nsproxy); task_lock(current); if (new_fs) { fs = current->fs; spin_lock(&fs->lock); current->fs = new_fs; if (--fs->users) new_fs = NULL; else new_fs = fs; spin_unlock(&fs->lock); } if (new_fd) { fd = current->files; current->files = new_fd; new_fd = fd; } task_unlock(current); if (new_cred) { /* Install the new user namespace */ commit_creds(new_cred); new_cred = NULL; } } bad_unshare_cleanup_cred: if (new_cred) put_cred(new_cred); bad_unshare_cleanup_fd: if (new_fd) put_files_struct(new_fd); bad_unshare_cleanup_fs: if (new_fs) free_fs_struct(new_fs); bad_unshare_out: return err; } /* * Helper to unshare the files of the current task. * We don't want to expose copy_files internals to * the exec layer of the kernel. */ int unshare_files(struct files_struct **displaced) { struct task_struct *task = current; struct files_struct *copy = NULL; int error; error = unshare_fd(CLONE_FILES, &copy); if (error || !copy) { *displaced = NULL; return error; } *displaced = task->files; task_lock(task); task->files = copy; task_unlock(task); return 0; }
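/*
 * Hedged user-space sketch, not part of kernel/fork.c: it exercises the
 * sys_unshare() path implemented above through the unshare(2) wrapper.
 * As the flag-implication code shows, CLONE_NEWNS pulls in CLONE_FS, so
 * after the call both the mount namespace and the fs_struct (cwd/root)
 * become private to the caller.  The program below is an assumption added
 * for illustration only; build it as an ordinary program (it needs
 * CAP_SYS_ADMIN, e.g. run via sudo).
 */
#if 0	/* stand-alone example, kept out of the kernel build */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWNS) != 0) {	/* implies CLONE_FS, see above */
		perror("unshare(CLONE_NEWNS)");
		return 1;
	}
	/* mounts made from here on are invisible to the original namespace */
	printf("pid %ld now has a private mount namespace\n", (long)getpid());
	return 0;
}
#endif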
/* * (C) 1997 Linus Torvalds * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation) */ #include <linux/export.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/backing-dev.h> #include <linux/hash.h> #include <linux/swap.h> #include <linux/security.h> #include <linux/cdev.h> #include <linux/bootmem.h> #include <linux/fsnotify.h> #include <linux/mount.h> #include <linux/posix_acl.h> #include <linux/prefetch.h> #include <linux/buffer_head.h> /* for inode_has_buffers */ #include <linux/ratelimit.h> #include <linux/list_lru.h> #include "internal.h" /* * Inode locking rules: * * inode->i_lock protects: * inode->i_state, inode->i_hash, __iget() * Inode LRU list locks protect: * inode->i_sb->s_inode_lru, inode->i_lru * inode_sb_list_lock protects: * sb->s_inodes, inode->i_sb_list * bdi->wb.list_lock protects: * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list * inode_hash_lock protects: * inode_hashtable, inode->i_hash * * Lock ordering: * * inode_sb_list_lock * inode->i_lock * Inode LRU list locks * * bdi->wb.list_lock * inode->i_lock * * inode_hash_lock * inode_sb_list_lock * inode->i_lock * * iunique_lock * inode_hash_lock */ static unsigned int i_hash_mask __read_mostly; static unsigned int i_hash_shift __read_mostly; static struct hlist_head *inode_hashtable __read_mostly; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); /* * Empty aops. Can be used for the cases where the user does not * define any of the address_space operations. */ const struct address_space_operations empty_aops = { }; EXPORT_SYMBOL(empty_aops); /* * Statistics gathering.. */ struct inodes_stat_t inodes_stat; static DEFINE_PER_CPU(unsigned long, nr_inodes); static DEFINE_PER_CPU(unsigned long, nr_unused); static struct kmem_cache *inode_cachep __read_mostly; static long get_nr_inodes(void) { int i; long sum = 0; for_each_possible_cpu(i) sum += per_cpu(nr_inodes, i); return sum < 0 ? 0 : sum; } static inline long get_nr_inodes_unused(void) { int i; long sum = 0; for_each_possible_cpu(i) sum += per_cpu(nr_unused, i); return sum < 0 ? 0 : sum; } long get_nr_dirty_inodes(void) { /* not actually dirty inodes, but a wild approximation */ long nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); return nr_dirty > 0 ? nr_dirty : 0; } /* * Handle nr_inode sysctl */ #ifdef CONFIG_SYSCTL int proc_nr_inodes(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { inodes_stat.nr_inodes = get_nr_inodes(); inodes_stat.nr_unused = get_nr_inodes_unused(); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } #endif /** * inode_init_always - perform inode structure intialisation * @sb: superblock inode belongs to * @inode: inode to initialise * * These are initializations that need to be done on every inode * allocation as the fields are not initialised by slab allocation. 
*/ int inode_init_always(struct super_block *sb, struct inode *inode) { static const struct inode_operations empty_iops; static const struct file_operations empty_fops; struct address_space *const mapping = &inode->i_data; inode->i_sb = sb; inode->i_blkbits = sb->s_blocksize_bits; inode->i_flags = 0; atomic_set(&inode->i_count, 1); inode->i_op = &empty_iops; inode->i_fop = &empty_fops; inode->__i_nlink = 1; inode->i_opflags = 0; i_uid_write(inode, 0); i_gid_write(inode, 0); atomic_set(&inode->i_writecount, 0); inode->i_size = 0; inode->i_blocks = 0; inode->i_bytes = 0; inode->i_generation = 0; #ifdef CONFIG_QUOTA memset(&inode->i_dquot, 0, sizeof(inode->i_dquot)); #endif inode->i_pipe = NULL; inode->i_bdev = NULL; inode->i_cdev = NULL; inode->i_rdev = 0; inode->dirtied_when = 0; if (security_inode_alloc(inode)) goto out; spin_lock_init(&inode->i_lock); lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); mutex_init(&inode->i_mutex); lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key); atomic_set(&inode->i_dio_count, 0); mapping->a_ops = &empty_aops; mapping->host = inode; mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); mapping->private_data = NULL; mapping->backing_dev_info = &default_backing_dev_info; mapping->writeback_index = 0; /* * If the block_device provides a backing_dev_info for client * inodes then use that. Otherwise the inode share the bdev's * backing_dev_info. */ if (sb->s_bdev) { struct backing_dev_info *bdi; bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; mapping->backing_dev_info = bdi; } inode->i_private = NULL; inode->i_mapping = mapping; INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */ #ifdef CONFIG_FS_POSIX_ACL inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; #endif #ifdef CONFIG_FSNOTIFY inode->i_fsnotify_mask = 0; #endif this_cpu_inc(nr_inodes); return 0; out: return -ENOMEM; } EXPORT_SYMBOL(inode_init_always); static struct inode *alloc_inode(struct super_block *sb) { struct inode *inode; if (sb->s_op->alloc_inode) inode = sb->s_op->alloc_inode(sb); else inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); if (!inode) return NULL; if (unlikely(inode_init_always(sb, inode))) { if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else kmem_cache_free(inode_cachep, inode); return NULL; } return inode; } void free_inode_nonrcu(struct inode *inode) { kmem_cache_free(inode_cachep, inode); } EXPORT_SYMBOL(free_inode_nonrcu); void __destroy_inode(struct inode *inode) { BUG_ON(inode_has_buffers(inode)); security_inode_free(inode); fsnotify_inode_delete(inode); if (!inode->i_nlink) { WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); atomic_long_dec(&inode->i_sb->s_remove_count); } #ifdef CONFIG_FS_POSIX_ACL if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED) posix_acl_release(inode->i_acl); if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) posix_acl_release(inode->i_default_acl); #endif this_cpu_dec(nr_inodes); } EXPORT_SYMBOL(__destroy_inode); static void i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(inode_cachep, inode); } static void destroy_inode(struct inode *inode) { BUG_ON(!list_empty(&inode->i_lru)); __destroy_inode(inode); if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else call_rcu(&inode->i_rcu, i_callback); } /** * drop_nlink - directly drop an inode's link count * @inode: inode * * This is a low-level filesystem helper to replace any * 
direct filesystem manipulation of i_nlink. In cases * where we are attempting to track writes to the * filesystem, a decrement to zero means an imminent * write when the file is truncated and actually unlinked * on the filesystem. */ void drop_nlink(struct inode *inode) { WARN_ON(inode->i_nlink == 0); inode->__i_nlink--; if (!inode->i_nlink) atomic_long_inc(&inode->i_sb->s_remove_count); } EXPORT_SYMBOL(drop_nlink); /** * clear_nlink - directly zero an inode's link count * @inode: inode * * This is a low-level filesystem helper to replace any * direct filesystem manipulation of i_nlink. See * drop_nlink() for why we care about i_nlink hitting zero. */ void clear_nlink(struct inode *inode) { if (inode->i_nlink) { inode->__i_nlink = 0; atomic_long_inc(&inode->i_sb->s_remove_count); } } EXPORT_SYMBOL(clear_nlink); /** * set_nlink - directly set an inode's link count * @inode: inode * @nlink: new nlink (should be non-zero) * * This is a low-level filesystem helper to replace any * direct filesystem manipulation of i_nlink. */ void set_nlink(struct inode *inode, unsigned int nlink) { if (!nlink) { clear_nlink(inode); } else { /* Yes, some filesystems do change nlink from zero to one */ if (inode->i_nlink == 0) atomic_long_dec(&inode->i_sb->s_remove_count); inode->__i_nlink = nlink; } } EXPORT_SYMBOL(set_nlink); /** * inc_nlink - directly increment an inode's link count * @inode: inode * * This is a low-level filesystem helper to replace any * direct filesystem manipulation of i_nlink. Currently, * it is only here for parity with dec_nlink(). */ void inc_nlink(struct inode *inode) { if (unlikely(inode->i_nlink == 0)) { WARN_ON(!(inode->i_state & I_LINKABLE)); atomic_long_dec(&inode->i_sb->s_remove_count); } inode->__i_nlink++; } EXPORT_SYMBOL(inc_nlink); void address_space_init_once(struct address_space *mapping) { memset(mapping, 0, sizeof(*mapping)); INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); spin_lock_init(&mapping->tree_lock); mutex_init(&mapping->i_mmap_mutex); INIT_LIST_HEAD(&mapping->private_list); spin_lock_init(&mapping->private_lock); mapping->i_mmap = RB_ROOT; INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); } EXPORT_SYMBOL(address_space_init_once); /* * These are initializations that only need to be done * once, because the fields are idempotent across use * of the inode, so let the slab aware of that. */ void inode_init_once(struct inode *inode) { memset(inode, 0, sizeof(*inode)); INIT_HLIST_NODE(&inode->i_hash); INIT_LIST_HEAD(&inode->i_devices); INIT_LIST_HEAD(&inode->i_wb_list); INIT_LIST_HEAD(&inode->i_lru); address_space_init_once(&inode->i_data); i_size_ordered_init(inode); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&inode->i_fsnotify_marks); #endif } EXPORT_SYMBOL(inode_init_once); static void init_once(void *foo) { struct inode *inode = (struct inode *) foo; inode_init_once(inode); } /* * inode->i_lock must be held */ void __iget(struct inode *inode) { atomic_inc(&inode->i_count); } /* * get additional reference to inode; caller must already hold one. */ void ihold(struct inode *inode) { WARN_ON(atomic_inc_return(&inode->i_count) < 2); } EXPORT_SYMBOL(ihold); static void inode_lru_list_add(struct inode *inode) { if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru)) this_cpu_inc(nr_unused); } /* * Add inode to LRU if needed (inode is unused and clean). * * Needs inode->i_lock held. 
*/ void inode_add_lru(struct inode *inode) { if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) && !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE) inode_lru_list_add(inode); } static void inode_lru_list_del(struct inode *inode) { if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru)) this_cpu_dec(nr_unused); } /** * inode_sb_list_add - add inode to the superblock list of inodes * @inode: inode to add */ void inode_sb_list_add(struct inode *inode) { spin_lock(&inode_sb_list_lock); list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); spin_unlock(&inode_sb_list_lock); } EXPORT_SYMBOL_GPL(inode_sb_list_add); static inline void inode_sb_list_del(struct inode *inode) { if (!list_empty(&inode->i_sb_list)) { spin_lock(&inode_sb_list_lock); list_del_init(&inode->i_sb_list); spin_unlock(&inode_sb_list_lock); } } static unsigned long hash(struct super_block *sb, unsigned long hashval) { unsigned long tmp; tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES; tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift); return tmp & i_hash_mask; } /** * __insert_inode_hash - hash an inode * @inode: unhashed inode * @hashval: unsigned long value used to locate this object in the * inode_hashtable. * * Add an inode to the inode hash for this superblock. */ void __insert_inode_hash(struct inode *inode, unsigned long hashval) { struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval); spin_lock(&inode_hash_lock); spin_lock(&inode->i_lock); hlist_add_head(&inode->i_hash, b); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); } EXPORT_SYMBOL(__insert_inode_hash); /** * __remove_inode_hash - remove an inode from the hash * @inode: inode to unhash * * Remove an inode from the superblock. */ void __remove_inode_hash(struct inode *inode) { spin_lock(&inode_hash_lock); spin_lock(&inode->i_lock); hlist_del_init(&inode->i_hash); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); } EXPORT_SYMBOL(__remove_inode_hash); void clear_inode(struct inode *inode) { might_sleep(); /* * We have to cycle tree_lock here because reclaim can be still in the * process of removing the last page (in __delete_from_page_cache()) * and we must not free mapping under it. */ spin_lock_irq(&inode->i_data.tree_lock); BUG_ON(inode->i_data.nrpages); BUG_ON(inode->i_data.nrshadows); spin_unlock_irq(&inode->i_data.tree_lock); BUG_ON(!list_empty(&inode->i_data.private_list)); BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); /* don't need i_lock here, no concurrent mods to i_state */ inode->i_state = I_FREEING | I_CLEAR; } EXPORT_SYMBOL(clear_inode); /* * Free the inode passed in, removing it from the lists it is still connected * to. We remove any pages still attached to the inode and wait for any IO that * is still in progress before finally destroying the inode. * * An inode must already be marked I_FREEING so that we avoid the inode being * moved back onto lists if we race with other code that manipulates the lists * (e.g. writeback_single_inode). The caller is responsible for setting this. * * An inode must already be removed from the LRU list before being evicted from * the cache. This should occur atomically with setting the I_FREEING state * flag, so no inodes here should ever be on the LRU when being evicted. 
*/ static void evict(struct inode *inode) { const struct super_operations *op = inode->i_sb->s_op; BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(!list_empty(&inode->i_lru)); if (!list_empty(&inode->i_wb_list)) inode_wb_list_del(inode); inode_sb_list_del(inode); /* * Wait for flusher thread to be done with the inode so that filesystem * does not start destroying it while writeback is still running. Since * the inode has I_FREEING set, flusher thread won't start new work on * the inode. We just have to wait for running writeback to finish. */ inode_wait_for_writeback(inode); if (op->evict_inode) { op->evict_inode(inode); } else { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); } if (S_ISBLK(inode->i_mode) && inode->i_bdev) bd_forget(inode); if (S_ISCHR(inode->i_mode) && inode->i_cdev) cd_forget(inode); remove_inode_hash(inode); spin_lock(&inode->i_lock); wake_up_bit(&inode->i_state, __I_NEW); BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); spin_unlock(&inode->i_lock); destroy_inode(inode); } /* * dispose_list - dispose of the contents of a local list * @head: the head of the list to free * * Dispose-list gets a local list with local inodes in it, so it doesn't * need to worry about list corruption and SMP locks. */ static void dispose_list(struct list_head *head) { while (!list_empty(head)) { struct inode *inode; inode = list_first_entry(head, struct inode, i_lru); list_del_init(&inode->i_lru); evict(inode); } } /** * evict_inodes - evict all evictable inodes for a superblock * @sb: superblock to operate on * * Make sure that no inodes with zero refcount are retained. This is * called by superblock shutdown after having MS_ACTIVE flag removed, * so any inode reaching zero refcount during or after that call will * be immediately evicted. */ void evict_inodes(struct super_block *sb) { struct inode *inode, *next; LIST_HEAD(dispose); spin_lock(&inode_sb_list_lock); list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { if (atomic_read(&inode->i_count)) continue; spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { spin_unlock(&inode->i_lock); continue; } inode->i_state |= I_FREEING; inode_lru_list_del(inode); spin_unlock(&inode->i_lock); list_add(&inode->i_lru, &dispose); } spin_unlock(&inode_sb_list_lock); dispose_list(&dispose); } /** * invalidate_inodes - attempt to free all inodes on a superblock * @sb: superblock to operate on * @kill_dirty: flag to guide handling of dirty inodes * * Attempts to free all inodes for a given superblock. If there were any * busy inodes return a non-zero value, else zero. * If @kill_dirty is set, discard dirty inodes too, otherwise treat * them as busy. */ int invalidate_inodes(struct super_block *sb, bool kill_dirty) { int busy = 0; struct inode *inode, *next; LIST_HEAD(dispose); spin_lock(&inode_sb_list_lock); list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { spin_unlock(&inode->i_lock); continue; } if (inode->i_state & I_DIRTY && !kill_dirty) { spin_unlock(&inode->i_lock); busy = 1; continue; } if (atomic_read(&inode->i_count)) { spin_unlock(&inode->i_lock); busy = 1; continue; } inode->i_state |= I_FREEING; inode_lru_list_del(inode); spin_unlock(&inode->i_lock); list_add(&inode->i_lru, &dispose); } spin_unlock(&inode_sb_list_lock); dispose_list(&dispose); return busy; } /* * Isolate the inode from the LRU in preparation for freeing it. 
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status
inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_del_init(&inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;

			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_move(&inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate,
				   &freeable, &nr_to_scan);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				     struct hlist_head *head,
				     unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.
 * The inode won't be chained onto the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work on it
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping
 *
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
*/ spin_unlock(&inode_hash_lock); destroy_inode(inode); inode = old; wait_on_inode(inode); } return inode; set_failed: spin_unlock(&inode_hash_lock); destroy_inode(inode); return NULL; } EXPORT_SYMBOL(iget5_locked); /** * iget_locked - obtain an inode from a mounted file system * @sb: super block of file system * @ino: inode number to get * * Search for the inode specified by @ino in the inode cache and if present * return it with an increased reference count. This is for file systems * where the inode number is sufficient for unique identification of an inode. * * If the inode is not in cache, allocate a new inode and return it locked, * hashed, and with the I_NEW flag set. The file system gets to fill it in * before unlocking it via unlock_new_inode(). */ struct inode *iget_locked(struct super_block *sb, unsigned long ino) { struct hlist_head *head = inode_hashtable + hash(sb, ino); struct inode *inode; spin_lock(&inode_hash_lock); inode = find_inode_fast(sb, head, ino); spin_unlock(&inode_hash_lock); if (inode) { wait_on_inode(inode); return inode; } inode = alloc_inode(sb); if (inode) { struct inode *old; spin_lock(&inode_hash_lock); /* We released the lock, so.. */ old = find_inode_fast(sb, head, ino); if (!old) { inode->i_ino = ino; spin_lock(&inode->i_lock); inode->i_state = I_NEW; hlist_add_head(&inode->i_hash, head); spin_unlock(&inode->i_lock); inode_sb_list_add(inode); spin_unlock(&inode_hash_lock); /* Return the locked inode with I_NEW set, the * caller is responsible for filling in the contents */ return inode; } /* * Uhhuh, somebody else created the same inode under * us. Use the old inode instead of the one we just * allocated. */ spin_unlock(&inode_hash_lock); destroy_inode(inode); inode = old; wait_on_inode(inode); } return inode; } EXPORT_SYMBOL(iget_locked); /* * search the inode cache for a matching inode number. * If we find one, then the inode number we are trying to * allocate is not unique and so we should not use it. * * Returns 1 if the inode number is unique, 0 if it is not. */ static int test_inode_iunique(struct super_block *sb, unsigned long ino) { struct hlist_head *b = inode_hashtable + hash(sb, ino); struct inode *inode; spin_lock(&inode_hash_lock); hlist_for_each_entry(inode, b, i_hash) { if (inode->i_ino == ino && inode->i_sb == sb) { spin_unlock(&inode_hash_lock); return 0; } } spin_unlock(&inode_hash_lock); return 1; } /** * iunique - get a unique inode number * @sb: superblock * @max_reserved: highest reserved inode number * * Obtain an inode number that is unique on the system for a given * superblock. This is used by file systems that have no natural * permanent inode numbering system. An inode number is returned that * is higher than the reserved limit but unique. * * BUGS: * With a large number of inodes live on the file system this function * currently becomes quite slow. */ ino_t iunique(struct super_block *sb, ino_t max_reserved) { /* * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW * error if st_ino won't fit in target struct field. Use 32bit counter * here to attempt to avoid that. 
 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
*/ struct inode *ilookup(struct super_block *sb, unsigned long ino) { struct hlist_head *head = inode_hashtable + hash(sb, ino); struct inode *inode; spin_lock(&inode_hash_lock); inode = find_inode_fast(sb, head, ino); spin_unlock(&inode_hash_lock); if (inode) wait_on_inode(inode); return inode; } EXPORT_SYMBOL(ilookup); int insert_inode_locked(struct inode *inode) { struct super_block *sb = inode->i_sb; ino_t ino = inode->i_ino; struct hlist_head *head = inode_hashtable + hash(sb, ino); while (1) { struct inode *old = NULL; spin_lock(&inode_hash_lock); hlist_for_each_entry(old, head, i_hash) { if (old->i_ino != ino) continue; if (old->i_sb != sb) continue; spin_lock(&old->i_lock); if (old->i_state & (I_FREEING|I_WILL_FREE)) { spin_unlock(&old->i_lock); continue; } break; } if (likely(!old)) { spin_lock(&inode->i_lock); inode->i_state |= I_NEW; hlist_add_head(&inode->i_hash, head); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); return 0; } __iget(old); spin_unlock(&old->i_lock); spin_unlock(&inode_hash_lock); wait_on_inode(old); if (unlikely(!inode_unhashed(old))) { iput(old); return -EBUSY; } iput(old); } } EXPORT_SYMBOL(insert_inode_locked); int insert_inode_locked4(struct inode *inode, unsigned long hashval, int (*test)(struct inode *, void *), void *data) { struct super_block *sb = inode->i_sb; struct hlist_head *head = inode_hashtable + hash(sb, hashval); while (1) { struct inode *old = NULL; spin_lock(&inode_hash_lock); hlist_for_each_entry(old, head, i_hash) { if (old->i_sb != sb) continue; if (!test(old, data)) continue; spin_lock(&old->i_lock); if (old->i_state & (I_FREEING|I_WILL_FREE)) { spin_unlock(&old->i_lock); continue; } break; } if (likely(!old)) { spin_lock(&inode->i_lock); inode->i_state |= I_NEW; hlist_add_head(&inode->i_hash, head); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); return 0; } __iget(old); spin_unlock(&old->i_lock); spin_unlock(&inode_hash_lock); wait_on_inode(old); if (unlikely(!inode_unhashed(old))) { iput(old); return -EBUSY; } iput(old); } } EXPORT_SYMBOL(insert_inode_locked4); int generic_delete_inode(struct inode *inode) { return 1; } EXPORT_SYMBOL(generic_delete_inode); /* * Called when we're dropping the last reference * to an inode. * * Call the FS "drop_inode()" function, defaulting to * the legacy UNIX filesystem behaviour. If it tells * us to evict inode, do so. Otherwise, retain inode * in cache if fs is alive, sync and evict if fs is * shutting down. */ static void iput_final(struct inode *inode) { struct super_block *sb = inode->i_sb; const struct super_operations *op = inode->i_sb->s_op; int drop; WARN_ON(inode->i_state & I_NEW); if (op->drop_inode) drop = op->drop_inode(inode); else drop = generic_drop_inode(inode); if (!drop && (sb->s_flags & MS_ACTIVE)) { inode->i_state |= I_REFERENCED; inode_add_lru(inode); spin_unlock(&inode->i_lock); return; } if (!drop) { inode->i_state |= I_WILL_FREE; spin_unlock(&inode->i_lock); write_inode_now(inode, 1); spin_lock(&inode->i_lock); WARN_ON(inode->i_state & I_NEW); inode->i_state &= ~I_WILL_FREE; } inode->i_state |= I_FREEING; if (!list_empty(&inode->i_lru)) inode_lru_list_del(inode); spin_unlock(&inode->i_lock); evict(inode); } /** * iput - put an inode * @inode: inode to put * * Puts an inode, dropping its usage count. If the inode use count hits * zero, the inode is then freed and may also be destroyed. * * Consequently, iput() can sleep. 
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap	- find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;

	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/*
 * This does the actual work of updating an inode's time or version. The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	if (inode->i_op->update_time)
		return inode->i_op->update_time(inode, time, flags);

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	mark_inode_dirty_sync(inode);
	return 0;
}

/**
 *	touch_atime	-	update the access time
 *	@path: the &struct path to update
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = path->dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt))
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);

/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback. Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore updates via this function with the
 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 *	timestamps are handled by the server. This can return an error for
 *	file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
*/ static void __wait_on_freeing_inode(struct inode *inode) { wait_queue_head_t *wq; DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); wq = bit_waitqueue(&inode->i_state, __I_NEW); prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); schedule(); finish_wait(wq, &wait.wait); spin_lock(&inode_hash_lock); } static __initdata unsigned long ihash_entries; static int __init set_ihash_entries(char *str) { if (!str) return 0; ihash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("ihash_entries=", set_ihash_entries); /* * Initialize the waitqueues and inode hash table. */ void __init inode_init_early(void) { unsigned int loop; /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. */ if (hashdist) return; inode_hashtable = alloc_large_system_hash("Inode-cache", sizeof(struct hlist_head), ihash_entries, 14, HASH_EARLY, &i_hash_shift, &i_hash_mask, 0, 0); for (loop = 0; loop < (1U << i_hash_shift); loop++) INIT_HLIST_HEAD(&inode_hashtable[loop]); } void __init inode_init(void) { unsigned int loop; /* inode slab cache */ inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode), 0, (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| SLAB_MEM_SPREAD), init_once); /* Hash may have been set up in inode_init_early */ if (!hashdist) return; inode_hashtable = alloc_large_system_hash("Inode-cache", sizeof(struct hlist_head), ihash_entries, 14, 0, &i_hash_shift, &i_hash_mask, 0, 0); for (loop = 0; loop < (1U << i_hash_shift); loop++) INIT_HLIST_HEAD(&inode_hashtable[loop]); } void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev) { inode->i_mode = mode; if (S_ISCHR(mode)) { inode->i_fop = &def_chr_fops; inode->i_rdev = rdev; } else if (S_ISBLK(mode)) { inode->i_fop = &def_blk_fops; inode->i_rdev = rdev; } else if (S_ISFIFO(mode)) inode->i_fop = &pipefifo_fops; else if (S_ISSOCK(mode)) inode->i_fop = &bad_sock_fops; else printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for" " inode %s:%lu\n", mode, inode->i_sb->s_id, inode->i_ino); } EXPORT_SYMBOL(init_special_inode); /** * inode_init_owner - Init uid,gid,mode for new inode according to posix standards * @inode: New inode * @dir: Directory inode * @mode: mode of the new inode */ void inode_init_owner(struct inode *inode, const struct inode *dir, umode_t mode) { inode->i_uid = current_fsuid(); if (dir && dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; if (S_ISDIR(mode)) mode |= S_ISGID; } else inode->i_gid = current_fsgid(); inode->i_mode = mode; } EXPORT_SYMBOL(inode_init_owner); /** * inode_owner_or_capable - check current task permissions to inode * @inode: inode being checked * * Return true if current either has CAP_FOWNER in a namespace with the * inode owner uid mapped, or owns the file. 
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_dio_done - signal completion of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL(inode_dio_done);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least one
 * code path which doesn't today --- for example,
 * __generic_file_aio_write() calls file_remove_suid() without holding
 * i_mutex --- so we use cmpxchg() out of an abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
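/*
 * Illustrative aside (not from the original source): the cmpxchg() loop in
 * inode_set_flags() above is a standard lock-free read-modify-write. Below
 * is a minimal, self-contained userspace sketch of the same pattern using
 * C11 atomics; every demo_* name is hypothetical and exists only for this
 * example.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_FLAG_A 0x1u	/* stand-ins for the S_* inode flag bits */
#define DEMO_FLAG_B 0x2u
#define DEMO_FLAG_C 0x4u

static _Atomic unsigned int demo_flags = DEMO_FLAG_A | DEMO_FLAG_C;

/* Replace the bits selected by @mask with @flags, retrying if another
 * thread modified demo_flags between the load and the swap. */
static void demo_set_flags(unsigned int flags, unsigned int mask)
{
	unsigned int old_flags, new_flags;

	old_flags = atomic_load(&demo_flags);
	do {
		new_flags = (old_flags & ~mask) | flags;
		/* On failure, old_flags is refreshed with the current
		 * value, so the loop recomputes new_flags from it. */
	} while (!atomic_compare_exchange_weak(&demo_flags, &old_flags,
					       new_flags));
}

int main(void)
{
	/* Set FLAG_B and clear FLAG_C in one atomic update: 0x5 -> 0x3. */
	demo_set_flags(DEMO_FLAG_B, DEMO_FLAG_B | DEMO_FLAG_C);
	printf("flags: 0x%x\n", (unsigned int)atomic_load(&demo_flags));
	return 0;
}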
./CrossVul/dataset_final_sorted/CWE-264/c/good_2190_1
crossvul-cpp_data_good_1533_0
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* plugins/preauth/otp/main.c - OTP kdcpreauth module definition */
/*
 * Copyright 2011 NORDUnet A/S.  All rights reserved.
 * Copyright 2013 Red Hat, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "k5-int.h"
#include "k5-json.h"

#include <krb5/preauth_plugin.h>
#include "otp_state.h"

#include <errno.h>
#include <ctype.h>

static krb5_preauthtype otp_pa_type_list[] =
  { KRB5_PADATA_OTP_REQUEST, 0 };

struct request_state {
    krb5_kdcpreauth_verify_respond_fn respond;
    void *arg;
    krb5_enc_tkt_part *enc_tkt_reply;
};

static krb5_error_code
decrypt_encdata(krb5_context context, krb5_keyblock *armor_key,
                krb5_pa_otp_req *req, krb5_data *out)
{
    krb5_error_code retval;
    krb5_data plaintext;

    if (req == NULL)
        return EINVAL;

    retval = alloc_data(&plaintext, req->enc_data.ciphertext.length);
    if (retval)
        return retval;

    retval = krb5_c_decrypt(context, armor_key, KRB5_KEYUSAGE_PA_OTP_REQUEST,
                            NULL, &req->enc_data, &plaintext);
    if (retval != 0) {
        com_err("otp", retval, "Unable to decrypt encData in PA-OTP-REQUEST");
        free(plaintext.data);
        return retval;
    }

    *out = plaintext;
    return 0;
}

static krb5_error_code
nonce_verify(krb5_context ctx, krb5_keyblock *armor_key,
             const krb5_data *nonce)
{
    krb5_error_code retval;
    krb5_timestamp ts;
    krb5_data *er = NULL;

    if (armor_key == NULL || nonce->data == NULL) {
        retval = EINVAL;
        goto out;
    }

    /* Decode the PA-OTP-ENC-REQUEST structure. */
    retval = decode_krb5_pa_otp_enc_req(nonce, &er);
    if (retval != 0)
        goto out;

    /* Make sure the nonce is exactly the same size as the one generated. */
    if (er->length != armor_key->length + sizeof(krb5_timestamp)) {
        retval = EINVAL;
        goto out;
    }

    /* Check to make sure the timestamp at the beginning is still valid. */
    ts = load_32_be(er->data);
    retval = krb5_check_clockskew(ctx, ts);

out:
    krb5_free_data(ctx, er);
    return retval;
}

static krb5_error_code
timestamp_verify(krb5_context ctx, const krb5_data *nonce)
{
    krb5_error_code retval = EINVAL;
    krb5_pa_enc_ts *et = NULL;

    if (nonce->data == NULL)
        goto out;

    /* Decode the PA-ENC-TS-ENC structure. */
    retval = decode_krb5_pa_enc_ts(nonce, &et);
    if (retval != 0)
        goto out;

    /* Check the clockskew.
*/ retval = krb5_check_clockskew(ctx, et->patimestamp); out: krb5_free_pa_enc_ts(ctx, et); return retval; } static krb5_error_code nonce_generate(krb5_context ctx, unsigned int length, krb5_data *nonce_out) { krb5_data nonce; krb5_error_code retval; krb5_timestamp now; retval = krb5_timeofday(ctx, &now); if (retval != 0) return retval; retval = alloc_data(&nonce, sizeof(now) + length); if (retval != 0) return retval; retval = krb5_c_random_make_octets(ctx, &nonce); if (retval != 0) { free(nonce.data); return retval; } store_32_be(now, nonce.data); *nonce_out = nonce; return 0; } static void on_response(void *data, krb5_error_code retval, otp_response response) { struct request_state rs = *(struct request_state *)data; free(data); if (retval == 0 && response != otp_response_success) retval = KRB5_PREAUTH_FAILED; if (retval == 0) rs.enc_tkt_reply->flags |= TKT_FLG_PRE_AUTH; rs.respond(rs.arg, retval, NULL, NULL, NULL); } static krb5_error_code otp_init(krb5_context context, krb5_kdcpreauth_moddata *moddata_out, const char **realmnames) { krb5_error_code retval; otp_state *state; retval = otp_state_new(context, &state); if (retval) return retval; *moddata_out = (krb5_kdcpreauth_moddata)state; return 0; } static void otp_fini(krb5_context context, krb5_kdcpreauth_moddata moddata) { otp_state_free((otp_state *)moddata); } static int otp_flags(krb5_context context, krb5_preauthtype pa_type) { return PA_REPLACES_KEY; } static void otp_edata(krb5_context context, krb5_kdc_req *request, krb5_kdcpreauth_callbacks cb, krb5_kdcpreauth_rock rock, krb5_kdcpreauth_moddata moddata, krb5_preauthtype pa_type, krb5_kdcpreauth_edata_respond_fn respond, void *arg) { krb5_otp_tokeninfo ti, *tis[2] = { &ti, NULL }; krb5_keyblock *armor_key = NULL; krb5_pa_otp_challenge chl; krb5_pa_data *pa = NULL; krb5_error_code retval; krb5_data *encoding; char *config; /* Determine if otp is enabled for the user. */ retval = cb->get_string(context, rock, "otp", &config); if (retval == 0 && config == NULL) retval = ENOENT; if (retval != 0) goto out; cb->free_string(context, rock, config); /* Get the armor key. This indicates the length of random data to use in * the nonce. */ armor_key = cb->fast_armor(context, rock); if (armor_key == NULL) { retval = ENOENT; goto out; } /* Build the (mostly empty) challenge. */ memset(&ti, 0, sizeof(ti)); memset(&chl, 0, sizeof(chl)); chl.tokeninfo = tis; ti.format = -1; ti.length = -1; ti.iteration_count = -1; /* Generate the nonce. */ retval = nonce_generate(context, armor_key->length, &chl.nonce); if (retval != 0) goto out; /* Build the output pa-data. */ retval = encode_krb5_pa_otp_challenge(&chl, &encoding); if (retval != 0) goto out; pa = k5alloc(sizeof(krb5_pa_data), &retval); if (pa == NULL) { krb5_free_data(context, encoding); goto out; } pa->pa_type = KRB5_PADATA_OTP_CHALLENGE; pa->contents = (krb5_octet *)encoding->data; pa->length = encoding->length; free(encoding); out: (*respond)(arg, retval, pa); } static void otp_verify(krb5_context context, krb5_data *req_pkt, krb5_kdc_req *request, krb5_enc_tkt_part *enc_tkt_reply, krb5_pa_data *pa, krb5_kdcpreauth_callbacks cb, krb5_kdcpreauth_rock rock, krb5_kdcpreauth_moddata moddata, krb5_kdcpreauth_verify_respond_fn respond, void *arg) { krb5_keyblock *armor_key = NULL; krb5_pa_otp_req *req = NULL; struct request_state *rs; krb5_error_code retval; krb5_data d, plaintext; char *config; /* Get the FAST armor key. 
*/ armor_key = cb->fast_armor(context, rock); if (armor_key == NULL) { retval = KRB5KDC_ERR_PREAUTH_FAILED; com_err("otp", retval, "No armor key found when verifying padata"); goto error; } /* Decode the request. */ d = make_data(pa->contents, pa->length); retval = decode_krb5_pa_otp_req(&d, &req); if (retval != 0) { com_err("otp", retval, "Unable to decode OTP request"); goto error; } /* Decrypt the nonce from the request. */ retval = decrypt_encdata(context, armor_key, req, &plaintext); if (retval != 0) { com_err("otp", retval, "Unable to decrypt nonce"); goto error; } /* Verify the nonce or timestamp. */ retval = nonce_verify(context, armor_key, &plaintext); if (retval != 0) retval = timestamp_verify(context, &plaintext); krb5_free_data_contents(context, &plaintext); if (retval != 0) { com_err("otp", retval, "Unable to verify nonce or timestamp"); goto error; } /* Create the request state. Save the response callback, and the * enc_tkt_reply pointer so we can set the TKT_FLG_PRE_AUTH flag later. */ rs = k5alloc(sizeof(struct request_state), &retval); if (rs == NULL) goto error; rs->arg = arg; rs->respond = respond; rs->enc_tkt_reply = enc_tkt_reply; /* Get the principal's OTP configuration string. */ retval = cb->get_string(context, rock, "otp", &config); if (retval == 0 && config == NULL) retval = KRB5_PREAUTH_FAILED; if (retval != 0) { free(rs); goto error; } /* Send the request. */ otp_state_verify((otp_state *)moddata, cb->event_context(context, rock), request->client, config, req, on_response, rs); cb->free_string(context, rock, config); k5_free_pa_otp_req(context, req); return; error: k5_free_pa_otp_req(context, req); (*respond)(arg, retval, NULL, NULL, NULL); } static krb5_error_code otp_return_padata(krb5_context context, krb5_pa_data *padata, krb5_data *req_pkt, krb5_kdc_req *request, krb5_kdc_rep *reply, krb5_keyblock *encrypting_key, krb5_pa_data **send_pa_out, krb5_kdcpreauth_callbacks cb, krb5_kdcpreauth_rock rock, krb5_kdcpreauth_moddata moddata, krb5_kdcpreauth_modreq modreq) { krb5_keyblock *armor_key = NULL; if (padata->length == 0) return 0; /* Get the armor key. */ armor_key = cb->fast_armor(context, rock); if (!armor_key) { com_err("otp", ENOENT, "No armor key found when returning padata"); return ENOENT; } /* Replace the reply key with the FAST armor key. */ krb5_free_keyblock_contents(context, encrypting_key); return krb5_copy_keyblock_contents(context, armor_key, encrypting_key); } krb5_error_code kdcpreauth_otp_initvt(krb5_context context, int maj_ver, int min_ver, krb5_plugin_vtable vtable); krb5_error_code kdcpreauth_otp_initvt(krb5_context context, int maj_ver, int min_ver, krb5_plugin_vtable vtable) { krb5_kdcpreauth_vtable vt; if (maj_ver != 1) return KRB5_PLUGIN_VER_NOTSUPP; vt = (krb5_kdcpreauth_vtable)vtable; vt->name = "otp"; vt->pa_type_list = otp_pa_type_list; vt->init = otp_init; vt->fini = otp_fini; vt->flags = otp_flags; vt->edata = otp_edata; vt->verify = otp_verify; vt->return_padata = otp_return_padata; com_err("otp", 0, "Loaded"); return 0; }
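/*
 * Illustrative aside (not from the original source): nonce_generate() and
 * nonce_verify() above agree on a nonce layout of a 4-byte big-endian
 * timestamp followed by armor_key->length random octets. The sketch below
 * walks that layout in isolation; the demo_* helpers are hypothetical
 * re-implementations of krb5's internal store_32_be()/load_32_be(), and
 * rand() merely stands in for krb5_c_random_make_octets().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define DEMO_KEY_LEN 16    /* stands in for armor_key->length */

static void demo_store_32_be(uint32_t v, unsigned char *p)
{
    p[0] = (v >> 24) & 0xff;
    p[1] = (v >> 16) & 0xff;
    p[2] = (v >> 8) & 0xff;
    p[3] = v & 0xff;
}

static uint32_t demo_load_32_be(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
        ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
    unsigned char nonce[4 + DEMO_KEY_LEN];
    size_t i;

    /* Generation, mirroring nonce_generate(): random fill, then the
     * current time written over the first four bytes. */
    for (i = 0; i < sizeof(nonce); i++)
        nonce[i] = (unsigned char)rand();
    demo_store_32_be((uint32_t)time(NULL), nonce);

    /* Verification, mirroring nonce_verify(): the length must be exactly
     * key length + 4, and the leading timestamp must then pass the
     * clock-skew check. */
    if (sizeof(nonce) != DEMO_KEY_LEN + 4)
        return 1;
    printf("nonce timestamp: %u\n", (unsigned)demo_load_32_be(nonce));
    return 0;
}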
./CrossVul/dataset_final_sorted/CWE-264/c/good_1533_0
crossvul-cpp_data_good_5054_5
/* * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * - Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/poll.h> #include <linux/cdev.h> #include <linux/vmalloc.h> #include <linux/io.h> #include <rdma/ib.h> #include "hfi.h" #include "pio.h" #include "device.h" #include "common.h" #include "trace.h" #include "user_sdma.h" #include "user_exp_rcv.h" #include "eprom.h" #include "aspm.h" #include "mmu_rb.h" #undef pr_fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt #define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */ /* * File operation functions */ static int hfi1_file_open(struct inode *, struct file *); static int hfi1_file_close(struct inode *, struct file *); static ssize_t hfi1_file_write(struct file *, const char __user *, size_t, loff_t *); static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *); static unsigned int hfi1_poll(struct file *, struct poll_table_struct *); static int hfi1_file_mmap(struct file *, struct vm_area_struct *); static u64 kvirt_to_phys(void *); static int assign_ctxt(struct file *, struct hfi1_user_info *); static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *); static int user_init(struct file *); static int get_ctxt_info(struct file *, void __user *, __u32); static int get_base_info(struct file *, void __user *, __u32); static int setup_ctxt(struct file *); static int setup_subctxt(struct hfi1_ctxtdata *); static int get_user_context(struct file *, struct hfi1_user_info *, int, unsigned); static int find_shared_ctxt(struct file *, const struct hfi1_user_info *); static int allocate_ctxt(struct file *, struct hfi1_devdata *, struct hfi1_user_info *); static unsigned int poll_urgent(struct file *, struct poll_table_struct *); static unsigned int poll_next(struct file *, struct poll_table_struct *); static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long); static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); static int vma_fault(struct vm_area_struct *, struct vm_fault *); static const struct file_operations hfi1_file_ops = { .owner = THIS_MODULE, .write = hfi1_file_write, .write_iter = hfi1_write_iter, .open = hfi1_file_open, .release = hfi1_file_close, .poll = hfi1_poll, .mmap = hfi1_file_mmap, .llseek = noop_llseek, }; static struct vm_operations_struct vm_ops = { .fault = vma_fault, }; /* * Types of memories mapped into user processes' space */ enum mmap_types { PIO_BUFS = 1, PIO_BUFS_SOP, PIO_CRED, RCV_HDRQ, RCV_EGRBUF, UREGS, EVENTS, STATUS, RTAIL, SUBCTXT_UREGS, SUBCTXT_RCV_HDRQ, SUBCTXT_EGRBUF, SDMA_COMP }; /* * Masks and offsets defining the mmap tokens */ #define HFI1_MMAP_OFFSET_MASK 0xfffULL #define HFI1_MMAP_OFFSET_SHIFT 0 #define HFI1_MMAP_SUBCTXT_MASK 0xfULL #define HFI1_MMAP_SUBCTXT_SHIFT 12 #define HFI1_MMAP_CTXT_MASK 0xffULL #define HFI1_MMAP_CTXT_SHIFT 16 #define HFI1_MMAP_TYPE_MASK 0xfULL #define HFI1_MMAP_TYPE_SHIFT 24 #define HFI1_MMAP_MAGIC_MASK 0xffffffffULL #define HFI1_MMAP_MAGIC_SHIFT 32 #define HFI1_MMAP_MAGIC 0xdabbad00 #define HFI1_MMAP_TOKEN_SET(field, val) \ (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT) #define HFI1_MMAP_TOKEN_GET(field, token) \ (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK) #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \ (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \ HFI1_MMAP_TOKEN_SET(TYPE, type) | \ HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \ HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \ HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr)))) #define dbg(fmt, ...) 
\ pr_info(fmt, ##__VA_ARGS__) static inline int is_valid_mmap(u64 token) { return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC); } static int hfi1_file_open(struct inode *inode, struct file *fp) { /* The real work is performed later in assign_ctxt() */ fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL); if (fp->private_data) /* no cpu affinity by default */ ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1; return fp->private_data ? 0 : -ENOMEM; } static ssize_t hfi1_file_write(struct file *fp, const char __user *data, size_t count, loff_t *offset) { const struct hfi1_cmd __user *ucmd; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_cmd cmd; struct hfi1_user_info uinfo; struct hfi1_tid_info tinfo; unsigned long addr; ssize_t consumed = 0, copy = 0, ret = 0; void *dest = NULL; __u64 user_val = 0; int uctxt_required = 1; int must_be_root = 0; /* FIXME: This interface cannot continue out of staging */ if (WARN_ON_ONCE(!ib_safe_file_access(fp))) return -EACCES; if (count < sizeof(cmd)) { ret = -EINVAL; goto bail; } ucmd = (const struct hfi1_cmd __user *)data; if (copy_from_user(&cmd, ucmd, sizeof(cmd))) { ret = -EFAULT; goto bail; } consumed = sizeof(cmd); switch (cmd.type) { case HFI1_CMD_ASSIGN_CTXT: uctxt_required = 0; /* assigned user context not required */ copy = sizeof(uinfo); dest = &uinfo; break; case HFI1_CMD_SDMA_STATUS_UPD: case HFI1_CMD_CREDIT_UPD: copy = 0; break; case HFI1_CMD_TID_UPDATE: case HFI1_CMD_TID_FREE: case HFI1_CMD_TID_INVAL_READ: copy = sizeof(tinfo); dest = &tinfo; break; case HFI1_CMD_USER_INFO: case HFI1_CMD_RECV_CTRL: case HFI1_CMD_POLL_TYPE: case HFI1_CMD_ACK_EVENT: case HFI1_CMD_CTXT_INFO: case HFI1_CMD_SET_PKEY: case HFI1_CMD_CTXT_RESET: copy = 0; user_val = cmd.addr; break; case HFI1_CMD_EP_INFO: case HFI1_CMD_EP_ERASE_CHIP: case HFI1_CMD_EP_ERASE_RANGE: case HFI1_CMD_EP_READ_RANGE: case HFI1_CMD_EP_WRITE_RANGE: uctxt_required = 0; /* assigned user context not required */ must_be_root = 1; /* validate user */ copy = 0; break; default: ret = -EINVAL; goto bail; } /* If the command comes with user data, copy it. */ if (copy) { if (copy_from_user(dest, (void __user *)cmd.addr, copy)) { ret = -EFAULT; goto bail; } consumed += copy; } /* * Make sure there is a uctxt when needed. */ if (uctxt_required && !uctxt) { ret = -EINVAL; goto bail; } /* only root can do these operations */ if (must_be_root && !capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto bail; } switch (cmd.type) { case HFI1_CMD_ASSIGN_CTXT: ret = assign_ctxt(fp, &uinfo); if (ret < 0) goto bail; ret = setup_ctxt(fp); if (ret) goto bail; ret = user_init(fp); break; case HFI1_CMD_CTXT_INFO: ret = get_ctxt_info(fp, (void __user *)(unsigned long) user_val, cmd.len); break; case HFI1_CMD_USER_INFO: ret = get_base_info(fp, (void __user *)(unsigned long) user_val, cmd.len); break; case HFI1_CMD_SDMA_STATUS_UPD: break; case HFI1_CMD_CREDIT_UPD: if (uctxt && uctxt->sc) sc_return_credits(uctxt->sc); break; case HFI1_CMD_TID_UPDATE: ret = hfi1_user_exp_rcv_setup(fp, &tinfo); if (!ret) { /* * Copy the number of tidlist entries we used * and the length of the buffer we registered. * These fields are adjacent in the structure so * we can copy them at the same time. 
			 */
			addr = (unsigned long)cmd.addr +
				offsetof(struct hfi1_tid_info, tidcnt);
			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
					 sizeof(tinfo.tidcnt) +
					 sizeof(tinfo.length)))
				ret = -EFAULT;
		}
		break;
	case HFI1_CMD_TID_INVAL_READ:
		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
		if (ret)
			break;
		addr = (unsigned long)cmd.addr +
			offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_CMD_TID_FREE:
		ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
		if (ret)
			break;
		addr = (unsigned long)cmd.addr +
			offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_CMD_RECV_CTRL:
		ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
		break;
	case HFI1_CMD_POLL_TYPE:
		uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
		break;
	case HFI1_CMD_ACK_EVENT:
		ret = user_event_ack(uctxt, fd->subctxt, user_val);
		break;
	case HFI1_CMD_SET_PKEY:
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
		else
			ret = -EPERM;
		break;
	case HFI1_CMD_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc) {
			ret = -EINVAL;
			break;
		}
		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED)) {
			ret = -ENOLCK;
			break;
		}
		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN) {
				ret = -ENOLCK;
				break;
			}
			if (dd->flags & HFI1_FORCED_FREEZE) {
				/*
				 * Don't allow context reset if we are in a
				 * forced freeze
				 */
				ret = -ENODEV;
				break;
			}
			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
				     uctxt->ctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		ret = handle_eprom_command(fp, &cmd);
		break;
	}

	if (ret >= 0)
		ret = consumed;
bail:
	return ret;
}

static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int ret = 0, done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq) {
		ret = -EIO;
		goto done;
	}

	if (!iter_is_iovec(from) || !dim) {
		ret = -EINVAL;
		goto done;
	}

	hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
		  fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
		ret = -ENOSPC;
		goto done;
	}

	while (dim) {
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			kiocb->ki_filp, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret)
			goto done;
		dim -= count;
		done += count;
		reqs++;
	}
done:
	return ret ?
ret : reqs; } static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd; unsigned long flags, pfn; u64 token = vma->vm_pgoff << PAGE_SHIFT, memaddr = 0; u8 subctxt, mapio = 0, vmf = 0, type; ssize_t memlen = 0; int ret = 0; u16 ctxt; if (!is_valid_mmap(token) || !uctxt || !(vma->vm_flags & VM_SHARED)) { ret = -EINVAL; goto done; } dd = uctxt->dd; ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token); subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token); type = HFI1_MMAP_TOKEN_GET(TYPE, token); if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) { ret = -EINVAL; goto done; } flags = vma->vm_flags; switch (type) { case PIO_BUFS: case PIO_BUFS_SOP: memaddr = ((dd->physaddr + TXE_PIO_SEND) + /* chip pio base */ (uctxt->sc->hw_context * BIT(16))) + /* 64K PIO space / ctxt */ (type == PIO_BUFS_SOP ? (TXE_PIO_SIZE / 2) : 0); /* sop? */ /* * Map only the amount allocated to the context, not the * entire available context's PIO space. */ memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE); flags &= ~VM_MAYREAD; flags |= VM_DONTCOPY | VM_DONTEXPAND; vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); mapio = 1; break; case PIO_CRED: if (flags & VM_WRITE) { ret = -EPERM; goto done; } /* * The credit return location for this context could be on the * second or third page allocated for credit returns (if number * of enabled contexts > 64 and 128 respectively). */ memaddr = dd->cr_base[uctxt->numa_id].pa + (((u64)uctxt->sc->hw_free - (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK); memlen = PAGE_SIZE; flags &= ~VM_MAYWRITE; flags |= VM_DONTCOPY | VM_DONTEXPAND; /* * The driver has already allocated memory for credit * returns and programmed it into the chip. Has that * memory been flagged as non-cached? */ /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */ mapio = 1; break; case RCV_HDRQ: memaddr = uctxt->rcvhdrq_phys; memlen = uctxt->rcvhdrq_size; break; case RCV_EGRBUF: { unsigned long addr; int i; /* * The RcvEgr buffer need to be handled differently * as multiple non-contiguous pages need to be mapped * into the user process. */ memlen = uctxt->egrbufs.size; if ((vma->vm_end - vma->vm_start) != memlen) { dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n", (vma->vm_end - vma->vm_start), memlen); ret = -EINVAL; goto done; } if (vma->vm_flags & VM_WRITE) { ret = -EPERM; goto done; } vma->vm_flags &= ~VM_MAYWRITE; addr = vma->vm_start; for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) { ret = remap_pfn_range( vma, addr, uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT, uctxt->egrbufs.buffers[i].len, vma->vm_page_prot); if (ret < 0) goto done; addr += uctxt->egrbufs.buffers[i].len; } ret = 0; goto done; } case UREGS: /* * Map only the page that contains this context's user * registers. */ memaddr = (unsigned long) (dd->physaddr + RXE_PER_CONTEXT_USER) + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE); /* * TidFlow table is on the same page as the rest of the * user registers. */ memlen = PAGE_SIZE; flags |= VM_DONTCOPY | VM_DONTEXPAND; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mapio = 1; break; case EVENTS: /* * Use the page where this context's flags are. User level * knows where it's own bitmap is within the page. */ memaddr = (unsigned long)(dd->events + ((uctxt->ctxt - dd->first_user_ctxt) * HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK; memlen = PAGE_SIZE; /* * v3.7 removes VM_RESERVED but the effect is kept by * using VM_IO. 
*/ flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; case STATUS: memaddr = kvirt_to_phys((void *)dd->status); memlen = PAGE_SIZE; flags |= VM_IO | VM_DONTEXPAND; break; case RTAIL: if (!HFI1_CAP_IS_USET(DMA_RTAIL)) { /* * If the memory allocation failed, the context alloc * also would have failed, so we would never get here */ ret = -EINVAL; goto done; } if (flags & VM_WRITE) { ret = -EPERM; goto done; } memaddr = uctxt->rcvhdrqtailaddr_phys; memlen = PAGE_SIZE; flags &= ~VM_MAYWRITE; break; case SUBCTXT_UREGS: memaddr = (u64)uctxt->subctxt_uregbase; memlen = PAGE_SIZE; flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; case SUBCTXT_RCV_HDRQ: memaddr = (u64)uctxt->subctxt_rcvhdr_base; memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt; flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; case SUBCTXT_EGRBUF: memaddr = (u64)uctxt->subctxt_rcvegrbuf; memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt; flags |= VM_IO | VM_DONTEXPAND; flags &= ~VM_MAYWRITE; vmf = 1; break; case SDMA_COMP: { struct hfi1_user_sdma_comp_q *cq = fd->cq; if (!cq) { ret = -EFAULT; goto done; } memaddr = (u64)cq->comps; memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries); flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; } default: ret = -EINVAL; break; } if ((vma->vm_end - vma->vm_start) != memlen) { hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu", uctxt->ctxt, fd->subctxt, (vma->vm_end - vma->vm_start), memlen); ret = -EINVAL; goto done; } vma->vm_flags = flags; hfi1_cdbg(PROC, "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n", ctxt, subctxt, type, mapio, vmf, memaddr, memlen, vma->vm_end - vma->vm_start, vma->vm_flags); pfn = (unsigned long)(memaddr >> PAGE_SHIFT); if (vmf) { vma->vm_pgoff = pfn; vma->vm_ops = &vm_ops; ret = 0; } else if (mapio) { ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen, vma->vm_page_prot); } else { ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen, vma->vm_page_prot); } done: return ret; } /* * Local (non-chip) user memory is not mapped right away but as it is * accessed by the user-level code. */ static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page; page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); if (!page) return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt) { struct hfi1_ctxtdata *uctxt; unsigned pollflag; uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt; if (!uctxt) pollflag = POLLERR; else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT) pollflag = poll_urgent(fp, pt); else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV) pollflag = poll_next(fp, pt); else /* invalid */ pollflag = POLLERR; return pollflag; } static int hfi1_file_close(struct inode *inode, struct file *fp) { struct hfi1_filedata *fdata = fp->private_data; struct hfi1_ctxtdata *uctxt = fdata->uctxt; struct hfi1_devdata *dd; unsigned long flags, *ev; fp->private_data = NULL; if (!uctxt) goto done; hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); dd = uctxt->dd; mutex_lock(&hfi1_mutex); flush_wc(); /* drain user sdma queue */ hfi1_user_sdma_free_queues(fdata); /* release the cpu */ hfi1_put_proc_affinity(dd, fdata->rec_cpu_num); /* * Clear any left over, unhandled events so the next process that * gets this context doesn't get confused. 
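	 * The events array is laid out as HFI1_MAX_SHARED_CTXTS words per
	 * user context, so this subcontext's word sits at index
	 * (ctxt - first_user_ctxt) * HFI1_MAX_SHARED_CTXTS + subctxt;
	 * e.g. with first_user_ctxt == 3, ctxt == 5, subctxt == 1 that is
	 * 2 * HFI1_MAX_SHARED_CTXTS + 1.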
	 */
	ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;
	if (--uctxt->cnt) {
		uctxt->active_slaves &= ~(1 << fdata->subctxt);
		uctxt->subpid[fdata->subctxt] = 0;
		mutex_unlock(&hfi1_mutex);
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
	/*
	 * Reset context integrity checks to default.
	 * (writes to CSRs probably belong in chip.c)
	 */
	write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
			hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
	sc_disable(uctxt->sc);
	uctxt->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	dd->rcd[uctxt->ctxt] = NULL;
	hfi1_user_exp_rcv_free(fdata);
	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);

	uctxt->rcvwait_to = 0;
	uctxt->piowait_to = 0;
	uctxt->rcvnowait = 0;
	uctxt->pionowait = 0;
	uctxt->event_flags = 0;

	hfi1_stats.sps_ctxts--;
	if (++dd->freectxts == dd->num_user_contexts)
		aspm_enable_all(dd);
	mutex_unlock(&hfi1_mutex);
	hfi1_free_ctxtdata(dd, uctxt);
done:
	kfree(fdata);
	return 0;
}

/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
	int i_minor, ret = 0;
	unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;

	swmajor = uinfo->userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->userversion & 0xffff;

	if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
		alg = uinfo->hfi1_alg;

	mutex_lock(&hfi1_mutex);
	/* First, let's check whether we need to set up a shared context. */
	if (uinfo->subctxt_cnt) {
		struct hfi1_filedata *fd = fp->private_data;

		ret = find_shared_ctxt(fp, uinfo);
		if (ret < 0)
			goto done_unlock;
		if (ret)
			fd->rec_cpu_num = hfi1_get_proc_affinity(
				fd->uctxt->dd, fd->uctxt->numa_id);
	}

	/*
	 * We execute the following block if we couldn't find a
	 * shared context or if context sharing is not required.
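	 * (find_shared_ctxt() above returns 1 once attached to an existing
	 * master, 0 when no matching context was found, and < 0 on error,
	 * so ret == 0 here means a dedicated context must be allocated.)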
	 */
	if (!ret) {
		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
		ret = get_user_context(fp, uinfo, i_minor - 1, alg);
	}
done_unlock:
	mutex_unlock(&hfi1_mutex);
done:
	return ret;
}

/* Return true if the device is available for general use. */
static int usable_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;

	return driver_lstate(ppd) == IB_PORT_ACTIVE;
}

static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
			    int devno, unsigned alg)
{
	struct hfi1_devdata *dd = NULL;
	int ret = 0, devmax, npresent, nup, dev;

	devmax = hfi1_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (!nup) {
		ret = -ENETDOWN;
		goto done;
	}
	if (devno >= 0) {
		dd = hfi1_lookup(devno);
		if (!dd)
			ret = -ENODEV;
		else if (!dd->freectxts)
			ret = -EBUSY;
	} else {
		struct hfi1_devdata *pdd;

		if (alg == HFI1_ALG_ACROSS) {
			unsigned free = 0U;

			for (dev = 0; dev < devmax; dev++) {
				pdd = hfi1_lookup(dev);
				if (!pdd)
					continue;
				if (!usable_device(pdd))
					continue;
				if (pdd->freectxts &&
				    pdd->freectxts > free) {
					dd = pdd;
					free = pdd->freectxts;
				}
			}
		} else {
			for (dev = 0; dev < devmax; dev++) {
				pdd = hfi1_lookup(dev);
				if (!pdd)
					continue;
				if (!usable_device(pdd))
					continue;
				if (pdd->freectxts) {
					dd = pdd;
					break;
				}
			}
		}
		if (!dd)
			ret = -EBUSY;
	}
done:
	return ret ? ret : allocate_ctxt(fp, dd, uinfo);
}

static int find_shared_ctxt(struct file *fp,
			    const struct hfi1_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;

	devmax = hfi1_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct hfi1_devdata *dd = hfi1_lookup(ndev);

		if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			struct hfi1_ctxtdata *uctxt = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!uctxt || !uctxt->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (memcmp(uctxt->uuid, uinfo->uuid,
				   sizeof(uctxt->uuid)) ||
			    uctxt->jkey != generate_jkey(current_uid()) ||
			    uctxt->subctxt_id != uinfo->subctxt_id ||
			    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
				continue;

			/* Verify the sharing process matches the master */
			if (uctxt->userversion != uinfo->userversion ||
			    uctxt->cnt >= uctxt->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			fd->uctxt = uctxt;
			fd->subctxt = uctxt->cnt++;
			uctxt->subpid[fd->subctxt] = current->pid;
			uctxt->active_slaves |= 1 << fd->subctxt;
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}

static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt;
	unsigned ctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen. It
		 * should be able to retry, with success, in a short
		 * while.
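		 * (The errors returned directly by this function are
		 * -EBUSY and -ENOMEM; helper return codes are propagated
		 * as-is, so -EIO should be unambiguous to the caller.)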
*/ return -EIO; } for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++) if (!dd->rcd[ctxt]) break; if (ctxt == dd->num_rcv_contexts) return -EBUSY; fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1); if (fd->rec_cpu_num != -1) numa = cpu_to_node(fd->rec_cpu_num); else numa = numa_node_id(); uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa); if (!uctxt) { dd_dev_err(dd, "Unable to allocate ctxtdata memory, failing open\n"); return -ENOMEM; } hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)", uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num, uctxt->numa_id); /* * Allocate and enable a PIO send context. */ uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, uctxt->dd->node); if (!uctxt->sc) return -ENOMEM; hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index, uctxt->sc->hw_context); ret = sc_enable(uctxt->sc); if (ret) return ret; /* * Setup shared context resources if the user-level has requested * shared contexts and this is the 'master' process. * This has to be done here so the rest of the sub-contexts find the * proper master. */ if (uinfo->subctxt_cnt && !fd->subctxt) { ret = init_subctxts(uctxt, uinfo); /* * On error, we don't need to disable and de-allocate the * send context because it will be done during file close */ if (ret) return ret; } uctxt->userversion = uinfo->userversion; uctxt->pid = current->pid; uctxt->flags = HFI1_CAP_UGET(MASK); init_waitqueue_head(&uctxt->wait); strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm)); memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)); uctxt->jkey = generate_jkey(current_uid()); INIT_LIST_HEAD(&uctxt->sdma_queues); spin_lock_init(&uctxt->sdma_qlock); hfi1_stats.sps_ctxts++; /* * Disable ASPM when there are open user/PSM contexts to avoid * issues with ASPM L1 exit latency */ if (dd->freectxts-- == dd->num_user_contexts) aspm_disable_all(dd); fd->uctxt = uctxt; return 0; } static int init_subctxts(struct hfi1_ctxtdata *uctxt, const struct hfi1_user_info *uinfo) { unsigned num_subctxts; num_subctxts = uinfo->subctxt_cnt; if (num_subctxts > HFI1_MAX_SHARED_CTXTS) return -EINVAL; uctxt->subctxt_cnt = uinfo->subctxt_cnt; uctxt->subctxt_id = uinfo->subctxt_id; uctxt->active_slaves = 1; uctxt->redirect_seq_cnt = 1; set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags); return 0; } static int setup_subctxt(struct hfi1_ctxtdata *uctxt) { int ret = 0; unsigned num_subctxts = uctxt->subctxt_cnt; uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE); if (!uctxt->subctxt_uregbase) { ret = -ENOMEM; goto bail; } /* We can take the size of the RcvHdr Queue from the master */ uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size * num_subctxts); if (!uctxt->subctxt_rcvhdr_base) { ret = -ENOMEM; goto bail_ureg; } uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size * num_subctxts); if (!uctxt->subctxt_rcvegrbuf) { ret = -ENOMEM; goto bail_rhdr; } goto bail; bail_rhdr: vfree(uctxt->subctxt_rcvhdr_base); bail_ureg: vfree(uctxt->subctxt_uregbase); uctxt->subctxt_uregbase = NULL; bail: return ret; } static int user_init(struct file *fp) { unsigned int rcvctrl_ops = 0; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; /* make sure that the context has already been setup */ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) return -EFAULT; /* initialize poll variables... */ uctxt->urgent = 0; uctxt->urgent_poll = 0; /* * Now enable the ctxt for receive. 
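	 * The rcvctrl_ops mask is accumulated below from the context's
	 * capability flags and then applied in a single hfi1_rcvctrl()
	 * call.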
* For chips that are set to DMA the tail register to memory * when they change (and when the update bit transitions from * 0 to 1. So for those chips, we turn it off and then back on. * This will (very briefly) affect any other open ctxts, but the * duration is very short, and therefore isn't an issue. We * explicitly set the in-memory tail copy to 0 beforehand, so we * don't have to wait to be sure the DMA update has happened * (chip resets head/tail to 0 on transition to enable). */ if (uctxt->rcvhdrtail_kvaddr) clear_rcvhdrtail(uctxt); /* Setup J_KEY before enabling the context */ hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey); rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB; if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP)) rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB; /* * Ignore the bit in the flags for now until proper * support for multiple packet per rcv array entry is * added. */ if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR)) rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB; if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL)) rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB; if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL)) rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB; /* * The RcvCtxtCtrl.TailUpd bit has to be explicitly written. * We can't rely on the correct value to be set from prior * uses of the chip or ctxt. Therefore, add the rcvctrl op * for both cases. */ if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL)) rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB; else rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS; hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt); /* Notify any waiting slaves */ if (uctxt->subctxt_cnt) { clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags); wake_up(&uctxt->wait); } return 0; } static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) { struct hfi1_ctxt_info cinfo; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; int ret = 0; memset(&cinfo, 0, sizeof(cinfo)); ret = hfi1_get_base_kinfo(uctxt, &cinfo); if (ret < 0) goto done; cinfo.num_active = hfi1_count_active_units(); cinfo.unit = uctxt->dd->unit; cinfo.ctxt = uctxt->ctxt; cinfo.subctxt = fd->subctxt; cinfo.rcvtids = roundup(uctxt->egrbufs.alloced, uctxt->dd->rcv_entries.group_size) + uctxt->expected_count; cinfo.credits = uctxt->sc->credits; cinfo.numa_node = uctxt->numa_id; cinfo.rec_cpu = fd->rec_cpu_num; cinfo.send_ctxt = uctxt->sc->hw_context; cinfo.egrtids = uctxt->egrbufs.alloced; cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt; cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2; cinfo.sdma_ring_size = fd->cq->nentries; cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size; trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo); if (copy_to_user(ubase, &cinfo, sizeof(cinfo))) ret = -EFAULT; done: return ret; } static int setup_ctxt(struct file *fp) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; int ret = 0; /* * Context should be set up only once, including allocation and * programming of eager buffers. This is done if context sharing * is not requested or by the master process. */ if (!uctxt->subctxt_cnt || !fd->subctxt) { ret = hfi1_init_ctxt(uctxt->sc); if (ret) goto done; /* Now allocate the RcvHdr queue and eager buffers. 
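		 * (Both must be in place before setup_subctxt() below,
		 * which sizes the subcontext shadow buffers from
		 * rcvhdrq_size and egrbufs.size.)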
*/ ret = hfi1_create_rcvhdrq(dd, uctxt); if (ret) goto done; ret = hfi1_setup_eagerbufs(uctxt); if (ret) goto done; if (uctxt->subctxt_cnt && !fd->subctxt) { ret = setup_subctxt(uctxt); if (ret) goto done; } } else { ret = wait_event_interruptible(uctxt->wait, !test_bit( HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags)); if (ret) goto done; } ret = hfi1_user_sdma_alloc_queues(uctxt, fp); if (ret) goto done; /* * Expected receive has to be setup for all processes (including * shared contexts). However, it has to be done after the master * context has been fully configured as it depends on the * eager/expected split of the RcvArray entries. * Setting it up here ensures that the subcontexts will be waiting * (due to the above wait_event_interruptible() until the master * is setup. */ ret = hfi1_user_exp_rcv_init(fp); if (ret) goto done; set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); done: return ret; } static int get_base_info(struct file *fp, void __user *ubase, __u32 len) { struct hfi1_base_info binfo; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; ssize_t sz; unsigned offset; int ret = 0; trace_hfi1_uctxtdata(uctxt->dd, uctxt); memset(&binfo, 0, sizeof(binfo)); binfo.hw_version = dd->revision; binfo.sw_version = HFI1_KERN_SWVERSION; binfo.bthqp = kdeth_qp; binfo.jkey = uctxt->jkey; /* * If more than 64 contexts are enabled the allocated credit * return will span two or three contiguous pages. Since we only * map the page containing the context's credit return address, * we need to calculate the offset in the proper page. */ offset = ((u64)uctxt->sc->hw_free - (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE; binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt, fd->subctxt, offset); binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt, fd->subctxt, uctxt->sc->base_addr); binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP, uctxt->ctxt, fd->subctxt, uctxt->sc->base_addr); binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt, fd->subctxt, uctxt->rcvhdrq); binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt, fd->subctxt, uctxt->egrbufs.rcvtids[0].phys); binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt, fd->subctxt, 0); /* * user regs are at * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE)) */ binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt, fd->subctxt, 0); offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) * HFI1_MAX_SHARED_CTXTS) + fd->subctxt) * sizeof(*dd->events)); binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt, fd->subctxt, offset); binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt, fd->subctxt, dd->status); if (HFI1_CAP_IS_USET(DMA_RTAIL)) binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt, fd->subctxt, 0); if (uctxt->subctxt_cnt) { binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS, uctxt->ctxt, fd->subctxt, 0); binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ, uctxt->ctxt, fd->subctxt, 0); binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF, uctxt->ctxt, fd->subctxt, 0); } sz = (len < sizeof(binfo)) ? 
len : sizeof(binfo); if (copy_to_user(ubase, &binfo, sz)) ret = -EFAULT; return ret; } static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; unsigned pollflag; poll_wait(fp, &uctxt->wait, pt); spin_lock_irq(&dd->uctxt_lock); if (uctxt->urgent != uctxt->urgent_poll) { pollflag = POLLIN | POLLRDNORM; uctxt->urgent_poll = uctxt->urgent; } else { pollflag = 0; set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags); } spin_unlock_irq(&dd->uctxt_lock); return pollflag; } static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; unsigned pollflag; poll_wait(fp, &uctxt->wait, pt); spin_lock_irq(&dd->uctxt_lock); if (hdrqempty(uctxt)) { set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags); hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt); pollflag = 0; } else { pollflag = POLLIN | POLLRDNORM; } spin_unlock_irq(&dd->uctxt_lock); return pollflag; } /* * Find all user contexts in use, and set the specified bit in their * event mask. * See also find_ctxt() for a similar use, that is specific to send buffers. */ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit) { struct hfi1_ctxtdata *uctxt; struct hfi1_devdata *dd = ppd->dd; unsigned ctxt; int ret = 0; unsigned long flags; if (!dd->events) { ret = -EINVAL; goto done; } spin_lock_irqsave(&dd->uctxt_lock, flags); for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++) { uctxt = dd->rcd[ctxt]; if (uctxt) { unsigned long *evs = dd->events + (uctxt->ctxt - dd->first_user_ctxt) * HFI1_MAX_SHARED_CTXTS; int i; /* * subctxt_cnt is 0 if not shared, so do base * separately, first, then remaining subctxt, if any */ set_bit(evtbit, evs); for (i = 1; i < uctxt->subctxt_cnt; i++) set_bit(evtbit, evs + i); } } spin_unlock_irqrestore(&dd->uctxt_lock, flags); done: return ret; } /** * manage_rcvq - manage a context's receive queue * @uctxt: the context * @subctxt: the sub-context * @start_stop: action to carry out * * start_stop == 0 disables receive on the context, for use in queue * overflow conditions. start_stop==1 re-enables, to be used to * re-init the software copy of the head register */ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt, int start_stop) { struct hfi1_devdata *dd = uctxt->dd; unsigned int rcvctrl_op; if (subctxt) goto bail; /* atomically clear receive enable ctxt. */ if (start_stop) { /* * On enable, force in-memory copy of the tail register to * 0, so that protocol code doesn't have to worry about * whether or not the chip has yet updated the in-memory * copy or not on return from the system call. The chip * always resets it's tail register back to 0 on a * transition from disabled to enabled. */ if (uctxt->rcvhdrtail_kvaddr) clear_rcvhdrtail(uctxt); rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB; } else { rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS; } hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt); /* always; new head should be equal to new tail; see above */ bail: return 0; } /* * clear the event notifier events for this context. * User process then performs actions appropriate to bit having been * set, if desired, and checks again in future. 
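 * Only bits set in the caller-supplied mask are cleared, one
 * test_bit()/clear_bit() pair per bit up to _HFI1_MAX_EVENT_BIT.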
*/ static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt, unsigned long events) { int i; struct hfi1_devdata *dd = uctxt->dd; unsigned long *evs; if (!dd->events) return 0; evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) * HFI1_MAX_SHARED_CTXTS) + subctxt; for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) { if (!test_bit(i, &events)) continue; clear_bit(i, evs); } return 0; } static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt, u16 pkey) { int ret = -ENOENT, i, intable = 0; struct hfi1_pportdata *ppd = uctxt->ppd; struct hfi1_devdata *dd = uctxt->dd; if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) { ret = -EINVAL; goto done; } for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) if (pkey == ppd->pkeys[i]) { intable = 1; break; } if (intable) ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey); done: return ret; } static int ui_open(struct inode *inode, struct file *filp) { struct hfi1_devdata *dd; dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev); filp->private_data = dd; /* for other methods */ return 0; } static int ui_release(struct inode *inode, struct file *filp) { /* nothing to do */ return 0; } static loff_t ui_lseek(struct file *filp, loff_t offset, int whence) { struct hfi1_devdata *dd = filp->private_data; return fixed_size_llseek(filp, offset, whence, (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE); } /* NOTE: assumes unsigned long is 8 bytes */ static ssize_t ui_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { struct hfi1_devdata *dd = filp->private_data; void __iomem *base = dd->kregbase; unsigned long total, csr_off, barlen = (dd->kregend - dd->kregbase); u64 data; /* only read 8 byte quantities */ if ((count % 8) != 0) return -EINVAL; /* offset must be 8-byte aligned */ if ((*f_pos % 8) != 0) return -EINVAL; /* destination buffer must be 8-byte aligned */ if ((unsigned long)buf % 8 != 0) return -EINVAL; /* must be in range */ if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE)) return -EINVAL; /* only set the base if we are not starting past the BAR */ if (*f_pos < barlen) base += *f_pos; csr_off = *f_pos; for (total = 0; total < count; total += 8, csr_off += 8) { /* accessing LCB CSRs requires more checks */ if (is_lcb_offset(csr_off)) { if (read_lcb_csr(dd, csr_off, (u64 *)&data)) break; /* failed */ } /* * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a * false parity error. Avoid the whole issue by not reading * them. These registers are defined as having a read value * of 0. */ else if (csr_off == ASIC_GPIO_CLEAR || csr_off == ASIC_GPIO_FORCE || csr_off == ASIC_QSFP1_CLEAR || csr_off == ASIC_QSFP1_FORCE || csr_off == ASIC_QSFP2_CLEAR || csr_off == ASIC_QSFP2_FORCE) data = 0; else if (csr_off >= barlen) { /* * read_8051_data can read more than just 8 bytes at * a time. However, folding this into the loop and * handling the reads in 8 byte increments allows us * to smoothly transition from chip memory to 8051 * memory. 
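			 * The 8051 data memory starts right past the BAR,
			 * so (csr_off - barlen) is the offset within that
			 * memory.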
*/ if (read_8051_data(dd, (u32)(csr_off - barlen), sizeof(data), &data)) break; /* failed */ } else data = readq(base + total); if (put_user(data, (unsigned long __user *)(buf + total))) break; } *f_pos += total; return total; } /* NOTE: assumes unsigned long is 8 bytes */ static ssize_t ui_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { struct hfi1_devdata *dd = filp->private_data; void __iomem *base; unsigned long total, data, csr_off; int in_lcb; /* only write 8 byte quantities */ if ((count % 8) != 0) return -EINVAL; /* offset must be 8-byte aligned */ if ((*f_pos % 8) != 0) return -EINVAL; /* source buffer must be 8-byte aligned */ if ((unsigned long)buf % 8 != 0) return -EINVAL; /* must be in range */ if (*f_pos + count > dd->kregend - dd->kregbase) return -EINVAL; base = (void __iomem *)dd->kregbase + *f_pos; csr_off = *f_pos; in_lcb = 0; for (total = 0; total < count; total += 8, csr_off += 8) { if (get_user(data, (unsigned long __user *)(buf + total))) break; /* accessing LCB CSRs requires a special procedure */ if (is_lcb_offset(csr_off)) { if (!in_lcb) { int ret = acquire_lcb_access(dd, 1); if (ret) break; in_lcb = 1; } } else { if (in_lcb) { release_lcb_access(dd, 1); in_lcb = 0; } } writeq(data, base + total); } if (in_lcb) release_lcb_access(dd, 1); *f_pos += total; return total; } static const struct file_operations ui_file_ops = { .owner = THIS_MODULE, .llseek = ui_lseek, .read = ui_read, .write = ui_write, .open = ui_open, .release = ui_release, }; #define UI_OFFSET 192 /* device minor offset for UI devices */ static int create_ui = 1; static struct cdev wildcard_cdev; static struct device *wildcard_device; static atomic_t user_count = ATOMIC_INIT(0); static void user_remove(struct hfi1_devdata *dd) { if (atomic_dec_return(&user_count) == 0) hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device); hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device); hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device); } static int user_add(struct hfi1_devdata *dd) { char name[10]; int ret; if (atomic_inc_return(&user_count) == 1) { ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops, &wildcard_cdev, &wildcard_device, true); if (ret) goto done; } snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops, &dd->user_cdev, &dd->user_device, true); if (ret) goto done; if (create_ui) { snprintf(name, sizeof(name), "%s_ui%d", class_name(), dd->unit); ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops, &dd->ui_cdev, &dd->ui_device, false); if (ret) goto done; } return 0; done: user_remove(dd); return ret; } /* * Create per-unit files in /dev */ int hfi1_device_create(struct hfi1_devdata *dd) { int r, ret; r = user_add(dd); ret = hfi1_diag_add(dd); if (r && !ret) ret = r; return ret; } /* * Remove per-unit files in /dev * void, core kernel returns no errors for this stuff */ void hfi1_device_remove(struct hfi1_devdata *dd) { user_remove(dd); hfi1_diag_remove(dd); }
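/*
 * Illustrative user-space sketch (not part of the driver): how the
 * character devices created by user_add() might be opened. It assumes
 * class_name() yields "hfi1", so unit 0 would appear as /dev/hfi1_0;
 * the path is hypothetical and should be adjusted to the actual class
 * name on a given build. Kept inside #if 0 so this file still compiles.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int open_hfi1_unit0(void)
{
	/* Per-unit user device: /dev/<class>_<unit> (assumed name) */
	int fd = open("/dev/hfi1_0", O_RDWR);

	if (fd < 0)
		return -1;
	/* ... issue HFI1_CMD_* writes and mmap() calls against fd ... */
	return fd;
}
#endif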
./CrossVul/dataset_final_sorted/CWE-264/c/good_5054_5
crossvul-cpp_data_good_3524_9
//------------------------------------------------------------------------------ // Copyright (c) 2004-2010 Atheros Communications Inc. // All rights reserved. // // // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // // // // Author(s): ="Atheros" //------------------------------------------------------------------------------ /* * This driver is a pseudo ethernet driver to access the Atheros AR6000 * WLAN Device */ #include "ar6000_drv.h" #include "cfg80211.h" #include "htc.h" #include "wmi_filter_linux.h" #include "epping_test.h" #include "wlan_config.h" #include "ar3kconfig.h" #include "ar6k_pal.h" #include "AR6002/addrs.h" /* LINUX_HACK_FUDGE_FACTOR -- this is used to provide a workaround for linux behavior. When * the meta data was added to the header it was found that linux did not correctly provide * enough headroom. However when more headroom was requested beyond what was truly needed * Linux gave the requested headroom. Therefore to get the necessary headroom from Linux * the driver requests more than is needed by the amount = LINUX_HACK_FUDGE_FACTOR */ #define LINUX_HACK_FUDGE_FACTOR 16 #define BDATA_BDADDR_OFFSET 28 u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; u8 null_mac[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; #ifdef DEBUG #define ATH_DEBUG_DBG_LOG ATH_DEBUG_MAKE_MODULE_MASK(0) #define ATH_DEBUG_WLAN_CONNECT ATH_DEBUG_MAKE_MODULE_MASK(1) #define ATH_DEBUG_WLAN_SCAN ATH_DEBUG_MAKE_MODULE_MASK(2) #define ATH_DEBUG_WLAN_TX ATH_DEBUG_MAKE_MODULE_MASK(3) #define ATH_DEBUG_WLAN_RX ATH_DEBUG_MAKE_MODULE_MASK(4) #define ATH_DEBUG_HTC_RAW ATH_DEBUG_MAKE_MODULE_MASK(5) #define ATH_DEBUG_HCI_BRIDGE ATH_DEBUG_MAKE_MODULE_MASK(6) static struct ath_debug_mask_description driver_debug_desc[] = { { ATH_DEBUG_DBG_LOG , "Target Debug Logs"}, { ATH_DEBUG_WLAN_CONNECT , "WLAN connect"}, { ATH_DEBUG_WLAN_SCAN , "WLAN scan"}, { ATH_DEBUG_WLAN_TX , "WLAN Tx"}, { ATH_DEBUG_WLAN_RX , "WLAN Rx"}, { ATH_DEBUG_HTC_RAW , "HTC Raw IF tracing"}, { ATH_DEBUG_HCI_BRIDGE , "HCI Bridge Setup"}, { ATH_DEBUG_HCI_RECV , "HCI Recv tracing"}, { ATH_DEBUG_HCI_DUMP , "HCI Packet dumps"}, }; ATH_DEBUG_INSTANTIATE_MODULE_VAR(driver, "driver", "Linux Driver Interface", ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_WLAN_SCAN | ATH_DEBUG_HCI_BRIDGE, ATH_DEBUG_DESCRIPTION_COUNT(driver_debug_desc), driver_debug_desc); #endif #define IS_MAC_NULL(mac) (mac[0]==0 && mac[1]==0 && mac[2]==0 && mac[3]==0 && mac[4]==0 && mac[5]==0) #define IS_MAC_BCAST(mac) (*mac==0xff) #define DESCRIPTION "Driver to access the Atheros AR600x Device, version " __stringify(__VER_MAJOR_) "." __stringify(__VER_MINOR_) "." __stringify(__VER_PATCH_) "." 
__stringify(__BUILD_NUMBER_) MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION(DESCRIPTION); MODULE_LICENSE("Dual BSD/GPL"); #ifndef REORG_APTC_HEURISTICS #undef ADAPTIVE_POWER_THROUGHPUT_CONTROL #endif /* REORG_APTC_HEURISTICS */ #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL #define APTC_TRAFFIC_SAMPLING_INTERVAL 100 /* msec */ #define APTC_UPPER_THROUGHPUT_THRESHOLD 3000 /* Kbps */ #define APTC_LOWER_THROUGHPUT_THRESHOLD 2000 /* Kbps */ typedef struct aptc_traffic_record { bool timerScheduled; struct timeval samplingTS; unsigned long bytesReceived; unsigned long bytesTransmitted; } APTC_TRAFFIC_RECORD; A_TIMER aptcTimer; APTC_TRAFFIC_RECORD aptcTR; #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ #ifdef EXPORT_HCI_BRIDGE_INTERFACE // callbacks registered by HCI transport driver struct hci_transport_callbacks ar6kHciTransCallbacks = { NULL }; #endif unsigned int processDot11Hdr = 0; char ifname[IFNAMSIZ] = {0,}; int wlaninitmode = WLAN_INIT_MODE_DEFAULT; static bool bypasswmi; unsigned int debuglevel = 0; int tspecCompliance = ATHEROS_COMPLIANCE; unsigned int busspeedlow = 0; unsigned int onebitmode = 0; unsigned int skipflash = 0; unsigned int wmitimeout = 2; unsigned int wlanNodeCaching = 1; unsigned int enableuartprint = ENABLEUARTPRINT_DEFAULT; unsigned int logWmiRawMsgs = 0; unsigned int enabletimerwar = 0; unsigned int num_device = 1; unsigned int regscanmode; unsigned int fwmode = 1; unsigned int mbox_yield_limit = 99; unsigned int enablerssicompensation = 0; int reduce_credit_dribble = 1 + HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_ONE_HALF; int allow_trace_signal = 0; #ifdef CONFIG_HOST_TCMD_SUPPORT unsigned int testmode =0; #endif unsigned int irqprocmode = HIF_DEVICE_IRQ_SYNC_ONLY;//HIF_DEVICE_IRQ_ASYNC_SYNC; unsigned int panic_on_assert = 1; unsigned int nohifscattersupport = NOHIFSCATTERSUPPORT_DEFAULT; unsigned int setuphci = SETUPHCI_DEFAULT; unsigned int loghci = 0; unsigned int setupbtdev = SETUPBTDEV_DEFAULT; #ifndef EXPORT_HCI_BRIDGE_INTERFACE unsigned int ar3khcibaud = AR3KHCIBAUD_DEFAULT; unsigned int hciuartscale = HCIUARTSCALE_DEFAULT; unsigned int hciuartstep = HCIUARTSTEP_DEFAULT; #endif unsigned int csumOffload=0; unsigned int csumOffloadTest=0; unsigned int eppingtest=0; unsigned int mac_addr_method; unsigned int firmware_bridge; module_param_string(ifname, ifname, sizeof(ifname), 0644); module_param(wlaninitmode, int, 0644); module_param(bypasswmi, bool, 0644); module_param(debuglevel, uint, 0644); module_param(tspecCompliance, int, 0644); module_param(onebitmode, uint, 0644); module_param(busspeedlow, uint, 0644); module_param(skipflash, uint, 0644); module_param(wmitimeout, uint, 0644); module_param(wlanNodeCaching, uint, 0644); module_param(logWmiRawMsgs, uint, 0644); module_param(enableuartprint, uint, 0644); module_param(enabletimerwar, uint, 0644); module_param(fwmode, uint, 0644); module_param(mbox_yield_limit, uint, 0644); module_param(reduce_credit_dribble, int, 0644); module_param(allow_trace_signal, int, 0644); module_param(enablerssicompensation, uint, 0644); module_param(processDot11Hdr, uint, 0644); module_param(csumOffload, uint, 0644); #ifdef CONFIG_HOST_TCMD_SUPPORT module_param(testmode, uint, 0644); #endif module_param(irqprocmode, uint, 0644); module_param(nohifscattersupport, uint, 0644); module_param(panic_on_assert, uint, 0644); module_param(setuphci, uint, 0644); module_param(loghci, uint, 0644); module_param(setupbtdev, uint, 0644); #ifndef EXPORT_HCI_BRIDGE_INTERFACE module_param(ar3khcibaud, uint, 0644); module_param(hciuartscale, 
uint, 0644); module_param(hciuartstep, uint, 0644); #endif module_param(eppingtest, uint, 0644); /* in 2.6.10 and later this is now a pointer to a uint */ unsigned int _mboxnum = HTC_MAILBOX_NUM_MAX; #define mboxnum &_mboxnum #ifdef DEBUG u32 g_dbg_flags = DBG_DEFAULTS; unsigned int debugflags = 0; int debugdriver = 0; unsigned int debughtc = 0; unsigned int debugbmi = 0; unsigned int debughif = 0; unsigned int txcreditsavailable[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int txcreditsconsumed[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int txcreditintrenable[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int txcreditintrenableaggregate[HTC_MAILBOX_NUM_MAX] = {0}; module_param(debugflags, uint, 0644); module_param(debugdriver, int, 0644); module_param(debughtc, uint, 0644); module_param(debugbmi, uint, 0644); module_param(debughif, uint, 0644); module_param_array(txcreditsavailable, uint, mboxnum, 0644); module_param_array(txcreditsconsumed, uint, mboxnum, 0644); module_param_array(txcreditintrenable, uint, mboxnum, 0644); module_param_array(txcreditintrenableaggregate, uint, mboxnum, 0644); #endif /* DEBUG */ unsigned int resetok = 1; unsigned int tx_attempt[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int tx_post[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int tx_complete[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int hifBusRequestNumMax = 40; unsigned int war23838_disabled = 0; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL unsigned int enableAPTCHeuristics = 1; #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ module_param_array(tx_attempt, uint, mboxnum, 0644); module_param_array(tx_post, uint, mboxnum, 0644); module_param_array(tx_complete, uint, mboxnum, 0644); module_param(hifBusRequestNumMax, uint, 0644); module_param(war23838_disabled, uint, 0644); module_param(resetok, uint, 0644); #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL module_param(enableAPTCHeuristics, uint, 0644); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ #ifdef BLOCK_TX_PATH_FLAG int blocktx = 0; module_param(blocktx, int, 0644); #endif /* BLOCK_TX_PATH_FLAG */ typedef struct user_rssi_compensation_t { u16 customerID; union { u16 a_enable; u16 bg_enable; u16 enable; }; s16 bg_param_a; s16 bg_param_b; s16 a_param_a; s16 a_param_b; u32 reserved; } USER_RSSI_CPENSATION; static USER_RSSI_CPENSATION rssi_compensation_param; static s16 rssi_compensation_table[96]; int reconnect_flag = 0; static ar6k_pal_config_t ar6k_pal_config_g; /* Function declarations */ static int ar6000_init_module(void); static void ar6000_cleanup_module(void); int ar6000_init(struct net_device *dev); static int ar6000_open(struct net_device *dev); static int ar6000_close(struct net_device *dev); static void ar6000_init_control_info(struct ar6_softc *ar); static int ar6000_data_tx(struct sk_buff *skb, struct net_device *dev); void ar6000_destroy(struct net_device *dev, unsigned int unregister); static void ar6000_detect_error(unsigned long ptr); static void ar6000_set_multicast_list(struct net_device *dev); static struct net_device_stats *ar6000_get_stats(struct net_device *dev); static void disconnect_timer_handler(unsigned long ptr); void read_rssi_compensation_param(struct ar6_softc *ar); /* * HTC service connection handlers */ static int ar6000_avail_ev(void *context, void *hif_handle); static int ar6000_unavail_ev(void *context, void *hif_handle); int ar6000_configure_target(struct ar6_softc *ar); static void ar6000_target_failure(void *Instance, int Status); static void ar6000_rx(void *Context, struct htc_packet *pPacket); static void ar6000_rx_refill(void *Context,HTC_ENDPOINT_ID Endpoint); 
static void ar6000_tx_complete(void *Context, struct htc_packet_queue *pPackets);
static HTC_SEND_FULL_ACTION ar6000_tx_queue_full(void *Context, struct htc_packet *pPacket);
static void ar6000_alloc_netbufs(A_NETBUF_QUEUE_T *q, u16 num);
static void ar6000_deliver_frames_to_nw_stack(void *dev, void *osbuf);
//static void ar6000_deliver_frames_to_bt_stack(void *dev, void *osbuf);
static struct htc_packet *ar6000_alloc_amsdu_rxbuf(void *Context, HTC_ENDPOINT_ID Endpoint, int Length);
static void ar6000_refill_amsdu_rxbufs(struct ar6_softc *ar, int Count);
static void ar6000_cleanup_amsdu_rxbufs(struct ar6_softc *ar);
static ssize_t ar6000_sysfs_bmi_read(struct file *fp, struct kobject *kobj,
                                     struct bin_attribute *bin_attr,
                                     char *buf, loff_t pos, size_t count);
static ssize_t ar6000_sysfs_bmi_write(struct file *fp, struct kobject *kobj,
                                      struct bin_attribute *bin_attr,
                                      char *buf, loff_t pos, size_t count);
static int ar6000_sysfs_bmi_init(struct ar6_softc *ar);
void ar6k_cleanup_hci_pal(struct ar6_softc *ar);
static void ar6000_sysfs_bmi_deinit(struct ar6_softc *ar);
int ar6000_sysfs_bmi_get_config(struct ar6_softc *ar, u32 mode);

/*
 * Static variables
 */
struct net_device *ar6000_devices[MAX_AR6000];
static int is_netdev_registered;
DECLARE_WAIT_QUEUE_HEAD(arEvent);
static void ar6000_cookie_init(struct ar6_softc *ar);
static void ar6000_cookie_cleanup(struct ar6_softc *ar);
static void ar6000_free_cookie(struct ar6_softc *ar, struct ar_cookie *cookie);
static struct ar_cookie *ar6000_alloc_cookie(struct ar6_softc *ar);
static int ar6000_reinstall_keys(struct ar6_softc *ar, u8 key_op_ctrl);

#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
struct net_device *arApNetDev;
#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */

static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM];

#define HOST_INTEREST_ITEM_ADDRESS(ar, item) \
    (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_HOST_INTEREST_ITEM_ADDRESS(item) : \
    (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))

static struct net_device_ops ar6000_netdev_ops = {
    .ndo_init               = NULL,
    .ndo_open               = ar6000_open,
    .ndo_stop               = ar6000_close,
    .ndo_get_stats          = ar6000_get_stats,
    .ndo_start_xmit         = ar6000_data_tx,
    .ndo_set_multicast_list = ar6000_set_multicast_list,
};

/* Debug log support */

/*
 * Flag to govern whether the debug logs should be parsed in the kernel
 * or reported to the application.
 */
#define REPORT_DEBUG_LOGS_TO_APP

int
ar6000_set_host_app_area(struct ar6_softc *ar)
{
    u32 address, data;
    struct host_app_area_s host_app_area;

    /* Fetch the address of the host_app_area_s instance in the host interest area */
    address = TARG_VTOP(ar->arTargetType, HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest));
    if (ar6000_ReadRegDiag(ar->arHifDevice, &address, &data) != 0) {
        return A_ERROR;
    }
    address = TARG_VTOP(ar->arTargetType, data);
    host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
    if (ar6000_WriteDataDiag(ar->arHifDevice, address,
                             (u8 *)&host_app_area,
                             sizeof(struct host_app_area_s)) != 0)
    {
        return A_ERROR;
    }

    return 0;
}

u32
dbglog_get_debug_hdr_ptr(struct ar6_softc *ar)
{
    u32 param;
    u32 address;
    int status;

    address = TARG_VTOP(ar->arTargetType, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dbglog_hdr));
    if ((status = ar6000_ReadDataDiag(ar->arHifDevice, address,
                                      (u8 *)&param, 4)) != 0)
    {
        param = 0;
    }

    return param;
}

/*
 * The dbglog module has been initialized. It's OK to access the relevant
 * data structures over the diagnostic window.
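 * (ar6000_dbglog_get_debug_logs() below gates on dbglog_init_done
 * before touching the target-side ring buffer.)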
*/ void ar6000_dbglog_init_done(struct ar6_softc *ar) { ar->dbglog_init_done = true; } u32 dbglog_get_debug_fragment(s8 *datap, u32 len, u32 limit) { s32 *buffer; u32 count; u32 numargs; u32 length; u32 fraglen; count = fraglen = 0; buffer = (s32 *)datap; length = (limit >> 2); if (len <= limit) { fraglen = len; } else { while (count < length) { numargs = DBGLOG_GET_NUMARGS(buffer[count]); fraglen = (count << 2); count += numargs + 1; } } return fraglen; } void dbglog_parse_debug_logs(s8 *datap, u32 len) { s32 *buffer; u32 count; u32 timestamp; u32 debugid; u32 moduleid; u32 numargs; u32 length; count = 0; buffer = (s32 *)datap; length = (len >> 2); while (count < length) { debugid = DBGLOG_GET_DBGID(buffer[count]); moduleid = DBGLOG_GET_MODULEID(buffer[count]); numargs = DBGLOG_GET_NUMARGS(buffer[count]); timestamp = DBGLOG_GET_TIMESTAMP(buffer[count]); switch (numargs) { case 0: AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d)\n", moduleid, debugid, timestamp)); break; case 1: AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d): 0x%x\n", moduleid, debugid, timestamp, buffer[count+1])); break; case 2: AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d): 0x%x, 0x%x\n", moduleid, debugid, timestamp, buffer[count+1], buffer[count+2])); break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid args: %d\n", numargs)); } count += numargs + 1; } } int ar6000_dbglog_get_debug_logs(struct ar6_softc *ar) { u32 data[8]; /* Should be able to accommodate struct dbglog_buf_s */ u32 address; u32 length; u32 dropped; u32 firstbuf; u32 debug_hdr_ptr; if (!ar->dbglog_init_done) return A_ERROR; AR6000_SPIN_LOCK(&ar->arLock, 0); if (ar->dbgLogFetchInProgress) { AR6000_SPIN_UNLOCK(&ar->arLock, 0); return A_EBUSY; } /* block out others */ ar->dbgLogFetchInProgress = true; AR6000_SPIN_UNLOCK(&ar->arLock, 0); debug_hdr_ptr = dbglog_get_debug_hdr_ptr(ar); printk("debug_hdr_ptr: 0x%x\n", debug_hdr_ptr); /* Get the contents of the ring buffer */ if (debug_hdr_ptr) { address = TARG_VTOP(ar->arTargetType, debug_hdr_ptr); length = 4 /* sizeof(dbuf) */ + 4 /* sizeof(dropped) */; A_MEMZERO(data, sizeof(data)); ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)data, length); address = TARG_VTOP(ar->arTargetType, data[0] /* dbuf */); firstbuf = address; dropped = data[1]; /* dropped */ length = 4 /* sizeof(next) */ + 4 /* sizeof(buffer) */ + 4 /* sizeof(bufsize) */ + 4 /* sizeof(length) */ + 4 /* sizeof(count) */ + 4 /* sizeof(free) */; A_MEMZERO(data, sizeof(data)); ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)&data, length); do { address = TARG_VTOP(ar->arTargetType, data[1] /* buffer*/); length = data[3]; /* length */ if ((length) && (length <= data[2] /* bufsize*/)) { /* Rewind the index if it is about to overrun the buffer */ if (ar->log_cnt > (DBGLOG_HOST_LOG_BUFFER_SIZE - length)) { ar->log_cnt = 0; } if(0 != ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)&ar->log_buffer[ar->log_cnt], length)) { break; } ar6000_dbglog_event(ar, dropped, (s8 *)&ar->log_buffer[ar->log_cnt], length); ar->log_cnt += length; } else { AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("Length: %d (Total size: %d)\n", data[3], data[2])); } address = TARG_VTOP(ar->arTargetType, data[0] /* next */); length = 4 /* sizeof(next) */ + 4 /* sizeof(buffer) */ + 4 /* sizeof(bufsize) */ + 4 /* sizeof(length) */ + 4 /* sizeof(count) */ + 4 /* sizeof(free) */; A_MEMZERO(data, sizeof(data)); if(0 != ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)&data, length)) { break; } } while (address != firstbuf); } ar->dbgLogFetchInProgress = false; return 0; } void 
ar6000_dbglog_event(struct ar6_softc *ar, u32 dropped, s8 *buffer, u32 length) { #ifdef REPORT_DEBUG_LOGS_TO_APP #define MAX_WIRELESS_EVENT_SIZE 252 /* * Break it up into chunks of MAX_WIRELESS_EVENT_SIZE bytes of messages. * There seems to be a limitation on the length of message that could be * transmitted to the user app via this mechanism. */ u32 send, sent; sent = 0; send = dbglog_get_debug_fragment(&buffer[sent], length - sent, MAX_WIRELESS_EVENT_SIZE); while (send) { sent += send; send = dbglog_get_debug_fragment(&buffer[sent], length - sent, MAX_WIRELESS_EVENT_SIZE); } #else AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Dropped logs: 0x%x\nDebug info length: %d\n", dropped, length)); /* Interpret the debug logs */ dbglog_parse_debug_logs((s8 *)buffer, length); #endif /* REPORT_DEBUG_LOGS_TO_APP */ } static int __init ar6000_init_module(void) { static int probed = 0; int r; OSDRV_CALLBACKS osdrvCallbacks; a_module_debug_support_init(); #ifdef DEBUG /* check for debug mask overrides */ if (debughtc != 0) { ATH_DEBUG_SET_DEBUG_MASK(htc,debughtc); } if (debugbmi != 0) { ATH_DEBUG_SET_DEBUG_MASK(bmi,debugbmi); } if (debughif != 0) { ATH_DEBUG_SET_DEBUG_MASK(hif,debughif); } if (debugdriver != 0) { ATH_DEBUG_SET_DEBUG_MASK(driver,debugdriver); } #endif A_REGISTER_MODULE_DEBUG_INFO(driver); A_MEMZERO(&osdrvCallbacks,sizeof(osdrvCallbacks)); osdrvCallbacks.deviceInsertedHandler = ar6000_avail_ev; osdrvCallbacks.deviceRemovedHandler = ar6000_unavail_ev; #ifdef CONFIG_PM osdrvCallbacks.deviceSuspendHandler = ar6000_suspend_ev; osdrvCallbacks.deviceResumeHandler = ar6000_resume_ev; osdrvCallbacks.devicePowerChangeHandler = ar6000_power_change_ev; #endif #ifdef DEBUG /* Set the debug flags if specified at load time */ if(debugflags != 0) { g_dbg_flags = debugflags; } #endif if (probed) { return -ENODEV; } probed++; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL memset(&aptcTR, 0, sizeof(APTC_TRAFFIC_RECORD)); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ r = HIFInit(&osdrvCallbacks); if (r) return r; return 0; } static void __exit ar6000_cleanup_module(void) { int i = 0; struct net_device *ar6000_netdev; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL /* Delete the Adaptive Power Control timer */ if (timer_pending(&aptcTimer)) { del_timer_sync(&aptcTimer); } #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ for (i=0; i < MAX_AR6000; i++) { if (ar6000_devices[i] != NULL) { ar6000_netdev = ar6000_devices[i]; ar6000_devices[i] = NULL; ar6000_destroy(ar6000_netdev, 1); } } HIFShutDownDevice(NULL); a_module_debug_support_cleanup(); AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_cleanup: success\n")); } #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL void aptcTimerHandler(unsigned long arg) { u32 numbytes; u32 throughput; struct ar6_softc *ar; int status; ar = (struct ar6_softc *)arg; A_ASSERT(ar != NULL); A_ASSERT(!timer_pending(&aptcTimer)); AR6000_SPIN_LOCK(&ar->arLock, 0); /* Get the number of bytes transferred */ numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived; aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0; /* Calculate and decide based on throughput thresholds */ throughput = ((numbytes * 8)/APTC_TRAFFIC_SAMPLING_INTERVAL); /* Kbps */ if (throughput < APTC_LOWER_THROUGHPUT_THRESHOLD) { /* Enable Sleep and delete the timer */ A_ASSERT(ar->arWmiReady == true); AR6000_SPIN_UNLOCK(&ar->arLock, 0); status = wmi_powermode_cmd(ar->arWmi, REC_POWER); AR6000_SPIN_LOCK(&ar->arLock, 0); A_ASSERT(status == 0); aptcTR.timerScheduled = false; } else { A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0); } 
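    /*
     * Worked example: 25,000 bytes moved in the 100 ms sampling window
     * gives (25000 * 8) / 100 = 2000 Kbps, exactly the lower threshold;
     * only strictly lower throughput re-enables REC_POWER sleep above.
     */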
AR6000_SPIN_UNLOCK(&ar->arLock, 0); } #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ static void ar6000_alloc_netbufs(A_NETBUF_QUEUE_T *q, u16 num) { void * osbuf; while(num) { if((osbuf = A_NETBUF_ALLOC(AR6000_BUFFER_SIZE))) { A_NETBUF_ENQUEUE(q, osbuf); } else { break; } num--; } if(num) { A_PRINTF("%s(), allocation of netbuf failed", __func__); } } static struct bin_attribute bmi_attr = { .attr = {.name = "bmi", .mode = 0600}, .read = ar6000_sysfs_bmi_read, .write = ar6000_sysfs_bmi_write, }; static ssize_t ar6000_sysfs_bmi_read(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { int index; struct ar6_softc *ar; struct hif_device_os_device_info *osDevInfo; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Read %d bytes\n", (u32)count)); for (index=0; index < MAX_AR6000; index++) { ar = (struct ar6_softc *)ar6k_priv(ar6000_devices[index]); osDevInfo = &ar->osDevInfo; if (kobj == (&(((struct device *)osDevInfo->pOSDevice)->kobj))) { break; } } if (index == MAX_AR6000) return 0; if ((BMIRawRead(ar->arHifDevice, (u8*)buf, count, true)) != 0) { return 0; } return count; } static ssize_t ar6000_sysfs_bmi_write(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { int index; struct ar6_softc *ar; struct hif_device_os_device_info *osDevInfo; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Write %d bytes\n", (u32)count)); for (index=0; index < MAX_AR6000; index++) { ar = (struct ar6_softc *)ar6k_priv(ar6000_devices[index]); osDevInfo = &ar->osDevInfo; if (kobj == (&(((struct device *)osDevInfo->pOSDevice)->kobj))) { break; } } if (index == MAX_AR6000) return 0; if ((BMIRawWrite(ar->arHifDevice, (u8*)buf, count)) != 0) { return 0; } return count; } static int ar6000_sysfs_bmi_init(struct ar6_softc *ar) { int status; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Creating sysfs entry\n")); A_MEMZERO(&ar->osDevInfo, sizeof(struct hif_device_os_device_info)); /* Get the underlying OS device */ status = HIFConfigureDevice(ar->arHifDevice, HIF_DEVICE_GET_OS_DEVICE, &ar->osDevInfo, sizeof(struct hif_device_os_device_info)); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI: Failed to get OS device info from HIF\n")); return A_ERROR; } /* Create a bmi entry in the sysfs filesystem */ if ((sysfs_create_bin_file(&(((struct device *)ar->osDevInfo.pOSDevice)->kobj), &bmi_attr)) < 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMI: Failed to create entry for bmi in sysfs filesystem\n")); return A_ERROR; } return 0; } static void ar6000_sysfs_bmi_deinit(struct ar6_softc *ar) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Deleting sysfs entry\n")); sysfs_remove_bin_file(&(((struct device *)ar->osDevInfo.pOSDevice)->kobj), &bmi_attr); } #define bmifn(fn) do { \ if ((fn) < 0) { \ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); \ return A_ERROR; \ } \ } while(0) #ifdef SOFTMAC_FILE_USED #define AR6002_MAC_ADDRESS_OFFSET 0x0A #define AR6003_MAC_ADDRESS_OFFSET 0x16 static void calculate_crc(u32 TargetType, u8 *eeprom_data) { u16 *ptr_crc; u16 *ptr16_eeprom; u16 checksum; u32 i; u32 eeprom_size; if (TargetType == TARGET_TYPE_AR6001) { eeprom_size = 512; ptr_crc = (u16 *)eeprom_data; } else if (TargetType == TARGET_TYPE_AR6003) { eeprom_size = 1024; ptr_crc = (u16 *)((u8 *)eeprom_data + 0x04); } else { eeprom_size = 768; ptr_crc = (u16 *)((u8 *)eeprom_data + 0x04); } // Clear the crc *ptr_crc = 0; // Recalculate new CRC checksum = 0; ptr16_eeprom = (u16 *)eeprom_data; for (i = 0;i < eeprom_size; i += 2) { checksum = checksum 
^ (*ptr16_eeprom); ptr16_eeprom++; } checksum = 0xFFFF ^ checksum; *ptr_crc = checksum; } static void ar6000_softmac_update(struct ar6_softc *ar, u8 *eeprom_data, size_t size) { const char *source = "random generated"; const struct firmware *softmac_entry; u8 *ptr_mac; switch (ar->arTargetType) { case TARGET_TYPE_AR6002: ptr_mac = (u8 *)((u8 *)eeprom_data + AR6002_MAC_ADDRESS_OFFSET); break; case TARGET_TYPE_AR6003: ptr_mac = (u8 *)((u8 *)eeprom_data + AR6003_MAC_ADDRESS_OFFSET); break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Target Type\n")); return; } printk(KERN_DEBUG "MAC from EEPROM %pM\n", ptr_mac); /* create a random MAC in case we cannot read file from system */ ptr_mac[0] = 0; ptr_mac[1] = 0x03; ptr_mac[2] = 0x7F; ptr_mac[3] = random32() & 0xff; ptr_mac[4] = random32() & 0xff; ptr_mac[5] = random32() & 0xff; if ((A_REQUEST_FIRMWARE(&softmac_entry, "softmac", ((struct device *)ar->osDevInfo.pOSDevice))) == 0) { char *macbuf = A_MALLOC_NOWAIT(softmac_entry->size+1); if (macbuf) { unsigned int softmac[6]; memcpy(macbuf, softmac_entry->data, softmac_entry->size); macbuf[softmac_entry->size] = '\0'; if (sscanf(macbuf, "%02x:%02x:%02x:%02x:%02x:%02x", &softmac[0], &softmac[1], &softmac[2], &softmac[3], &softmac[4], &softmac[5])==6) { int i; for (i=0; i<6; ++i) { ptr_mac[i] = softmac[i] & 0xff; } source = "softmac file"; } kfree(macbuf); } A_RELEASE_FIRMWARE(softmac_entry); } printk(KERN_DEBUG "MAC from %s %pM\n", source, ptr_mac); calculate_crc(ar->arTargetType, eeprom_data); } #endif /* SOFTMAC_FILE_USED */ static int ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address, bool compressed) { int status; const char *filename; const struct firmware *fw_entry; u32 fw_entry_size; switch (file) { case AR6K_OTP_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_OTP_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_OTP_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_OTP_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; case AR6K_FIRMWARE_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } if (eppingtest) { bypasswmi = true; if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_EPPING_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_EPPING_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_EPPING_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("eppingtest : unsupported firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #ifdef CONFIG_HOST_TCMD_SUPPORT if(testmode) { if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_TCMD_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_TCMD_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_TCMD_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware 
revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #endif #ifdef HTC_RAW_INTERFACE if (!eppingtest && bypasswmi) { if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_ART_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_ART_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #endif break; case AR6K_PATCH_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_PATCH_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_PATCH_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_PATCH_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; case AR6K_BOARD_DATA_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_BOARD_DATA_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_BOARD_DATA_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_BOARD_DATA_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown file type: %d\n", file)); return A_ERROR; } if ((A_REQUEST_FIRMWARE(&fw_entry, filename, ((struct device *)ar->osDevInfo.pOSDevice))) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get %s\n", filename)); return A_ENOENT; } #ifdef SOFTMAC_FILE_USED if (file==AR6K_BOARD_DATA_FILE && fw_entry->data) { ar6000_softmac_update(ar, (u8 *)fw_entry->data, fw_entry->size); } #endif fw_entry_size = fw_entry->size; /* Load extended board data for AR6003 */ if ((file==AR6K_BOARD_DATA_FILE) && (fw_entry->data)) { u32 board_ext_address; u32 board_ext_data_size; u32 board_data_size; board_ext_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_EXT_DATA_SZ : \ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_BOARD_EXT_DATA_SZ : 0)); board_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_DATA_SZ : \ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? 
AR6003_BOARD_DATA_SZ : 0)); /* Determine where in Target RAM to write Board Data */ bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data), (u8 *)&board_ext_address, 4)); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board extended Data download address: 0x%x\n", board_ext_address)); /* check whether the target has allocated memory for extended board data and file contains extended board data */ if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { u32 param; status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (u8 *)(fw_entry->data + board_data_size), board_ext_data_size); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); A_RELEASE_FIRMWARE(fw_entry); return A_ERROR; } /* Record the fact that extended board Data IS initialized */ param = (board_ext_data_size << 16) | 1; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data_config), (unsigned char *)&param, 4)); } fw_entry_size = board_data_size; } if (compressed) { status = BMIFastDownload(ar->arHifDevice, address, (u8 *)fw_entry->data, fw_entry_size); } else { status = BMIWriteMemory(ar->arHifDevice, address, (u8 *)fw_entry->data, fw_entry_size); } if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); A_RELEASE_FIRMWARE(fw_entry); return A_ERROR; } A_RELEASE_FIRMWARE(fw_entry); return 0; } int ar6000_update_bdaddr(struct ar6_softc *ar) { if (setupbtdev != 0) { u32 address; if (BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data), (u8 *)&address, 4) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for hi_board_data failed\n")); return A_ERROR; } if (BMIReadMemory(ar->arHifDevice, address + BDATA_BDADDR_OFFSET, (u8 *)ar->bdaddr, 6) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for BD address failed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BDADDR 0x%x:0x%x:0x%x:0x%x:0x%x:0x%x\n", ar->bdaddr[0], ar->bdaddr[1], ar->bdaddr[2], ar->bdaddr[3], ar->bdaddr[4], ar->bdaddr[5])); } return 0; } int ar6000_sysfs_bmi_get_config(struct ar6_softc *ar, u32 mode) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Requesting device specific configuration\n")); if (mode == WLAN_INIT_MODE_UDEV) { char version[16]; const struct firmware *fw_entry; /* Get config using udev through a script in user space */ sprintf(version, "%2.2x", ar->arVersion.target_ver); if ((A_REQUEST_FIRMWARE(&fw_entry, version, ((struct device *)ar->osDevInfo.pOSDevice))) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI: Failure to get configuration for target version: %s\n", version)); return A_ERROR; } A_RELEASE_FIRMWARE(fw_entry); } else { /* The config is contained within the driver itself */ int status; u32 param, options, sleep, address; /* Temporarily disable system sleep */ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS; bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param)); options = param; param |= AR6K_OPTION_SLEEP_DISABLE; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS; bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param)); sleep = param; param |= WLAN_SYSTEM_SLEEP_DISABLE_SET(1); bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("old options: %d, old sleep: %d\n", options, sleep)); if (ar->arTargetType == TARGET_TYPE_AR6003) { /* Program analog PLL register */ bmifn(BMIWriteSOCRegister(ar->arHifDevice, ANALOG_INTF_BASE_ADDRESS + 0x284, 0xF9104001)); /* 
Run at 80/88MHz by default */ param = CPU_CLOCK_STANDARD_SET(1); } else { /* Run at 40/44MHz by default */ param = CPU_CLOCK_STANDARD_SET(0); } address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); param = 0; if (ar->arTargetType == TARGET_TYPE_AR6002) { bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_ext_clk_detected), (u8 *)&param, 4)); } /* LPO_CAL.ENABLE = 1 if no external clk is detected */ if (param != 1) { address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS; param = LPO_CAL_ENABLE_SET(1); bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); } /* Venus2.0: Lower SDIO pad drive strength, * temporary WAR to avoid SDIO CRC error */ if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6K: Temporary WAR to avoid SDIO CRC error\n")); param = 0x20; address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); } #ifdef FORCE_INTERNAL_CLOCK /* Ignore external clock, if any, and force use of internal clock */ if (ar->arTargetType == TARGET_TYPE_AR6003) { /* hi_ext_clk_detected = 0 */ param = 0; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_ext_clk_detected), (u8 *)&param, 4)); /* CLOCK_CONTROL &= ~LF_CLK32 */ address = RTC_BASE_ADDRESS + CLOCK_CONTROL_ADDRESS; bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param)); param &= (~CLOCK_CONTROL_LF_CLK32_SET(1)); bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); } #endif /* FORCE_INTERNAL_CLOCK */ /* Transfer Board Data from Target EEPROM to Target RAM */ if (ar->arTargetType == TARGET_TYPE_AR6003) { /* Determine where in Target RAM to write Board Data */ bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data), (u8 *)&address, 4)); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board Data download address: 0x%x\n", address)); /* Write EEPROM data to Target RAM */ if ((ar6000_transfer_bin_file(ar, AR6K_BOARD_DATA_FILE, address, false)) != 0) { return A_ERROR; } /* Record the fact that Board Data IS initialized */ param = 1; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data_initialized), (u8 *)&param, 4)); /* Transfer One time Programmable data */ AR6K_APP_LOAD_ADDRESS(address, ar->arVersion.target_ver); if (ar->arVersion.target_ver == AR6003_REV3_VERSION) address = 0x1234; status = ar6000_transfer_bin_file(ar, AR6K_OTP_FILE, address, true); if (status == 0) { /* Execute the OTP code */ param = 0; AR6K_APP_START_OVERRIDE_ADDRESS(address, ar->arVersion.target_ver); bmifn(BMIExecute(ar->arHifDevice, address, &param)); } else if (status != A_ENOENT) { return A_ERROR; } } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Programming of board data for chip %d not supported\n", ar->arTargetType)); return A_ERROR; } /* Download Target firmware */ AR6K_APP_LOAD_ADDRESS(address, ar->arVersion.target_ver); if (ar->arVersion.target_ver == AR6003_REV3_VERSION) address = 0x1234; if ((ar6000_transfer_bin_file(ar, AR6K_FIRMWARE_FILE, address, true)) != 0) { return A_ERROR; } /* Set starting address for firmware */ AR6K_APP_START_OVERRIDE_ADDRESS(address, ar->arVersion.target_ver); 
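            /* Board data, OTP and the firmware image are in target RAM at
             * this point; BMISetAppStart() below records the entry point
             * that the target should jump to once BMIDone() is issued later
             * from ar6000_init(). */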
bmifn(BMISetAppStart(ar->arHifDevice, address)); if(ar->arTargetType == TARGET_TYPE_AR6003) { AR6K_DATASET_PATCH_ADDRESS(address, ar->arVersion.target_ver); if ((ar6000_transfer_bin_file(ar, AR6K_PATCH_FILE, address, false)) != 0) return A_ERROR; param = address; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dset_list_head), (unsigned char *)&param, 4)); } /* Restore system sleep */ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, sleep)); address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS; param = options | 0x20; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); if (ar->arTargetType == TARGET_TYPE_AR6003) { /* Configure GPIO AR6003 UART */ #ifndef CONFIG_AR600x_DEBUG_UART_TX_PIN #define CONFIG_AR600x_DEBUG_UART_TX_PIN 8 #endif param = CONFIG_AR600x_DEBUG_UART_TX_PIN; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dbg_uart_txpin), (u8 *)&param, 4)); #if (CONFIG_AR600x_DEBUG_UART_TX_PIN == 23) { address = GPIO_BASE_ADDRESS + CLOCK_GPIO_ADDRESS; bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param)); param |= CLOCK_GPIO_BT_CLK_OUT_EN_SET(1); bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); } #endif /* Configure GPIO for BT Reset */ #ifdef ATH6KL_CONFIG_GPIO_BT_RESET #define CONFIG_AR600x_BT_RESET_PIN 0x16 param = CONFIG_AR600x_BT_RESET_PIN; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_hci_uart_support_pins), (u8 *)&param, 4)); #endif /* ATH6KL_CONFIG_GPIO_BT_RESET */ /* Configure UART flow control polarity */ #ifndef CONFIG_ATH6KL_BT_UART_FC_POLARITY #define CONFIG_ATH6KL_BT_UART_FC_POLARITY 0 #endif #if (CONFIG_ATH6KL_BT_UART_FC_POLARITY == 1) if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { param = ((CONFIG_ATH6KL_BT_UART_FC_POLARITY << 1) & 0x2); bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_hci_uart_pwr_mgmt_params), (u8 *)&param, 4)); } #endif /* CONFIG_ATH6KL_BT_UART_FC_POLARITY */ } #ifdef HTC_RAW_INTERFACE if (!eppingtest && bypasswmi) { /* Don't run BMIDone for ART mode and force resetok=0 */ resetok = 0; msleep(1000); } #endif /* HTC_RAW_INTERFACE */ } return 0; } int ar6000_configure_target(struct ar6_softc *ar) { u32 param; if (enableuartprint) { param = 1; if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_serial_enable), (u8 *)&param, 4)!= 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enableuartprint failed \n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Serial console prints enabled\n")); } /* Tell target which HTC version it is used*/ param = HTC_PROTOCOL_VERSION; if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest), (u8 *)&param, 4)!= 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for htc version failed \n")); return A_ERROR; } #ifdef CONFIG_HOST_TCMD_SUPPORT if(testmode) { ar->arTargetMode = AR6000_TCMD_MODE; }else { ar->arTargetMode = AR6000_WLAN_MODE; } #endif if (enabletimerwar) { u32 param; if (BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag), (u8 *)&param, 4)!= 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for enabletimerwar failed \n")); return A_ERROR; } param |= HI_OPTION_TIMER_WAR; if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag), (u8 *)&param, 4) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enabletimerwar failed \n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Timer WAR enabled\n")); } /* set the firmware 
mode to STA/IBSS/AP */
    {
        u32 param;

        if (BMIReadMemory(ar->arHifDevice,
            HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
            (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for setting fwmode failed \n"));
            return A_ERROR;
        }

        param |= (num_device << HI_OPTION_NUM_DEV_SHIFT);
        param |= (fwmode << HI_OPTION_FW_MODE_SHIFT);
        param |= (mac_addr_method << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
        param |= (firmware_bridge << HI_OPTION_FW_BRIDGE_SHIFT);

        if (BMIWriteMemory(ar->arHifDevice,
            HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
            (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for setting fwmode failed \n"));
            return A_ERROR;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n"));
    }

#ifdef ATH6KL_DISABLE_TARGET_DBGLOGS
    {
        u32 param;

        if (BMIReadMemory(ar->arHifDevice,
            HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
            (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for disabling debug logs failed\n"));
            return A_ERROR;
        }

        param |= HI_OPTION_DISABLE_DBGLOG;

        if (BMIWriteMemory(ar->arHifDevice,
            HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
            (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for HI_OPTION_DISABLE_DBGLOG failed\n"));
            return A_ERROR;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Target debug logs disabled\n"));
    }
#endif /* ATH6KL_DISABLE_TARGET_DBGLOGS */

    /*
     * Hardcode the address used for the extended board data.
     * Ideally this should be pre-allocated by the OS at boot time,
     * but since it is a new feature and board data is loaded
     * at init time, we have to work around this from the host.
     * It is difficult to patch the firmware boot code,
     * but possible in theory.
     */
    if (ar->arTargetType == TARGET_TYPE_AR6003) {
        u32 ramReservedSz;
        if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
            param = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
            ramReservedSz = AR6003_REV2_RAM_RESERVE_SIZE;
        } else {
            param = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
            ramReservedSz = AR6003_REV3_RAM_RESERVE_SIZE;
        }
        if (BMIWriteMemory(ar->arHifDevice,
            HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data),
            (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIWriteMemory for "
                "hi_board_ext_data failed\n"));
            return A_ERROR;
        }
        if (BMIWriteMemory(ar->arHifDevice,
            HOST_INTEREST_ITEM_ADDRESS(ar, hi_end_RAM_reserve_sz),
            (u8 *)&ramReservedSz, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIWriteMemory for "
                "hi_end_RAM_reserve_sz failed\n"));
            return A_ERROR;
        }
    }

    /* since BMIInit is called in the driver layer, we have to set the block
     * size here for the target */
    if (ar6000_set_htc_params(ar->arHifDevice, ar->arTargetType,
                              mbox_yield_limit, 0)) {
        /* use default number of control buffers */
        return A_ERROR;
    }

    if (setupbtdev != 0) {
        if (ar6000_set_hci_bridge_flags(ar->arHifDevice, ar->arTargetType,
                                        setupbtdev)) {
            return A_ERROR;
        }
    }

    return 0;
}

static void init_netdev(struct net_device *dev, char *name)
{
    dev->netdev_ops = &ar6000_netdev_ops;
    dev->watchdog_timeo = AR6000_TX_TIMEOUT;

    /*
     * We need the OS to provide us with more headroom in order to
     * perform dix to 802.3, WMI header encap, and the HTC header
     */
    if (processDot11Hdr) {
        dev->hard_header_len = sizeof(struct ieee80211_qosframe) +
            sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR) +
            HTC_HEADER_LEN + WMI_MAX_TX_META_SZ + LINUX_HACK_FUDGE_FACTOR;
    } else {
        dev->hard_header_len = ETH_HLEN + sizeof(ATH_LLC_SNAP_HDR) +
            sizeof(WMI_DATA_HDR) + HTC_HEADER_LEN + WMI_MAX_TX_META_SZ +
            LINUX_HACK_FUDGE_FACTOR;
    }

    if (name[0]) {
        strcpy(dev->name, name);
    }

#ifdef CONFIG_CHECKSUM_OFFLOAD
    if (csumOffload) {
        dev->features |= NETIF_F_IP_CSUM; /*advertise kernel
capability to do TCP/UDP CSUM offload for IPV4*/ } #endif return; } static int __ath6kl_init_netdev(struct net_device *dev) { int r; rtnl_lock(); r = ar6000_init(dev); rtnl_unlock(); if (r) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_init\n")); return r; } return 0; } #ifdef HTC_RAW_INTERFACE static int ath6kl_init_netdev_wmi(struct net_device *dev) { if (!eppingtest && bypasswmi) return 0; return __ath6kl_init_netdev(dev); } #else static int ath6kl_init_netdev_wmi(struct net_device *dev) { return __ath6kl_init_netdev(dev); } #endif static int ath6kl_init_netdev(struct ar6_softc *ar) { int r; r = ar6000_sysfs_bmi_get_config(ar, wlaninitmode); if (r) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ar6000_avail: " "ar6000_sysfs_bmi_get_config failed\n")); return r; } return ath6kl_init_netdev_wmi(ar->arNetDev); } /* * HTC Event handlers */ static int ar6000_avail_ev(void *context, void *hif_handle) { int i; struct net_device *dev; void *ar_netif; struct ar6_softc *ar; int device_index = 0; struct htc_init_info htcInfo; struct wireless_dev *wdev; int r = 0; struct hif_device_os_device_info osDevInfo; memset(&osDevInfo, 0, sizeof(osDevInfo)); if (HIFConfigureDevice(hif_handle, HIF_DEVICE_GET_OS_DEVICE, &osDevInfo, sizeof(osDevInfo))) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: Failed to get OS device instance\n", __func__)); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_available\n")); for (i=0; i < MAX_AR6000; i++) { if (ar6000_devices[i] == NULL) { break; } } if (i == MAX_AR6000) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_available: max devices reached\n")); return A_ERROR; } /* Save this. It gives a bit better readability especially since */ /* we use another local "i" variable below. */ device_index = i; wdev = ar6k_cfg80211_init(osDevInfo.pOSDevice); if (IS_ERR(wdev)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ar6k_cfg80211_init failed\n", __func__)); return A_ERROR; } ar_netif = wdev_priv(wdev); if (ar_netif == NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Can't allocate ar6k priv memory\n", __func__)); return A_ERROR; } A_MEMZERO(ar_netif, sizeof(struct ar6_softc)); ar = (struct ar6_softc *)ar_netif; ar->wdev = wdev; wdev->iftype = NL80211_IFTYPE_STATION; dev = alloc_netdev_mq(0, "wlan%d", ether_setup, 1); if (!dev) { printk(KERN_CRIT "AR6K: no memory for network device instance\n"); ar6k_cfg80211_deinit(ar); return A_ERROR; } dev->ieee80211_ptr = wdev; SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy)); wdev->netdev = dev; ar->arNetworkType = INFRA_NETWORK; ar->smeState = SME_DISCONNECTED; ar->arAutoAuthStage = AUTH_IDLE; init_netdev(dev, ifname); ar->arNetDev = dev; ar->arHifDevice = hif_handle; ar->arWlanState = WLAN_ENABLED; ar->arDeviceIndex = device_index; ar->arWlanPowerState = WLAN_POWER_STATE_ON; ar->arWlanOff = false; /* We are in ON state */ #ifdef CONFIG_PM ar->arWowState = WLAN_WOW_STATE_NONE; ar->arBTOff = true; /* BT chip assumed to be OFF */ ar->arBTSharing = WLAN_CONFIG_BT_SHARING; ar->arWlanOffConfig = WLAN_CONFIG_WLAN_OFF; ar->arSuspendConfig = WLAN_CONFIG_PM_SUSPEND; ar->arWow2Config = WLAN_CONFIG_PM_WOW2; #endif /* CONFIG_PM */ A_INIT_TIMER(&ar->arHBChallengeResp.timer, ar6000_detect_error, dev); ar->arHBChallengeResp.seqNum = 0; ar->arHBChallengeResp.outstanding = false; ar->arHBChallengeResp.missCnt = 0; ar->arHBChallengeResp.frequency = AR6000_HB_CHALLENGE_RESP_FREQ_DEFAULT; ar->arHBChallengeResp.missThres = AR6000_HB_CHALLENGE_RESP_MISS_THRES_DEFAULT; ar6000_init_control_info(ar); init_waitqueue_head(&arEvent); sema_init(&ar->arSem, 1); ar->bIsDestroyProgress = false; 
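    /*
     * All host-side state for this instance is initialized above; from here
     * on we talk to the target: BMI comes up first so we can query target
     * info and push configuration, then HTC is created on top of the HIF
     * layer.
     */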
INIT_HTC_PACKET_QUEUE(&ar->amsdu_rx_buffer_queue); #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL A_INIT_TIMER(&aptcTimer, aptcTimerHandler, ar); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ A_INIT_TIMER(&ar->disconnect_timer, disconnect_timer_handler, dev); BMIInit(); ar6000_sysfs_bmi_init(ar); { struct bmi_target_info targ_info; r = BMIGetTargetInfo(ar->arHifDevice, &targ_info); if (r) goto avail_ev_failed; ar->arVersion.target_ver = targ_info.target_ver; ar->arTargetType = targ_info.target_type; wdev->wiphy->hw_version = targ_info.target_ver; } r = ar6000_configure_target(ar); if (r) goto avail_ev_failed; A_MEMZERO(&htcInfo,sizeof(htcInfo)); htcInfo.pContext = ar; htcInfo.TargetFailure = ar6000_target_failure; ar->arHtcTarget = HTCCreate(ar->arHifDevice,&htcInfo); if (!ar->arHtcTarget) { r = -ENOMEM; goto avail_ev_failed; } spin_lock_init(&ar->arLock); #ifdef WAPI_ENABLE ar->arWapiEnable = 0; #endif if(csumOffload){ /*if external frame work is also needed, change and use an extended rxMetaVerion*/ ar->rxMetaVersion=WMI_META_VERSION_2; } ar->aggr_cntxt = aggr_init(ar6000_alloc_netbufs); if (!ar->aggr_cntxt) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize aggr.\n", __func__)); r = -ENOMEM; goto avail_ev_failed; } aggr_register_rx_dispatcher(ar->aggr_cntxt, (void *)dev, ar6000_deliver_frames_to_nw_stack); HIFClaimDevice(ar->arHifDevice, ar); /* We only register the device in the global list if we succeed. */ /* If the device is in the global list, it will be destroyed */ /* when the module is unloaded. */ ar6000_devices[device_index] = dev; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("BMI enabled: %d\n", wlaninitmode)); if ((wlaninitmode == WLAN_INIT_MODE_UDEV) || (wlaninitmode == WLAN_INIT_MODE_DRV)) { r = ath6kl_init_netdev(ar); if (r) goto avail_ev_failed; } /* This runs the init function if registered */ r = register_netdev(dev); if (r) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: register_netdev failed\n")); ar6000_destroy(dev, 0); return r; } is_netdev_registered = 1; #ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT arApNetDev = NULL; #endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_avail: name=%s hifdevice=0x%lx, dev=0x%lx (%d), ar=0x%lx\n", dev->name, (unsigned long)ar->arHifDevice, (unsigned long)dev, device_index, (unsigned long)ar)); avail_ev_failed : if (r) ar6000_sysfs_bmi_deinit(ar); return r; } static void ar6000_target_failure(void *Instance, int Status) { struct ar6_softc *ar = (struct ar6_softc *)Instance; WMI_TARGET_ERROR_REPORT_EVENT errEvent; static bool sip = false; if (Status != 0) { printk(KERN_ERR "ar6000_target_failure: target asserted \n"); if (timer_pending(&ar->arHBChallengeResp.timer)) { A_UNTIMEOUT(&ar->arHBChallengeResp.timer); } /* try dumping target assertion information (if any) */ ar6000_dump_target_assert_info(ar->arHifDevice,ar->arTargetType); /* * Fetch the logs from the target via the diagnostic * window. 
         */
        ar6000_dbglog_get_debug_logs(ar);

        /* Report the error only once */
        if (!sip) {
            sip = true;
            errEvent.errorVal = WMI_TARGET_COM_ERR | WMI_TARGET_FATAL_ERR;
        }
    }
}

static int ar6000_unavail_ev(void *context, void *hif_handle)
{
    struct ar6_softc *ar = (struct ar6_softc *)context;

    /* NULL out its entry in the global list */
    ar6000_devices[ar->arDeviceIndex] = NULL;
    ar6000_destroy(ar->arNetDev, 1);

    return 0;
}

void ar6000_restart_endpoint(struct net_device *dev)
{
    int status = 0;
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);

    BMIInit();
    do {
        if ((status = ar6000_configure_target(ar)) != 0)
            break;
        if ((status = ar6000_sysfs_bmi_get_config(ar, wlaninitmode)) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_sysfs_bmi_get_config failed\n"));
            break;
        }
        rtnl_lock();
        status = (ar6000_init(dev) == 0) ? 0 : A_ERROR;
        rtnl_unlock();
        if (status) {
            break;
        }
        if (ar->arSsidLen && ar->arWlanState == WLAN_ENABLED) {
            ar6000_connect_to_ap(ar);
        }
    } while (0);

    if (status == 0) {
        return;
    }

    ar6000_devices[ar->arDeviceIndex] = NULL;
    ar6000_destroy(ar->arNetDev, 1);
}

void ar6000_stop_endpoint(struct net_device *dev, bool keepprofile, bool getdbglogs)
{
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);

    /* Stop the transmit queues */
    netif_stop_queue(dev);

    /* Disable the target and the interrupts associated with it */
    if (ar->arWmiReady == true) {
        if (!bypasswmi) {
            bool disconnectIssued;

            disconnectIssued = (ar->arConnected) || (ar->arConnectPending);
            ar6000_disconnect(ar);
            if (!keepprofile) {
                ar6000_init_profile_info(ar);
            }

            A_UNTIMEOUT(&ar->disconnect_timer);

            if (getdbglogs) {
                ar6000_dbglog_get_debug_logs(ar);
            }

            ar->arWmiReady = false;
            wmi_shutdown(ar->arWmi);
            ar->arWmiEnabled = false;
            ar->arWmi = NULL;
            /*
             * After wmi_shutdown all WMI events will be dropped.
             * We need to clean up the buffers allocated in AP mode
             * and give a disconnect notification to the stack, which
             * usually happens in the disconnect_event.
             * Simulate the disconnect_event by calling the function directly.
             * Sometimes disconnect_event will be received when the debug logs
             * are collected.
             */
            if (disconnectIssued) {
                if (ar->arNetworkType & AP_NETWORK) {
                    ar6000_disconnect_event(ar, DISCONNECT_CMD, bcast_mac, 0, NULL, 0);
                } else {
                    ar6000_disconnect_event(ar, DISCONNECT_CMD, ar->arBssid, 0, NULL, 0);
                }
            }
            ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT;
            ar->user_key_ctrl = 0;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): WMI stopped\n", __func__));
    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): WMI not ready 0x%lx 0x%lx\n",
            __func__, (unsigned long) ar, (unsigned long) ar->arWmi));

        /* Shut down WMI if we have started it */
        if (ar->arWmiEnabled == true) {
            AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): Shut down WMI\n", __func__));
            wmi_shutdown(ar->arWmi);
            ar->arWmiEnabled = false;
            ar->arWmi = NULL;
        }
    }

    if (ar->arHtcTarget != NULL) {
#ifdef EXPORT_HCI_BRIDGE_INTERFACE
        if (NULL != ar6kHciTransCallbacks.cleanupTransport) {
            ar6kHciTransCallbacks.cleanupTransport(NULL);
        }
#else
        // FIXME: workaround to reset BT's UART baud rate to default
        if (NULL != ar->exitCallback) {
            struct ar3k_config_info ar3kconfig;
            int status;

            A_MEMZERO(&ar3kconfig, sizeof(ar3kconfig));
            ar6000_set_default_ar3kconfig(ar, (void *)&ar3kconfig);
            status = ar->exitCallback(&ar3kconfig);
            if (0 != status) {
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to reset AR3K baud rate! \n"));
            }
        }
        // END workaround

        if (setuphci)
            ar6000_cleanup_hci(ar);
#endif
        AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Shutting down HTC .... \n"));
        /* stop HTC */
        HTCStop(ar->arHtcTarget);
    }

    if (resetok) {
        /* try to reset the device if we can;
         * the driver may have been configured NOT to reset the target during
         * a debug session */
        AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Attempting to reset target on instance destroy.... \n"));
        if (ar->arHifDevice != NULL) {
            bool coldReset = (ar->arTargetType == TARGET_TYPE_AR6003) ? true : false;
            ar6000_reset_device(ar->arHifDevice, ar->arTargetType, true, coldReset);
        }
    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Host does not want target reset. \n"));
    }

    /* Done with cookies */
    ar6000_cookie_cleanup(ar);

    /* clean up any allocated AMSDU buffers */
    ar6000_cleanup_amsdu_rxbufs(ar);
}

/*
 * We need to differentiate between the surprise and planned removal of the
 * device because of the following consideration:
 * - In case of surprise removal, the hcd already frees up the pending requests
 *   for the device and hence there is no need to unregister the function
 *   driver in order to get these requests. For planned removal, the function
 *   driver has to explicitly unregister itself to have the hcd return all the
 *   pending requests before the data structures for the devices are freed up.
 *   Note that as per the current implementation, the function driver will
 *   end up releasing all the devices since there is no API to selectively
 *   release a particular device.
 * - Certain commands issued to the target can be skipped for surprise
 *   removal since they will anyway not go through.
 */
void ar6000_destroy(struct net_device *dev, unsigned int unregister)
{
    struct ar6_softc *ar;

    AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("+ar6000_destroy \n"));

    if ((dev == NULL) || ((ar = ar6k_priv(dev)) == NULL)) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s(): Failed to get device structure.\n", __func__));
        return;
    }

    ar->bIsDestroyProgress = true;

    if (down_interruptible(&ar->arSem)) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s(): down_interruptible failed \n", __func__));
        return;
    }

    if (ar->arWlanPowerState != WLAN_POWER_STATE_CUT_PWR) {
        /* only stop the endpoint if we did not already stop it in suspend_ev */
        ar6000_stop_endpoint(dev, false, true);
    }

    ar->arWlanState = WLAN_DISABLED;
    if (ar->arHtcTarget != NULL) {
        /* destroy HTC */
        HTCDestroy(ar->arHtcTarget);
    }
    if (ar->arHifDevice != NULL) {
        /* release the device so we do not get called back on remove in case
         * we're explicitly destroyed by module unload */
        HIFReleaseDevice(ar->arHifDevice);
        HIFShutDownDevice(ar->arHifDevice);
    }

    aggr_module_destroy(ar->aggr_cntxt);

    /* Done with cookies */
    ar6000_cookie_cleanup(ar);

    /* clean up any allocated AMSDU buffers */
    ar6000_cleanup_amsdu_rxbufs(ar);

    ar6000_sysfs_bmi_deinit(ar);

    /* Cleanup BMI */
    BMICleanup();

    /* Clear the tx counters */
    memset(tx_attempt, 0, sizeof(tx_attempt));
    memset(tx_post, 0, sizeof(tx_post));
    memset(tx_complete, 0, sizeof(tx_complete));

#ifdef HTC_RAW_INTERFACE
    if (ar->arRawHtc) {
        kfree(ar->arRawHtc);
        ar->arRawHtc = NULL;
    }
#endif

    /* Free up the device data structure */
    if (unregister && is_netdev_registered) {
        unregister_netdev(dev);
        is_netdev_registered = 0;
    }
    free_netdev(dev);

    ar6k_cfg80211_deinit(ar);

#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
    ar6000_remove_ap_interface();
#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */

    AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("-ar6000_destroy \n"));
}

static void disconnect_timer_handler(unsigned long ptr)
{
    struct net_device *dev = (struct net_device *)ptr;
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);

    A_UNTIMEOUT(&ar->disconnect_timer);

    ar6000_init_profile_info(ar);
    ar6000_disconnect(ar);
}

static void ar6000_detect_error(unsigned long ptr)
{ struct net_device *dev = (struct net_device *)ptr; struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); WMI_TARGET_ERROR_REPORT_EVENT errEvent; AR6000_SPIN_LOCK(&ar->arLock, 0); if (ar->arHBChallengeResp.outstanding) { ar->arHBChallengeResp.missCnt++; } else { ar->arHBChallengeResp.missCnt = 0; } if (ar->arHBChallengeResp.missCnt > ar->arHBChallengeResp.missThres) { /* Send Error Detect event to the application layer and do not reschedule the error detection module timer */ ar->arHBChallengeResp.missCnt = 0; ar->arHBChallengeResp.seqNum = 0; errEvent.errorVal = WMI_TARGET_COM_ERR | WMI_TARGET_FATAL_ERR; AR6000_SPIN_UNLOCK(&ar->arLock, 0); return; } /* Generate the sequence number for the next challenge */ ar->arHBChallengeResp.seqNum++; ar->arHBChallengeResp.outstanding = true; AR6000_SPIN_UNLOCK(&ar->arLock, 0); /* Send the challenge on the control channel */ if (wmi_get_challenge_resp_cmd(ar->arWmi, ar->arHBChallengeResp.seqNum, DRV_HB_CHALLENGE) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to send heart beat challenge\n")); } /* Reschedule the timer for the next challenge */ A_TIMEOUT_MS(&ar->arHBChallengeResp.timer, ar->arHBChallengeResp.frequency * 1000, 0); } void ar6000_init_profile_info(struct ar6_softc *ar) { ar->arSsidLen = 0; A_MEMZERO(ar->arSsid, sizeof(ar->arSsid)); switch(fwmode) { case HI_OPTION_FW_MODE_IBSS: ar->arNetworkType = ar->arNextMode = ADHOC_NETWORK; break; case HI_OPTION_FW_MODE_BSS_STA: ar->arNetworkType = ar->arNextMode = INFRA_NETWORK; break; case HI_OPTION_FW_MODE_AP: ar->arNetworkType = ar->arNextMode = AP_NETWORK; break; } ar->arDot11AuthMode = OPEN_AUTH; ar->arAuthMode = NONE_AUTH; ar->arPairwiseCrypto = NONE_CRYPT; ar->arPairwiseCryptoLen = 0; ar->arGroupCrypto = NONE_CRYPT; ar->arGroupCryptoLen = 0; A_MEMZERO(ar->arWepKeyList, sizeof(ar->arWepKeyList)); A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid)); A_MEMZERO(ar->arBssid, sizeof(ar->arBssid)); ar->arBssChannel = 0; } static void ar6000_init_control_info(struct ar6_softc *ar) { ar->arWmiEnabled = false; ar6000_init_profile_info(ar); ar->arDefTxKeyIndex = 0; A_MEMZERO(ar->arWepKeyList, sizeof(ar->arWepKeyList)); ar->arChannelHint = 0; ar->arListenIntervalT = A_DEFAULT_LISTEN_INTERVAL; ar->arListenIntervalB = 0; ar->arVersion.host_ver = AR6K_SW_VERSION; ar->arRssi = 0; ar->arTxPwr = 0; ar->arTxPwrSet = false; ar->arSkipScan = 0; ar->arBeaconInterval = 0; ar->arBitRate = 0; ar->arMaxRetries = 0; ar->arWmmEnabled = true; ar->intra_bss = 1; ar->scan_triggered = 0; A_MEMZERO(&ar->scParams, sizeof(ar->scParams)); ar->scParams.shortScanRatio = WMI_SHORTSCANRATIO_DEFAULT; ar->scParams.scanCtrlFlags = DEFAULT_SCAN_CTRL_FLAGS; /* Initialize the AP mode state info */ { u8 ctr; A_MEMZERO((u8 *)ar->sta_list, AP_MAX_NUM_STA * sizeof(sta_t)); /* init the Mutexes */ A_MUTEX_INIT(&ar->mcastpsqLock); /* Init the PS queues */ for (ctr=0; ctr < AP_MAX_NUM_STA ; ctr++) { A_MUTEX_INIT(&ar->sta_list[ctr].psqLock); A_NETBUF_QUEUE_INIT(&ar->sta_list[ctr].psq); } ar->ap_profile_flag = 0; A_NETBUF_QUEUE_INIT(&ar->mcastpsq); memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); ar->ap_wmode = DEF_AP_WMODE_G; ar->ap_dtim_period = DEF_AP_DTIM; ar->ap_beacon_interval = DEF_BEACON_INTERVAL; } } static int ar6000_open(struct net_device *dev) { unsigned long flags; struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); spin_lock_irqsave(&ar->arLock, flags); if(ar->arWlanState == WLAN_DISABLED) { ar->arWlanState = WLAN_ENABLED; } if( ar->arConnected || bypasswmi) { netif_carrier_on(dev); /* Wake up the queues */ 
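        /* restart the TX queue (stopped in ar6000_close() or
         * ar6000_stop_endpoint()) so the stack can hand us frames */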
        netif_wake_queue(dev);
    } else
        netif_carrier_off(dev);

    spin_unlock_irqrestore(&ar->arLock, flags);

    return 0;
}

static int ar6000_close(struct net_device *dev)
{
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);

    netif_stop_queue(dev);

    ar6000_disconnect(ar);

    if (ar->arWmiReady == true) {
        if (wmi_scanparams_cmd(ar->arWmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0) {
            return -EIO;
        }
        ar->arWlanState = WLAN_DISABLED;
    }

    ar6k_cfg80211_scanComplete_event(ar, A_ECANCELED);

    return 0;
}

/* connect to a service */
static int ar6000_connectservice(struct ar6_softc *ar,
                                 struct htc_service_connect_req *pConnect,
                                 char *pDesc)
{
    int status;
    struct htc_service_connect_resp response;

    do {
        A_MEMZERO(&response, sizeof(response));

        status = HTCConnectService(ar->arHtcTarget, pConnect, &response);
        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Failed to connect to %s service status:%d \n",
                pDesc, status));
            break;
        }

        switch (pConnect->ServiceID) {
        case WMI_CONTROL_SVC:
            if (ar->arWmiEnabled) {
                /* set control endpoint for WMI use */
                wmi_set_control_ep(ar->arWmi, response.Endpoint);
            }
            /* save EP for fast lookup */
            ar->arControlEp = response.Endpoint;
            break;
        case WMI_DATA_BE_SVC:
            arSetAc2EndpointIDMap(ar, WMM_AC_BE, response.Endpoint);
            break;
        case WMI_DATA_BK_SVC:
            arSetAc2EndpointIDMap(ar, WMM_AC_BK, response.Endpoint);
            break;
        case WMI_DATA_VI_SVC:
            arSetAc2EndpointIDMap(ar, WMM_AC_VI, response.Endpoint);
            break;
        case WMI_DATA_VO_SVC:
            arSetAc2EndpointIDMap(ar, WMM_AC_VO, response.Endpoint);
            break;
        default:
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ServiceID not mapped %d\n", pConnect->ServiceID));
            status = A_EINVAL;
            break;
        }
    } while (false);

    return status;
}

void ar6000_TxDataCleanup(struct ar6_softc *ar)
{
    /* flush all the data (non-control) streams;
     * we only flush packets that are tagged as data, we leave any control packets that
     * were in the TX queues alone */
    HTCFlushEndpoint(ar->arHtcTarget, arAc2EndpointID(ar, WMM_AC_BE), AR6K_DATA_PKT_TAG);
    HTCFlushEndpoint(ar->arHtcTarget, arAc2EndpointID(ar, WMM_AC_BK), AR6K_DATA_PKT_TAG);
    HTCFlushEndpoint(ar->arHtcTarget, arAc2EndpointID(ar, WMM_AC_VI), AR6K_DATA_PKT_TAG);
    HTCFlushEndpoint(ar->arHtcTarget, arAc2EndpointID(ar, WMM_AC_VO), AR6K_DATA_PKT_TAG);
}

HTC_ENDPOINT_ID ar6000_ac2_endpoint_id(void *devt, u8 ac)
{
    struct ar6_softc *ar = (struct ar6_softc *) devt;
    return arAc2EndpointID(ar, ac);
}

u8 ar6000_endpoint_id2_ac(void *devt, HTC_ENDPOINT_ID ep)
{
    struct ar6_softc *ar = (struct ar6_softc *) devt;
    return arEndpoint2Ac(ar, ep);
}

#if defined(CONFIG_ATH6KL_ENABLE_COEXISTENCE)
static int ath6kl_config_btcoex_params(struct ar6_softc *ar)
{
    int r;
    WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD sbcb_cmd;
    WMI_SET_BTCOEX_FE_ANT_CMD sbfa_cmd;

    /* Configure the type of BT collocated with WLAN */
    memset(&sbcb_cmd, 0, sizeof(WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD));
    sbcb_cmd.btcoexCoLocatedBTdev = ATH6KL_BT_DEV;

    r = wmi_set_btcoex_colocated_bt_dev_cmd(ar->arWmi, &sbcb_cmd);
    if (r) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to set collocated BT type\n"));
        return r;
    }

    /* Configure the front end antenna shared between BT and WLAN */
    memset(&sbfa_cmd, 0, sizeof(WMI_SET_BTCOEX_FE_ANT_CMD));
    sbfa_cmd.btcoexFeAntType = ATH6KL_BT_ANTENNA;

    r = wmi_set_btcoex_fe_ant_cmd(ar->arWmi, &sbfa_cmd);
    if (r) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to set front end antenna configuration\n"));
        return r;
    }

    return 0;
}
#else
static int ath6kl_config_btcoex_params(struct ar6_softc *ar)
{
    return 0;
}
#endif /* CONFIG_ATH6KL_ENABLE_COEXISTENCE */

/*
 * This function applies WLAN specific configuration defined in wlan_config.h
*/ int ar6000_target_config_wlan_params(struct ar6_softc *ar) { int status = 0; #ifdef CONFIG_HOST_TCMD_SUPPORT if (ar->arTargetMode != AR6000_WLAN_MODE) { return 0; } #endif /* CONFIG_HOST_TCMD_SUPPORT */ /* * configure the device for rx dot11 header rules 0,0 are the default values * therefore this command can be skipped if the inputs are 0,FALSE,FALSE.Required * if checksum offload is needed. Set RxMetaVersion to 2 */ if ((wmi_set_rx_frame_format_cmd(ar->arWmi,ar->rxMetaVersion, processDot11Hdr, processDot11Hdr)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set the rx frame format.\n")); status = A_ERROR; } status = ath6kl_config_btcoex_params(ar); if (status) return status; #if WLAN_CONFIG_IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN if ((wmi_pmparams_cmd(ar->arWmi, 0, 1, 0, 0, 1, IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set power save fail event policy\n")); status = A_ERROR; } #endif #if WLAN_CONFIG_DONOT_IGNORE_BARKER_IN_ERP if ((wmi_set_lpreamble_cmd(ar->arWmi, 0, WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set barker preamble policy\n")); status = A_ERROR; } #endif if ((wmi_set_keepalive_cmd(ar->arWmi, WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set keep alive interval\n")); status = A_ERROR; } #if WLAN_CONFIG_DISABLE_11N { WMI_SET_HT_CAP_CMD htCap; memset(&htCap, 0, sizeof(WMI_SET_HT_CAP_CMD)); htCap.band = 0; if ((wmi_set_ht_cap_cmd(ar->arWmi, &htCap)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set ht capabilities \n")); status = A_ERROR; } htCap.band = 1; if ((wmi_set_ht_cap_cmd(ar->arWmi, &htCap)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set ht capabilities \n")); status = A_ERROR; } } #endif /* WLAN_CONFIG_DISABLE_11N */ #ifdef ATH6K_CONFIG_OTA_MODE if ((wmi_powermode_cmd(ar->arWmi, MAX_PERF_POWER)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set power mode \n")); status = A_ERROR; } #endif if ((wmi_disctimeout_cmd(ar->arWmi, WLAN_CONFIG_DISCONNECT_TIMEOUT)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set disconnect timeout \n")); status = A_ERROR; } #if WLAN_CONFIG_DISABLE_TX_BURSTING if ((wmi_set_wmm_txop(ar->arWmi, WMI_TXOP_DISABLED)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set txop bursting \n")); status = A_ERROR; } #endif return status; } /* This function does one time initialization for the lifetime of the device */ int ar6000_init(struct net_device *dev) { struct ar6_softc *ar; int status; s32 timeleft; s16 i; int ret = 0; if((ar = ar6k_priv(dev)) == NULL) { return -EIO; } if (wlaninitmode == WLAN_INIT_MODE_USR || wlaninitmode == WLAN_INIT_MODE_DRV) { ar6000_update_bdaddr(ar); if (enablerssicompensation) { ar6000_copy_cust_data_from_target(ar->arHifDevice, ar->arTargetType); read_rssi_compensation_param(ar); for (i=-95; i<=0; i++) { rssi_compensation_table[0-i] = rssi_compensation_calc(ar,i); } } } dev_hold(dev); rtnl_unlock(); /* Do we need to finish the BMI phase */ if ((wlaninitmode == WLAN_INIT_MODE_USR || wlaninitmode == WLAN_INIT_MODE_DRV) && (BMIDone(ar->arHifDevice) != 0)) { ret = -EIO; goto ar6000_init_done; } if (!bypasswmi) { #if 0 /* TBDXXX */ if (ar->arVersion.host_ver != ar->arVersion.target_ver) { A_PRINTF("WARNING: Host version 0x%x does not match Target " " version 0x%x!\n", ar->arVersion.host_ver, ar->arVersion.target_ver); } #endif /* Indicate that WMI is enabled (although not ready yet) */ ar->arWmiEnabled = true; if ((ar->arWmi = wmi_init((void *) ar)) == NULL) { 
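            /* Without a WMI instance there is no control path to the target
             * firmware, so treat this as fatal for initialization. */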
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize WMI.\n", __func__)); ret = -EIO; goto ar6000_init_done; } AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Got WMI @ 0x%lx.\n", __func__, (unsigned long) ar->arWmi)); } do { struct htc_service_connect_req connect; /* the reason we have to wait for the target here is that the driver layer * has to init BMI in order to set the host block size, */ status = HTCWaitTarget(ar->arHtcTarget); if (status) { break; } A_MEMZERO(&connect,sizeof(connect)); /* meta data is unused for now */ connect.pMetaData = NULL; connect.MetaDataLength = 0; /* these fields are the same for all service endpoints */ connect.EpCallbacks.pContext = ar; connect.EpCallbacks.EpTxCompleteMultiple = ar6000_tx_complete; connect.EpCallbacks.EpRecv = ar6000_rx; connect.EpCallbacks.EpRecvRefill = ar6000_rx_refill; connect.EpCallbacks.EpSendFull = ar6000_tx_queue_full; /* set the max queue depth so that our ar6000_tx_queue_full handler gets called. * Linux has the peculiarity of not providing flow control between the * NIC and the network stack. There is no API to indicate that a TX packet * was sent which could provide some back pressure to the network stack. * Under linux you would have to wait till the network stack consumed all sk_buffs * before any back-flow kicked in. Which isn't very friendly. * So we have to manage this ourselves */ connect.MaxSendQueueDepth = MAX_DEFAULT_SEND_QUEUE_DEPTH; connect.EpCallbacks.RecvRefillWaterMark = AR6000_MAX_RX_BUFFERS / 4; /* set to 25 % */ if (0 == connect.EpCallbacks.RecvRefillWaterMark) { connect.EpCallbacks.RecvRefillWaterMark++; } /* connect to control service */ connect.ServiceID = WMI_CONTROL_SVC; status = ar6000_connectservice(ar, &connect, "WMI CONTROL"); if (status) { break; } connect.LocalConnectionFlags |= HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING; /* limit the HTC message size on the send path, although we can receive A-MSDU frames of * 4K, we will only send ethernet-sized (802.3) frames on the send path. */ connect.MaxSendMsgSize = WMI_MAX_TX_DATA_FRAME_LENGTH; /* to reduce the amount of committed memory for larger A_MSDU frames, use the recv-alloc threshold * mechanism for larger packets */ connect.EpCallbacks.RecvAllocThreshold = AR6000_BUFFER_SIZE; connect.EpCallbacks.EpRecvAllocThresh = ar6000_alloc_amsdu_rxbuf; /* for the remaining data services set the connection flag to reduce dribbling, * if configured to do so */ if (reduce_credit_dribble) { connect.ConnectionFlags |= HTC_CONNECT_FLAGS_REDUCE_CREDIT_DRIBBLE; /* the credit dribble trigger threshold is (reduce_credit_dribble - 1) for a value * of 0-3 */ connect.ConnectionFlags &= ~HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK; connect.ConnectionFlags |= ((u16)reduce_credit_dribble - 1) & HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK; } /* connect to best-effort service */ connect.ServiceID = WMI_DATA_BE_SVC; status = ar6000_connectservice(ar, &connect, "WMI DATA BE"); if (status) { break; } /* connect to back-ground * map this to WMI LOW_PRI */ connect.ServiceID = WMI_DATA_BK_SVC; status = ar6000_connectservice(ar, &connect, "WMI DATA BK"); if (status) { break; } /* connect to Video service, map this to * to HI PRI */ connect.ServiceID = WMI_DATA_VI_SVC; status = ar6000_connectservice(ar, &connect, "WMI DATA VI"); if (status) { break; } /* connect to VO service, this is currently not * mapped to a WMI priority stream due to historical reasons. 
         * WMI originally defined 3 priorities over 3 mailboxes.
         * We can change this when WMI is reworked so that priorities are not
         * dependent on mailboxes */
        connect.ServiceID = WMI_DATA_VO_SVC;
        status = ar6000_connectservice(ar, &connect, "WMI DATA VO");
        if (status) {
            break;
        }

        A_ASSERT(arAc2EndpointID(ar, WMM_AC_BE) != 0);
        A_ASSERT(arAc2EndpointID(ar, WMM_AC_BK) != 0);
        A_ASSERT(arAc2EndpointID(ar, WMM_AC_VI) != 0);
        A_ASSERT(arAc2EndpointID(ar, WMM_AC_VO) != 0);

        /* setup access class priority mappings */
        ar->arAcStreamPriMap[WMM_AC_BK] = 0; /* lowest  */
        ar->arAcStreamPriMap[WMM_AC_BE] = 1; /*         */
        ar->arAcStreamPriMap[WMM_AC_VI] = 2; /*         */
        ar->arAcStreamPriMap[WMM_AC_VO] = 3; /* highest */

#ifdef EXPORT_HCI_BRIDGE_INTERFACE
        if (setuphci && (NULL != ar6kHciTransCallbacks.setupTransport)) {
            struct hci_transport_misc_handles hciHandles;

            hciHandles.netDevice = ar->arNetDev;
            hciHandles.hifDevice = ar->arHifDevice;
            hciHandles.htcHandle = ar->arHtcTarget;
            status = (int)(ar6kHciTransCallbacks.setupTransport(&hciHandles));
        }
#else
        if (setuphci) {
            /* setup HCI */
            status = ar6000_setup_hci(ar);
        }
#endif
    } while (false);

    if (status) {
        ret = -EIO;
        goto ar6000_init_done;
    }

    if (regscanmode) {
        u32 param;

        if (BMIReadMemory(ar->arHifDevice,
            HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
            (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIReadMemory for setting "
                "regscanmode failed\n"));
            return A_ERROR;
        }

        if (regscanmode == 1)
            param |= HI_OPTION_SKIP_REG_SCAN;
        else if (regscanmode == 2)
            param |= HI_OPTION_INIT_REG_SCAN;

        if (BMIWriteMemory(ar->arHifDevice,
            HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
            (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIWriteMemory for setting "
                "regscanmode failed\n"));
            return A_ERROR;
        }
        AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Regulatory scan mode set\n"));
    }

    /*
     * give our connected endpoints some buffers
     */
    ar6000_rx_refill(ar, ar->arControlEp);
    ar6000_rx_refill(ar, arAc2EndpointID(ar, WMM_AC_BE));

    /*
     * We will post the receive buffers only for SPE or endpoint ping testing so we are
     * making it conditional on the 'bypasswmi' flag.
     */
    if (bypasswmi) {
        ar6000_rx_refill(ar, arAc2EndpointID(ar, WMM_AC_BK));
        ar6000_rx_refill(ar, arAc2EndpointID(ar, WMM_AC_VI));
        ar6000_rx_refill(ar, arAc2EndpointID(ar, WMM_AC_VO));
    }

    /* allocate some buffers that handle larger AMSDU frames */
    ar6000_refill_amsdu_rxbufs(ar, AR6000_MAX_AMSDU_RX_BUFFERS);

    /* setup credit distribution */
    ar6000_setup_credit_dist(ar->arHtcTarget, &ar->arCreditStateInfo);

    /* Since cookies are used for HTC transports, they should be
     * initialized prior to enabling HTC.
     */
    ar6000_cookie_init(ar);

    /* start HTC */
    status = HTCStart(ar->arHtcTarget);
    if (status) {
        if (ar->arWmiEnabled == true) {
            wmi_shutdown(ar->arWmi);
            ar->arWmiEnabled = false;
            ar->arWmi = NULL;
        }
        ar6000_cookie_cleanup(ar);
        ret = -EIO;
        goto ar6000_init_done;
    }

    if (!bypasswmi) {
        /* Wait for the WMI ready event */
        timeleft = wait_event_interruptible_timeout(arEvent,
            (ar->arWmiReady == true), wmitimeout * HZ);

        if (ar->arVersion.abi_ver != AR6K_ABI_VERSION) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ABI Version mismatch: Host(0x%x), Target(0x%x)\n",
                AR6K_ABI_VERSION, ar->arVersion.abi_ver));
#ifndef ATH6K_SKIP_ABI_VERSION_CHECK
            ret = -EIO;
            goto ar6000_init_done;
#endif /* ATH6K_SKIP_ABI_VERSION_CHECK */
        }

        if (!timeleft || signal_pending(current)) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI is not ready or wait was interrupted\n"));
            ret = -EIO;
            goto ar6000_init_done;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() WMI is ready\n", __func__));

        /* Communicate the WMI protocol version to the target */
        if ((ar6000_set_host_app_area(ar)) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set the host app area\n"));
        }

        ar6000_target_config_wlan_params(ar);
    }

    ar->arNumDataEndPts = 1;

    if (bypasswmi) {
        /* for tests like endpoint ping, the MAC address needs to be non-zero otherwise
         * the data path through a raw socket is disabled */
        dev->dev_addr[0] = 0x00;
        dev->dev_addr[1] = 0x01;
        dev->dev_addr[2] = 0x02;
        dev->dev_addr[3] = 0xAA;
        dev->dev_addr[4] = 0xBB;
        dev->dev_addr[5] = 0xCC;
    }

ar6000_init_done:
    rtnl_lock();
    dev_put(dev);

    return ret;
}

void ar6000_bitrate_rx(void *devt, s32 rateKbps)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    ar->arBitRate = rateKbps;
    wake_up(&arEvent);
}

void ar6000_ratemask_rx(void *devt, u32 ratemask)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    ar->arRateMask = ratemask;
    wake_up(&arEvent);
}

void ar6000_txPwr_rx(void *devt, u8 txPwr)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    ar->arTxPwr = txPwr;
    wake_up(&arEvent);
}

void ar6000_channelList_rx(void *devt, s8 numChan, u16 *chanList)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    memcpy(ar->arChannelList, chanList, numChan * sizeof(u16));
    ar->arNumChannels = numChan;

    wake_up(&arEvent);
}

u8 ar6000_ibss_map_epid(struct sk_buff *skb, struct net_device *dev, u32 *mapNo)
{
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
    u8 *datap;
    ATH_MAC_HDR *macHdr;
    u32 i, eptMap;

    (*mapNo) = 0;
    datap = A_NETBUF_DATA(skb);
    macHdr = (ATH_MAC_HDR *)(datap + sizeof(WMI_DATA_HDR));
    if (IEEE80211_IS_MULTICAST(macHdr->dstMac)) {
        return ENDPOINT_2;
    }

    eptMap = -1;
    for (i = 0; i < ar->arNodeNum; i++) {
        if (IEEE80211_ADDR_EQ(macHdr->dstMac, ar->arNodeMap[i].macAddress)) {
            (*mapNo) = i + 1;
            ar->arNodeMap[i].txPending++;
            return ar->arNodeMap[i].epId;
        }

        if ((eptMap == -1) && !ar->arNodeMap[i].txPending) {
            eptMap = i;
        }
    }

    if (eptMap == -1) {
        eptMap = ar->arNodeNum;
        ar->arNodeNum++;
        A_ASSERT(ar->arNodeNum <= MAX_NODE_NUM);
    }

    memcpy(ar->arNodeMap[eptMap].macAddress, macHdr->dstMac, IEEE80211_ADDR_LEN);

    for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
        if (!ar->arTxPending[i]) {
            ar->arNodeMap[eptMap].epId = i;
            break;
        }
        // No free endpoint is available, start redistribution on the in-use endpoints.
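        // Redistribution below round-robins arNexEpId over ENDPOINT_2..ENDPOINT_5,
        // so an already-busy endpoint may end up shared by more than one peer.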
        if (i == ENDPOINT_5) {
            ar->arNodeMap[eptMap].epId = ar->arNexEpId;
            ar->arNexEpId++;
            if (ar->arNexEpId > ENDPOINT_5) {
                ar->arNexEpId = ENDPOINT_2;
            }
        }
    }
    (*mapNo) = eptMap + 1;
    ar->arNodeMap[eptMap].txPending++;

    return ar->arNodeMap[eptMap].epId;
}

#ifdef DEBUG
static void ar6000_dump_skb(struct sk_buff *skb)
{
    u_char *ch;

    for (ch = A_NETBUF_DATA(skb);
         (unsigned long)ch < ((unsigned long)A_NETBUF_DATA(skb) + A_NETBUF_LEN(skb));
         ch++) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("%2.2x ", *ch));
    }
    AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("\n"));
}
#endif

#ifdef HTC_TEST_SEND_PKTS
static void DoHTCSendPktsTest(struct ar6_softc *ar, int MapNo, HTC_ENDPOINT_ID eid, struct sk_buff *skb);
#endif

static int ar6000_data_tx(struct sk_buff *skb, struct net_device *dev)
{
#define AC_NOT_MAPPED 99
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
    u8 ac = AC_NOT_MAPPED;
    HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED;
    u32 mapNo = 0;
    int len;
    struct ar_cookie *cookie;
    bool checkAdHocPsMapping = false, bMoreData = false;
    HTC_TX_TAG htc_tag = AR6K_DATA_PKT_TAG;
    u8 dot11Hdr = processDot11Hdr;

#ifdef CONFIG_PM
    if (ar->arWowState != WLAN_WOW_STATE_NONE) {
        A_NETBUF_FREE(skb);
        return 0;
    }
#endif /* CONFIG_PM */

    AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_data_tx start - skb=0x%lx, data=0x%lx, len=0x%x\n",
        (unsigned long)skb, (unsigned long)A_NETBUF_DATA(skb), A_NETBUF_LEN(skb)));

    /* If the target is not associated */
    if ((!ar->arConnected && !bypasswmi)
#ifdef CONFIG_HOST_TCMD_SUPPORT
        /* TCMD doesn't support any data, free the buf and return */
        || (ar->arTargetMode == AR6000_TCMD_MODE)
#endif
       ) {
        A_NETBUF_FREE(skb);
        return 0;
    }

    do {
        if (ar->arWmiReady == false && bypasswmi == 0) {
            break;
        }

#ifdef BLOCK_TX_PATH_FLAG
        if (blocktx) {
            break;
        }
#endif /* BLOCK_TX_PATH_FLAG */

        /* AP mode power save processing:
         * if the dst STA is in sleep state, queue the pkt in its PS queue */
        if (ar->arNetworkType == AP_NETWORK) {
            ATH_MAC_HDR *datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb);
            sta_t *conn = NULL;

            /* If the dstMac is a multicast address & at least one of the
             * associated STAs is in PS mode, then queue the pkt to the
             * mcastq */
            if (IEEE80211_IS_MULTICAST(datap->dstMac)) {
                u8 ctr = 0;
                bool qMcast = false;

                for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
                    if (STA_IS_PWR_SLEEP((&ar->sta_list[ctr]))) {
                        qMcast = true;
                    }
                }
                if (qMcast) {
                    /* If this transmit is not because of a DTIM expiry, queue it */
                    if (ar->DTIMExpired == false) {
                        bool isMcastqEmpty = false;

                        A_MUTEX_LOCK(&ar->mcastpsqLock);
                        isMcastqEmpty = A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq);
                        A_NETBUF_ENQUEUE(&ar->mcastpsq, skb);
                        A_MUTEX_UNLOCK(&ar->mcastpsqLock);

                        /* If this is the first mcast pkt getting queued,
                         * indicate to the target to set the BitmapControl LSB
                         * of the TIM IE.
                         */
                        if (isMcastqEmpty) {
                            wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 1);
                        }
                        return 0;
                    } else {
                        /* This transmit is because of a DTIM expiry. Determine if the
                         * MoreData bit has to be set.
                         */
                        A_MUTEX_LOCK(&ar->mcastpsqLock);
                        if (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
                            bMoreData = true;
                        }
                        A_MUTEX_UNLOCK(&ar->mcastpsqLock);
                    }
                }
            } else {
                conn = ieee80211_find_conn(ar, datap->dstMac);
                if (conn) {
                    if (STA_IS_PWR_SLEEP(conn)) {
                        /* If this transmit is not because of a PsPoll, queue it */
                        if (!STA_IS_PS_POLLED(conn)) {
                            bool isPsqEmpty = false;

                            /* Queue the frames if the STA is sleeping */
                            A_MUTEX_LOCK(&conn->psqLock);
                            isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
                            A_NETBUF_ENQUEUE(&conn->psq, skb);
                            A_MUTEX_UNLOCK(&conn->psqLock);

                            /* If this is the first pkt getting queued
                             * for this STA, update the PVB for this STA
                             */
                            if (isPsqEmpty) {
                                wmi_set_pvb_cmd(ar->arWmi, conn->aid, 1);
                            }
                            return 0;
                        } else {
                            /* This tx is because of a PsPoll. Determine if the
                             * MoreData bit has to be set
                             */
                            A_MUTEX_LOCK(&conn->psqLock);
                            if (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) {
                                bMoreData = true;
                            }
                            A_MUTEX_UNLOCK(&conn->psqLock);
                        }
                    }
                } else {
                    /* non-existent STA, drop the frame */
                    A_NETBUF_FREE(skb);
                    return 0;
                }
            }
        }

        if (ar->arWmiEnabled) {
            u8 csumStart = 0;
            u8 csumDest = 0;
            u8 csum = skb->ip_summed;

            if (csumOffload && (csum == CHECKSUM_PARTIAL)) {
                csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
                             sizeof(ATH_LLC_SNAP_HDR));
                csumDest = skb->csum_offset + csumStart;
            }

            if (A_NETBUF_HEADROOM(skb) < dev->hard_header_len - LINUX_HACK_FUDGE_FACTOR) {
                struct sk_buff *newbuf;
                /*
                 * We really should have gotten enough headroom but sometimes
                 * we still get packets with not enough headroom. Copy the packet.
                 */
                len = A_NETBUF_LEN(skb);
                newbuf = A_NETBUF_ALLOC(len);
                if (newbuf == NULL) {
                    break;
                }
                A_NETBUF_PUT(newbuf, len);
                memcpy(A_NETBUF_DATA(newbuf), A_NETBUF_DATA(skb), len);
                A_NETBUF_FREE(skb);
                skb = newbuf;
                /* fall through and assemble header */
            }

            if (dot11Hdr) {
                if (wmi_dot11_hdr_add(ar->arWmi, skb, ar->arNetworkType) != 0) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_dot11_hdr_add failed\n"));
                    break;
                }
            } else {
                if (wmi_dix_2_dot3(ar->arWmi, skb) != 0) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_dix_2_dot3 failed\n"));
                    break;
                }
            }

            if (csumOffload && (csum == CHECKSUM_PARTIAL)) {
                WMI_TX_META_V2 metaV2;
                metaV2.csumStart = csumStart;
                metaV2.csumDest = csumDest;
                metaV2.csumFlags = 0x1; /* instruct target to calculate checksum */
                if (wmi_data_hdr_add(ar->arWmi, skb, DATA_MSGTYPE, bMoreData, dot11Hdr,
                                     WMI_META_VERSION_2, &metaV2) != 0) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_data_hdr_add failed\n"));
                    break;
                }
            } else {
                if (wmi_data_hdr_add(ar->arWmi, skb, DATA_MSGTYPE, bMoreData,
                                     dot11Hdr, 0, NULL) != 0) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_data_hdr_add failed\n"));
                    break;
                }
            }

            if ((ar->arNetworkType == ADHOC_NETWORK) &&
                ar->arIbssPsEnable && ar->arConnected) {
                /* flag to check adhoc mapping once we take the lock below: */
                checkAdHocPsMapping = true;
            } else {
                /* get the stream mapping */
                ac = wmi_implicit_create_pstream(ar->arWmi, skb, 0, ar->arWmmEnabled);
            }
        } else {
            EPPING_HEADER *eppingHdr;

            eppingHdr = A_NETBUF_DATA(skb);

            if (IS_EPPING_PACKET(eppingHdr)) {
                /* the stream ID is mapped to an access class */
                ac = eppingHdr->StreamNo_h;
                /* some EPPING packets cannot be dropped no matter what access class it was
                 * sent on.
We can change the packet tag to guarantee it will not get dropped */ if (IS_EPING_PACKET_NO_DROP(eppingHdr)) { htc_tag = AR6K_CONTROL_PKT_TAG; } if (ac == HCI_TRANSPORT_STREAM_NUM) { /* pass this to HCI */ #ifndef EXPORT_HCI_BRIDGE_INTERFACE if (!hci_test_send(ar,skb)) { return 0; } #endif /* set AC to discard this skb */ ac = AC_NOT_MAPPED; } else { /* a quirk of linux, the payload of the frame is 32-bit aligned and thus the addition * of the HTC header will mis-align the start of the HTC frame, so we add some * padding which will be stripped off in the target */ if (EPPING_ALIGNMENT_PAD > 0) { A_NETBUF_PUSH(skb, EPPING_ALIGNMENT_PAD); } } } else { /* not a ping packet, drop it */ ac = AC_NOT_MAPPED; } } } while (false); /* did we succeed ? */ if ((ac == AC_NOT_MAPPED) && !checkAdHocPsMapping) { /* cleanup and exit */ A_NETBUF_FREE(skb); AR6000_STAT_INC(ar, tx_dropped); AR6000_STAT_INC(ar, tx_aborted_errors); return 0; } cookie = NULL; /* take the lock to protect driver data */ AR6000_SPIN_LOCK(&ar->arLock, 0); do { if (checkAdHocPsMapping) { eid = ar6000_ibss_map_epid(skb, dev, &mapNo); }else { eid = arAc2EndpointID (ar, ac); } /* validate that the endpoint is connected */ if (eid == 0 || eid == ENDPOINT_UNUSED ) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" eid %d is NOT mapped!\n", eid)); break; } /* allocate resource for this packet */ cookie = ar6000_alloc_cookie(ar); if (cookie != NULL) { /* update counts while the lock is held */ ar->arTxPending[eid]++; ar->arTotalTxDataPending++; } } while (false); AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (cookie != NULL) { cookie->arc_bp[0] = (unsigned long)skb; cookie->arc_bp[1] = mapNo; SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, cookie, A_NETBUF_DATA(skb), A_NETBUF_LEN(skb), eid, htc_tag); #ifdef DEBUG if (debugdriver >= 3) { ar6000_dump_skb(skb); } #endif #ifdef HTC_TEST_SEND_PKTS DoHTCSendPktsTest(ar,mapNo,eid,skb); #endif /* HTC interface is asynchronous, if this fails, cleanup will happen in * the ar6000_tx_complete callback */ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt); } else { /* no packet to send, cleanup */ A_NETBUF_FREE(skb); AR6000_STAT_INC(ar, tx_dropped); AR6000_STAT_INC(ar, tx_aborted_errors); } return 0; } int ar6000_acl_data_tx(struct sk_buff *skb, struct net_device *dev) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); struct ar_cookie *cookie; HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED; cookie = NULL; AR6000_SPIN_LOCK(&ar->arLock, 0); /* For now we send ACL on BE endpoint: We can also have a dedicated EP */ eid = arAc2EndpointID (ar, 0); /* allocate resource for this packet */ cookie = ar6000_alloc_cookie(ar); if (cookie != NULL) { /* update counts while the lock is held */ ar->arTxPending[eid]++; ar->arTotalTxDataPending++; } AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (cookie != NULL) { cookie->arc_bp[0] = (unsigned long)skb; cookie->arc_bp[1] = 0; SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, cookie, A_NETBUF_DATA(skb), A_NETBUF_LEN(skb), eid, AR6K_DATA_PKT_TAG); /* HTC interface is asynchronous, if this fails, cleanup will happen in * the ar6000_tx_complete callback */ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt); } else { /* no packet to send, cleanup */ A_NETBUF_FREE(skb); AR6000_STAT_INC(ar, tx_dropped); AR6000_STAT_INC(ar, tx_aborted_errors); } return 0; } #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL static void tvsub(register struct timeval *out, register struct timeval *in) { if((out->tv_usec -= in->tv_usec) < 0) { out->tv_sec--; out->tv_usec += 1000000; } out->tv_sec -= in->tv_sec; } void applyAPTCHeuristics(struct ar6_softc *ar) { u32 
duration; u32 numbytes; u32 throughput; struct timeval ts; int status; AR6000_SPIN_LOCK(&ar->arLock, 0); if ((enableAPTCHeuristics) && (!aptcTR.timerScheduled)) { do_gettimeofday(&ts); tvsub(&ts, &aptcTR.samplingTS); duration = ts.tv_sec * 1000 + ts.tv_usec / 1000; /* ms */ numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived; if (duration > APTC_TRAFFIC_SAMPLING_INTERVAL) { /* Initialize the time stamp and byte count */ aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0; do_gettimeofday(&aptcTR.samplingTS); /* Calculate and decide based on throughput thresholds */ throughput = ((numbytes * 8) / duration); if (throughput > APTC_UPPER_THROUGHPUT_THRESHOLD) { /* Disable Sleep and schedule a timer */ A_ASSERT(ar->arWmiReady == true); AR6000_SPIN_UNLOCK(&ar->arLock, 0); status = wmi_powermode_cmd(ar->arWmi, MAX_PERF_POWER); AR6000_SPIN_LOCK(&ar->arLock, 0); A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0); aptcTR.timerScheduled = true; } } } AR6000_SPIN_UNLOCK(&ar->arLock, 0); } #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ static HTC_SEND_FULL_ACTION ar6000_tx_queue_full(void *Context, struct htc_packet *pPacket) { struct ar6_softc *ar = (struct ar6_softc *)Context; HTC_SEND_FULL_ACTION action = HTC_SEND_FULL_KEEP; bool stopNet = false; HTC_ENDPOINT_ID Endpoint = HTC_GET_ENDPOINT_FROM_PKT(pPacket); do { if (bypasswmi) { int accessClass; if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_CONTROL_PKT_TAG) { /* don't drop special control packets */ break; } accessClass = arEndpoint2Ac(ar,Endpoint); /* for endpoint ping testing drop Best Effort and Background */ if ((accessClass == WMM_AC_BE) || (accessClass == WMM_AC_BK)) { action = HTC_SEND_FULL_DROP; stopNet = false; } else { /* keep but stop the netqueues */ stopNet = true; } break; } if (Endpoint == ar->arControlEp) { /* under normal WMI if this is getting full, then something is running rampant * the host should not be exhausting the WMI queue with too many commands * the only exception to this is during testing using endpointping */ AR6000_SPIN_LOCK(&ar->arLock, 0); /* set flag to handle subsequent messages */ ar->arWMIControlEpFull = true; AR6000_SPIN_UNLOCK(&ar->arLock, 0); AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI Control Endpoint is FULL!!! 
\n")); /* no need to stop the network */ stopNet = false; break; } /* if we get here, we are dealing with data endpoints getting full */ if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_CONTROL_PKT_TAG) { /* don't drop control packets issued on ANY data endpoint */ break; } if (ar->arNetworkType == ADHOC_NETWORK) { /* in adhoc mode, we cannot differentiate traffic priorities so there is no need to * continue, however we should stop the network */ stopNet = true; break; } /* the last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for the highest * active stream */ if (ar->arAcStreamPriMap[arEndpoint2Ac(ar,Endpoint)] < ar->arHiAcStreamActivePri && ar->arCookieCount <= MAX_HI_COOKIE_NUM) { /* this stream's priority is less than the highest active priority, we * give preference to the highest priority stream by directing * HTC to drop the packet that overflowed */ action = HTC_SEND_FULL_DROP; /* since we are dropping packets, no need to stop the network */ stopNet = false; break; } } while (false); if (stopNet) { AR6000_SPIN_LOCK(&ar->arLock, 0); ar->arNetQueueStopped = true; AR6000_SPIN_UNLOCK(&ar->arLock, 0); /* one of the data endpoints queues is getting full..need to stop network stack * the queue will resume in ar6000_tx_complete() */ netif_stop_queue(ar->arNetDev); } return action; } static void ar6000_tx_complete(void *Context, struct htc_packet_queue *pPacketQueue) { struct ar6_softc *ar = (struct ar6_softc *)Context; u32 mapNo = 0; int status; struct ar_cookie * ar_cookie; HTC_ENDPOINT_ID eid; bool wakeEvent = false; struct sk_buff_head skb_queue; struct htc_packet *pPacket; struct sk_buff *pktSkb; bool flushing = false; skb_queue_head_init(&skb_queue); /* lock the driver as we update internal state */ AR6000_SPIN_LOCK(&ar->arLock, 0); /* reap completed packets */ while (!HTC_QUEUE_EMPTY(pPacketQueue)) { pPacket = HTC_PACKET_DEQUEUE(pPacketQueue); ar_cookie = (struct ar_cookie *)pPacket->pPktContext; A_ASSERT(ar_cookie); status = pPacket->Status; pktSkb = (struct sk_buff *)ar_cookie->arc_bp[0]; eid = pPacket->Endpoint; mapNo = ar_cookie->arc_bp[1]; A_ASSERT(pktSkb); A_ASSERT(pPacket->pBuffer == A_NETBUF_DATA(pktSkb)); /* add this to the list, use faster non-lock API */ __skb_queue_tail(&skb_queue,pktSkb); if (!status) { A_ASSERT(pPacket->ActualLength == A_NETBUF_LEN(pktSkb)); } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_tx_complete skb=0x%lx data=0x%lx len=0x%x eid=%d ", (unsigned long)pktSkb, (unsigned long)pPacket->pBuffer, pPacket->ActualLength, eid)); ar->arTxPending[eid]--; if ((eid != ar->arControlEp) || bypasswmi) { ar->arTotalTxDataPending--; } if (eid == ar->arControlEp) { if (ar->arWMIControlEpFull) { /* since this packet completed, the WMI EP is no longer full */ ar->arWMIControlEpFull = false; } if (ar->arTxPending[eid] == 0) { wakeEvent = true; } } if (status) { if (status == A_ECANCELED) { /* a packet was flushed */ flushing = true; } AR6000_STAT_INC(ar, tx_errors); if (status != A_NO_RESOURCE) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() -TX ERROR, status: 0x%x\n", __func__, status)); } } else { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("OK\n")); flushing = false; AR6000_STAT_INC(ar, tx_packets); ar->arNetStats.tx_bytes += A_NETBUF_LEN(pktSkb); #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL aptcTR.bytesTransmitted += a_netbuf_to_len(pktSkb); applyAPTCHeuristics(ar); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ } // TODO this needs to be looked at if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable && (eid != ar->arControlEp) && mapNo) { mapNo --; ar->arNodeMap[mapNo].txPending 
--; if (!ar->arNodeMap[mapNo].txPending && (mapNo == (ar->arNodeNum - 1))) { u32 i; for (i = ar->arNodeNum; i > 0; i --) { if (!ar->arNodeMap[i - 1].txPending) { A_MEMZERO(&ar->arNodeMap[i - 1], sizeof(struct ar_node_mapping)); ar->arNodeNum --; } else { break; } } } } ar6000_free_cookie(ar, ar_cookie); if (ar->arNetQueueStopped) { ar->arNetQueueStopped = false; } } AR6000_SPIN_UNLOCK(&ar->arLock, 0); /* lock is released, we can freely call other kernel APIs */ /* free all skbs in our local list */ while (!skb_queue_empty(&skb_queue)) { /* use non-lock version */ pktSkb = __skb_dequeue(&skb_queue); A_NETBUF_FREE(pktSkb); } if ((ar->arConnected == true) || bypasswmi) { if (!flushing) { /* don't wake the queue if we are flushing, other wise it will just * keep queueing packets, which will keep failing */ netif_wake_queue(ar->arNetDev); } } if (wakeEvent) { wake_up(&arEvent); } } sta_t * ieee80211_find_conn(struct ar6_softc *ar, u8 *node_addr) { sta_t *conn = NULL; u8 i, max_conn; switch(ar->arNetworkType) { case AP_NETWORK: max_conn = AP_MAX_NUM_STA; break; default: max_conn=0; break; } for (i = 0; i < max_conn; i++) { if (IEEE80211_ADDR_EQ(node_addr, ar->sta_list[i].mac)) { conn = &ar->sta_list[i]; break; } } return conn; } sta_t *ieee80211_find_conn_for_aid(struct ar6_softc *ar, u8 aid) { sta_t *conn = NULL; u8 ctr; for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { if (ar->sta_list[ctr].aid == aid) { conn = &ar->sta_list[ctr]; break; } } return conn; } /* * Receive event handler. This is called by HTC when a packet is received */ int pktcount; static void ar6000_rx(void *Context, struct htc_packet *pPacket) { struct ar6_softc *ar = (struct ar6_softc *)Context; struct sk_buff *skb = (struct sk_buff *)pPacket->pPktContext; int minHdrLen; u8 containsDot11Hdr = 0; int status = pPacket->Status; HTC_ENDPOINT_ID ept = pPacket->Endpoint; A_ASSERT((status) || (pPacket->pBuffer == (A_NETBUF_DATA(skb) + HTC_HEADER_LEN))); AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx ar=0x%lx eid=%d, skb=0x%lx, data=0x%lx, len=0x%x status:%d", (unsigned long)ar, ept, (unsigned long)skb, (unsigned long)pPacket->pBuffer, pPacket->ActualLength, status)); if (status) { if (status != A_ECANCELED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("RX ERR (%d) \n",status)); } } /* take lock to protect buffer counts * and adaptive power throughput state */ AR6000_SPIN_LOCK(&ar->arLock, 0); if (!status) { AR6000_STAT_INC(ar, rx_packets); ar->arNetStats.rx_bytes += pPacket->ActualLength; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL aptcTR.bytesReceived += a_netbuf_to_len(skb); applyAPTCHeuristics(ar); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ A_NETBUF_PUT(skb, pPacket->ActualLength + HTC_HEADER_LEN); A_NETBUF_PULL(skb, HTC_HEADER_LEN); #ifdef DEBUG if (debugdriver >= 2) { ar6000_dump_skb(skb); } #endif /* DEBUG */ } AR6000_SPIN_UNLOCK(&ar->arLock, 0); skb->dev = ar->arNetDev; if (status) { AR6000_STAT_INC(ar, rx_errors); A_NETBUF_FREE(skb); } else if (ar->arWmiEnabled == true) { if (ept == ar->arControlEp) { /* * this is a wmi control msg */ #ifdef CONFIG_PM ar6000_check_wow_status(ar, skb, true); #endif /* CONFIG_PM */ wmi_control_rx(ar->arWmi, skb); } else { WMI_DATA_HDR *dhdr = (WMI_DATA_HDR *)A_NETBUF_DATA(skb); bool is_amsdu; u8 tid; /* * This check can be removed if after a while we do not * see the warning. For now we leave it to ensure * we drop these frames accordingly in case the * target generates them for some reason. These * were used for an internal PAL but that's not * used or supported anymore. 
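             * (ACL here refers to the Bluetooth ACL data type that the old
             * PAL bridge carried over WMI, which is what the
             * WMI_DATA_HDR_DATA_TYPE_ACL check below is screening for.)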
These frames should * not come up from the target. */ if (WARN_ON(WMI_DATA_HDR_GET_DATA_TYPE(dhdr) == WMI_DATA_HDR_DATA_TYPE_ACL)) { AR6000_STAT_INC(ar, rx_errors); A_NETBUF_FREE(skb); return; } #ifdef CONFIG_PM ar6000_check_wow_status(ar, NULL, false); #endif /* CONFIG_PM */ /* * this is a wmi data packet */ // NWF if (processDot11Hdr) { minHdrLen = sizeof(WMI_DATA_HDR) + sizeof(struct ieee80211_frame) + sizeof(ATH_LLC_SNAP_HDR); } else { minHdrLen = sizeof (WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR); } /* In the case of AP mode we may receive NULL data frames * that do not have LLC hdr. They are 16 bytes in size. * Allow these frames in the AP mode. * ACL data frames don't follow ethernet frame bounds for * min length */ if (ar->arNetworkType != AP_NETWORK && ((pPacket->ActualLength < minHdrLen) || (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE))) { /* * packet is too short or too long */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("TOO SHORT or TOO LONG\n")); AR6000_STAT_INC(ar, rx_errors); AR6000_STAT_INC(ar, rx_length_errors); A_NETBUF_FREE(skb); } else { u16 seq_no; u8 meta_type; #if 0 /* Access RSSI values here */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("RSSI %d\n", ((WMI_DATA_HDR *) A_NETBUF_DATA(skb))->rssi)); #endif /* Get the Power save state of the STA */ if (ar->arNetworkType == AP_NETWORK) { sta_t *conn = NULL; u8 psState=0,prevPsState; ATH_MAC_HDR *datap=NULL; u16 offset; meta_type = WMI_DATA_HDR_GET_META(dhdr); psState = (((WMI_DATA_HDR *)A_NETBUF_DATA(skb))->info >> WMI_DATA_HDR_PS_SHIFT) & WMI_DATA_HDR_PS_MASK; offset = sizeof(WMI_DATA_HDR); switch (meta_type) { case 0: break; case WMI_META_VERSION_1: offset += sizeof(WMI_RX_META_V1); break; case WMI_META_VERSION_2: offset += sizeof(WMI_RX_META_V2); break; default: break; } datap = (ATH_MAC_HDR *)(A_NETBUF_DATA(skb)+offset); conn = ieee80211_find_conn(ar, datap->srcMac); if (conn) { /* if there is a change in PS state of the STA, * take appropriate steps. * 1. If Sleep-->Awake, flush the psq for the STA * Clear the PVB for the STA. * 2. If Awake-->Sleep, Starting queueing frames * the STA. */ prevPsState = STA_IS_PWR_SLEEP(conn); if (psState) { STA_SET_PWR_SLEEP(conn); } else { STA_CLR_PWR_SLEEP(conn); } if (prevPsState ^ STA_IS_PWR_SLEEP(conn)) { if (!STA_IS_PWR_SLEEP(conn)) { A_MUTEX_LOCK(&conn->psqLock); while (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) { struct sk_buff *skb=NULL; skb = A_NETBUF_DEQUEUE(&conn->psq); A_MUTEX_UNLOCK(&conn->psqLock); ar6000_data_tx(skb,ar->arNetDev); A_MUTEX_LOCK(&conn->psqLock); } A_MUTEX_UNLOCK(&conn->psqLock); /* Clear the PVB for this STA */ wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0); } } } else { /* This frame is from a STA that is not associated*/ A_ASSERT(false); } /* Drop NULL data frames here */ if((pPacket->ActualLength < minHdrLen) || (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE)) { A_NETBUF_FREE(skb); goto rx_done; } } is_amsdu = WMI_DATA_HDR_IS_AMSDU(dhdr) ? 
true : false; tid = WMI_DATA_HDR_GET_UP(dhdr); seq_no = WMI_DATA_HDR_GET_SEQNO(dhdr); meta_type = WMI_DATA_HDR_GET_META(dhdr); containsDot11Hdr = WMI_DATA_HDR_GET_DOT11(dhdr); wmi_data_hdr_remove(ar->arWmi, skb); switch (meta_type) { case WMI_META_VERSION_1: { WMI_RX_META_V1 *pMeta = (WMI_RX_META_V1 *)A_NETBUF_DATA(skb); A_PRINTF("META %d %d %d %d %x\n", pMeta->status, pMeta->rix, pMeta->rssi, pMeta->channel, pMeta->flags); A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V1)); break; } case WMI_META_VERSION_2: { WMI_RX_META_V2 *pMeta = (WMI_RX_META_V2 *)A_NETBUF_DATA(skb); if(pMeta->csumFlags & 0x1){ skb->ip_summed=CHECKSUM_COMPLETE; skb->csum=(pMeta->csum); } A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V2)); break; } default: break; } A_ASSERT(status == 0); /* NWF: print the 802.11 hdr bytes */ if(containsDot11Hdr) { status = wmi_dot11_hdr_remove(ar->arWmi,skb); } else if(!is_amsdu) { status = wmi_dot3_2_dix(skb); } if (status) { /* Drop frames that could not be processed (lack of memory, etc.) */ A_NETBUF_FREE(skb); goto rx_done; } if ((ar->arNetDev->flags & IFF_UP) == IFF_UP) { if (ar->arNetworkType == AP_NETWORK) { struct sk_buff *skb1 = NULL; ATH_MAC_HDR *datap; datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb); if (IEEE80211_IS_MULTICAST(datap->dstMac)) { /* Bcast/Mcast frames should be sent to the OS * stack as well as on the air. */ skb1 = skb_copy(skb,GFP_ATOMIC); } else { /* Search for a connected STA with dstMac as * the Mac address. If found send the frame to * it on the air else send the frame up the * stack */ sta_t *conn = NULL; conn = ieee80211_find_conn(ar, datap->dstMac); if (conn && ar->intra_bss) { skb1 = skb; skb = NULL; } else if(conn && !ar->intra_bss) { A_NETBUF_FREE(skb); skb = NULL; } } if (skb1) { ar6000_data_tx(skb1, ar->arNetDev); } } } aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, is_amsdu, (void **)&skb); ar6000_deliver_frames_to_nw_stack((void *) ar->arNetDev, (void *)skb); } } } else { if (EPPING_ALIGNMENT_PAD > 0) { A_NETBUF_PULL(skb, EPPING_ALIGNMENT_PAD); } ar6000_deliver_frames_to_nw_stack((void *)ar->arNetDev, (void *)skb); } rx_done: return; } static void ar6000_deliver_frames_to_nw_stack(void *dev, void *osbuf) { struct sk_buff *skb = (struct sk_buff *)osbuf; if(skb) { skb->dev = dev; if ((skb->dev->flags & IFF_UP) == IFF_UP) { #ifdef CONFIG_PM ar6000_check_wow_status((struct ar6_softc *)ar6k_priv(dev), skb, false); #endif /* CONFIG_PM */ skb->protocol = eth_type_trans(skb, skb->dev); /* * If this routine is called on a ISR (Hard IRQ) or DSR (Soft IRQ) * or tasklet use the netif_rx to deliver the packet to the stack * netif_rx will queue the packet onto the receive queue and mark * the softirq thread has a pending action to complete. Kernel will * schedule the softIrq kernel thread after processing the DSR. * * If this routine is called on a process context, use netif_rx_ni * which will schedle the softIrq kernel thread after queuing the packet. 
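             * (in_interrupt() is nonzero in both hard-IRQ and softirq/tasklet
             * context, so the single test below is enough to pick the right
             * variant.)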
*/ if (in_interrupt()) { netif_rx(skb); } else { netif_rx_ni(skb); } } else { A_NETBUF_FREE(skb); } } } #if 0 static void ar6000_deliver_frames_to_bt_stack(void *dev, void *osbuf) { struct sk_buff *skb = (struct sk_buff *)osbuf; if(skb) { skb->dev = dev; if ((skb->dev->flags & IFF_UP) == IFF_UP) { skb->protocol = htons(ETH_P_CONTROL); netif_rx(skb); } else { A_NETBUF_FREE(skb); } } } #endif static void ar6000_rx_refill(void *Context, HTC_ENDPOINT_ID Endpoint) { struct ar6_softc *ar = (struct ar6_softc *)Context; void *osBuf; int RxBuffers; int buffersToRefill; struct htc_packet *pPacket; struct htc_packet_queue queue; buffersToRefill = (int)AR6000_MAX_RX_BUFFERS - HTCGetNumRecvBuffers(ar->arHtcTarget, Endpoint); if (buffersToRefill <= 0) { /* fast return, nothing to fill */ return; } INIT_HTC_PACKET_QUEUE(&queue); AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx_refill: providing htc with %d buffers at eid=%d\n", buffersToRefill, Endpoint)); for (RxBuffers = 0; RxBuffers < buffersToRefill; RxBuffers++) { osBuf = A_NETBUF_ALLOC(AR6000_BUFFER_SIZE); if (NULL == osBuf) { break; } /* the HTC packet wrapper is at the head of the reserved area * in the skb */ pPacket = (struct htc_packet *)(A_NETBUF_HEAD(osBuf)); /* set re-fill info */ SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),AR6000_BUFFER_SIZE,Endpoint); /* add to queue */ HTC_PACKET_ENQUEUE(&queue,pPacket); } if (!HTC_QUEUE_EMPTY(&queue)) { /* add packets */ HTCAddReceivePktMultiple(ar->arHtcTarget, &queue); } } /* clean up our amsdu buffer list */ static void ar6000_cleanup_amsdu_rxbufs(struct ar6_softc *ar) { struct htc_packet *pPacket; void *osBuf; /* empty AMSDU buffer queue and free OS bufs */ while (true) { AR6000_SPIN_LOCK(&ar->arLock, 0); pPacket = HTC_PACKET_DEQUEUE(&ar->amsdu_rx_buffer_queue); AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (NULL == pPacket) { break; } osBuf = pPacket->pPktContext; if (NULL == osBuf) { A_ASSERT(false); break; } A_NETBUF_FREE(osBuf); } } /* refill the amsdu buffer list */ static void ar6000_refill_amsdu_rxbufs(struct ar6_softc *ar, int Count) { struct htc_packet *pPacket; void *osBuf; while (Count > 0) { osBuf = A_NETBUF_ALLOC(AR6000_AMSDU_BUFFER_SIZE); if (NULL == osBuf) { break; } /* the HTC packet wrapper is at the head of the reserved area * in the skb */ pPacket = (struct htc_packet *)(A_NETBUF_HEAD(osBuf)); /* set re-fill info */ SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),AR6000_AMSDU_BUFFER_SIZE,0); AR6000_SPIN_LOCK(&ar->arLock, 0); /* put it in the list */ HTC_PACKET_ENQUEUE(&ar->amsdu_rx_buffer_queue,pPacket); AR6000_SPIN_UNLOCK(&ar->arLock, 0); Count--; } } /* callback to allocate a large receive buffer for a pending packet. This function is called when * an HTC packet arrives whose length exceeds a threshold value * * We use a pre-allocated list of buffers of maximum AMSDU size (4K). Under linux it is more optimal to * keep the allocation size the same to optimize cached-slab allocations. 
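 * (The pool is refilled in batches: ar6000_alloc_amsdu_rxbuf() calls
 * ar6000_refill_amsdu_rxbufs() only once at least
 * AR6000_AMSDU_REFILL_THRESHOLD buffers have been consumed from the queue.)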
* * */ static struct htc_packet *ar6000_alloc_amsdu_rxbuf(void *Context, HTC_ENDPOINT_ID Endpoint, int Length) { struct htc_packet *pPacket = NULL; struct ar6_softc *ar = (struct ar6_softc *)Context; int refillCount = 0; AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_alloc_amsdu_rxbuf: eid=%d, Length:%d\n",Endpoint,Length)); do { if (Length <= AR6000_BUFFER_SIZE) { /* shouldn't be getting called on normal sized packets */ A_ASSERT(false); break; } if (Length > AR6000_AMSDU_BUFFER_SIZE) { A_ASSERT(false); break; } AR6000_SPIN_LOCK(&ar->arLock, 0); /* allocate a packet from the list */ pPacket = HTC_PACKET_DEQUEUE(&ar->amsdu_rx_buffer_queue); /* see if we need to refill again */ refillCount = AR6000_MAX_AMSDU_RX_BUFFERS - HTC_PACKET_QUEUE_DEPTH(&ar->amsdu_rx_buffer_queue); AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (NULL == pPacket) { break; } /* set actual endpoint ID */ pPacket->Endpoint = Endpoint; } while (false); if (refillCount >= AR6000_AMSDU_REFILL_THRESHOLD) { ar6000_refill_amsdu_rxbufs(ar,refillCount); } return pPacket; } static void ar6000_set_multicast_list(struct net_device *dev) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000: Multicast filter not supported\n")); } static struct net_device_stats * ar6000_get_stats(struct net_device *dev) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); return &ar->arNetStats; } void ar6000_ready_event(void *devt, u8 *datap, u8 phyCap, u32 sw_ver, u32 abi_ver) { struct ar6_softc *ar = (struct ar6_softc *)devt; struct net_device *dev = ar->arNetDev; memcpy(dev->dev_addr, datap, AR6000_ETH_ADDR_LEN); AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("mac address = %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5])); ar->arPhyCapability = phyCap; ar->arVersion.wlan_ver = sw_ver; ar->arVersion.abi_ver = abi_ver; /* Indicate to the waiting thread that the ready event was received */ ar->arWmiReady = true; wake_up(&arEvent); } void ar6000_install_static_wep_keys(struct ar6_softc *ar) { u8 index; u8 keyUsage; for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) { if (ar->arWepKeyList[index].arKeyLen) { keyUsage = GROUP_USAGE; if (index == ar->arDefTxKeyIndex) { keyUsage |= TX_USAGE; } wmi_addKey_cmd(ar->arWmi, index, WEP_CRYPT, keyUsage, ar->arWepKeyList[index].arKeyLen, NULL, ar->arWepKeyList[index].arKey, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); } } } void add_new_sta(struct ar6_softc *ar, u8 *mac, u16 aid, u8 *wpaie, u8 ielen, u8 keymgmt, u8 ucipher, u8 auth) { u8 free_slot=aid-1; memcpy(ar->sta_list[free_slot].mac, mac, ATH_MAC_LEN); memcpy(ar->sta_list[free_slot].wpa_ie, wpaie, ielen); ar->sta_list[free_slot].aid = aid; ar->sta_list[free_slot].keymgmt = keymgmt; ar->sta_list[free_slot].ucipher = ucipher; ar->sta_list[free_slot].auth = auth; ar->sta_list_index = ar->sta_list_index | (1 << free_slot); ar->arAPStats.sta[free_slot].aid = aid; } void ar6000_connect_event(struct ar6_softc *ar, u16 channel, u8 *bssid, u16 listenInterval, u16 beaconInterval, NETWORK_TYPE networkType, u8 beaconIeLen, u8 assocReqLen, u8 assocRespLen, u8 *assocInfo) { union iwreq_data wrqu; int i, beacon_ie_pos, assoc_resp_ie_pos, assoc_req_ie_pos; static const char *tag1 = "ASSOCINFO(ReqIEs="; static const char *tag2 = "ASSOCRESPIE="; static const char *beaconIetag = "BEACONIE="; char buf[WMI_CONTROL_MSG_MAX_LEN * 2 + strlen(tag1) + 1]; char *pos; u8 key_op_ctrl; unsigned long flags; struct ieee80211req_key *ik; CRYPTO_TYPE keyType = NONE_CRYPT; if(ar->arNetworkType & AP_NETWORK) { 
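        /*
         * In AP mode this connect event is overloaded. If the reported bssid
         * is our own MAC address it signals that the AP has started on
         * 'channel'; otherwise it describes a newly associated station, with
         * the aid packed into 'channel', the auth algorithm and key
         * management type packed into 'listenInterval', and the cipher
         * carried in 'beaconInterval' (see the add_new_sta() call below).
         */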
struct net_device *dev = ar->arNetDev; if(memcmp(dev->dev_addr, bssid, ATH_MAC_LEN)==0) { ar->arACS = channel; ik = &ar->ap_mode_bkey; switch(ar->arAuthMode) { case NONE_AUTH: if(ar->arPairwiseCrypto == WEP_CRYPT) { ar6000_install_static_wep_keys(ar); } #ifdef WAPI_ENABLE else if(ar->arPairwiseCrypto == WAPI_CRYPT) { ap_set_wapi_key(ar, ik); } #endif break; case WPA_PSK_AUTH: case WPA2_PSK_AUTH: case (WPA_PSK_AUTH|WPA2_PSK_AUTH): switch (ik->ik_type) { case IEEE80211_CIPHER_TKIP: keyType = TKIP_CRYPT; break; case IEEE80211_CIPHER_AES_CCM: keyType = AES_CRYPT; break; default: goto skip_key; } wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, keyType, GROUP_USAGE, ik->ik_keylen, (u8 *)&ik->ik_keyrsc, ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr, SYNC_BOTH_WMIFLAG); break; } skip_key: ar->arConnected = true; return; } A_PRINTF("NEW STA %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x \n " " AID=%d \n", bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5], channel); switch ((listenInterval>>8)&0xFF) { case OPEN_AUTH: A_PRINTF("AUTH: OPEN\n"); break; case SHARED_AUTH: A_PRINTF("AUTH: SHARED\n"); break; default: A_PRINTF("AUTH: Unknown\n"); break; } switch (listenInterval&0xFF) { case WPA_PSK_AUTH: A_PRINTF("KeyMgmt: WPA-PSK\n"); break; case WPA2_PSK_AUTH: A_PRINTF("KeyMgmt: WPA2-PSK\n"); break; default: A_PRINTF("KeyMgmt: NONE\n"); break; } switch (beaconInterval) { case AES_CRYPT: A_PRINTF("Cipher: AES\n"); break; case TKIP_CRYPT: A_PRINTF("Cipher: TKIP\n"); break; case WEP_CRYPT: A_PRINTF("Cipher: WEP\n"); break; #ifdef WAPI_ENABLE case WAPI_CRYPT: A_PRINTF("Cipher: WAPI\n"); break; #endif default: A_PRINTF("Cipher: NONE\n"); break; } add_new_sta(ar, bssid, channel /*aid*/, assocInfo /* WPA IE */, assocRespLen /* IE len */, listenInterval&0xFF /* Keymgmt */, beaconInterval /* cipher */, (listenInterval>>8)&0xFF /* auth alg */); /* Send event to application */ A_MEMZERO(&wrqu, sizeof(wrqu)); memcpy(wrqu.addr.sa_data, bssid, ATH_MAC_LEN); wireless_send_event(ar->arNetDev, IWEVREGISTERED, &wrqu, NULL); /* In case the queue is stopped when we switch modes, this will * wake it up */ netif_wake_queue(ar->arNetDev); return; } ar6k_cfg80211_connect_event(ar, channel, bssid, listenInterval, beaconInterval, networkType, beaconIeLen, assocReqLen, assocRespLen, assocInfo); memcpy(ar->arBssid, bssid, sizeof(ar->arBssid)); ar->arBssChannel = channel; A_PRINTF("AR6000 connected event on freq %d ", channel); A_PRINTF("with bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " " listenInterval=%d, beaconInterval = %d, beaconIeLen = %d assocReqLen=%d" " assocRespLen =%d\n", bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5], listenInterval, beaconInterval, beaconIeLen, assocReqLen, assocRespLen); if (networkType & ADHOC_NETWORK) { if (networkType & ADHOC_CREATOR) { A_PRINTF("Network: Adhoc (Creator)\n"); } else { A_PRINTF("Network: Adhoc (Joiner)\n"); } } else { A_PRINTF("Network: Infrastructure\n"); } if ((ar->arNetworkType == INFRA_NETWORK)) { wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB); } if (beaconIeLen && (sizeof(buf) > (9 + beaconIeLen * 2))) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nBeaconIEs= ")); beacon_ie_pos = 0; A_MEMZERO(buf, sizeof(buf)); sprintf(buf, "%s", beaconIetag); pos = buf + 9; for (i = beacon_ie_pos; i < beacon_ie_pos + beaconIeLen; i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i])); sprintf(pos, "%2.2x", assocInfo[i]); pos += 2; } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n")); A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.data.length = strlen(buf); 
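        /* Deliver the hex-encoded beacon IEs to user space as a custom
         * wireless event */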
wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); } if (assocRespLen && (sizeof(buf) > (12 + (assocRespLen * 2)))) { assoc_resp_ie_pos = beaconIeLen + assocReqLen + sizeof(u16) + /* capinfo*/ sizeof(u16) + /* status Code */ sizeof(u16) ; /* associd */ A_MEMZERO(buf, sizeof(buf)); sprintf(buf, "%s", tag2); pos = buf + 12; AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nAssocRespIEs= ")); /* * The Association Response Frame w.o. the WLAN header is delivered to * the host, so skip over to the IEs */ for (i = assoc_resp_ie_pos; i < assoc_resp_ie_pos + assocRespLen - 6; i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i])); sprintf(pos, "%2.2x", assocInfo[i]); pos += 2; } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n")); A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.data.length = strlen(buf); wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); } if (assocReqLen && (sizeof(buf) > (17 + (assocReqLen * 2)))) { /* * assoc Request includes capability and listen interval. Skip these. */ assoc_req_ie_pos = beaconIeLen + sizeof(u16) + /* capinfo*/ sizeof(u16); /* listen interval */ A_MEMZERO(buf, sizeof(buf)); sprintf(buf, "%s", tag1); pos = buf + 17; AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("AssocReqIEs= ")); for (i = assoc_req_ie_pos; i < assoc_req_ie_pos + assocReqLen - 4; i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i])); sprintf(pos, "%2.2x", assocInfo[i]); pos += 2; } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n")); A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.data.length = strlen(buf); wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); } if (ar->user_savedkeys_stat == USER_SAVEDKEYS_STAT_RUN && ar->user_saved_keys.keyOk == true) { key_op_ctrl = KEY_OP_VALID_MASK & ~KEY_OP_INIT_TSC; if (ar->user_key_ctrl & AR6000_USER_SETKEYS_RSC_UNCHANGED) { key_op_ctrl &= ~KEY_OP_INIT_RSC; } else { key_op_ctrl |= KEY_OP_INIT_RSC; } ar6000_reinstall_keys(ar, key_op_ctrl); } netif_wake_queue(ar->arNetDev); /* Update connect & link status atomically */ spin_lock_irqsave(&ar->arLock, flags); ar->arConnected = true; ar->arConnectPending = false; netif_carrier_on(ar->arNetDev); spin_unlock_irqrestore(&ar->arLock, flags); /* reset the rx aggr state */ aggr_reset_state(ar->aggr_cntxt); reconnect_flag = 0; A_MEMZERO(&wrqu, sizeof(wrqu)); memcpy(wrqu.addr.sa_data, bssid, IEEE80211_ADDR_LEN); wrqu.addr.sa_family = ARPHRD_ETHER; wireless_send_event(ar->arNetDev, SIOCGIWAP, &wrqu, NULL); if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable) { A_MEMZERO(ar->arNodeMap, sizeof(ar->arNodeMap)); ar->arNodeNum = 0; ar->arNexEpId = ENDPOINT_2; } if (!ar->arUserBssFilter) { wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0); } } void ar6000_set_numdataendpts(struct ar6_softc *ar, u32 num) { A_ASSERT(num <= (HTC_MAILBOX_NUM_MAX - 1)); ar->arNumDataEndPts = num; } void sta_cleanup(struct ar6_softc *ar, u8 i) { struct sk_buff *skb; /* empty the queued pkts in the PS queue if any */ A_MUTEX_LOCK(&ar->sta_list[i].psqLock); while (!A_NETBUF_QUEUE_EMPTY(&ar->sta_list[i].psq)) { skb = A_NETBUF_DEQUEUE(&ar->sta_list[i].psq); A_NETBUF_FREE(skb); } A_MUTEX_UNLOCK(&ar->sta_list[i].psqLock); /* Zero out the state fields */ A_MEMZERO(&ar->arAPStats.sta[ar->sta_list[i].aid-1], sizeof(WMI_PER_STA_STAT)); A_MEMZERO(&ar->sta_list[i].mac, ATH_MAC_LEN); A_MEMZERO(&ar->sta_list[i].wpa_ie, IEEE80211_MAX_IE); ar->sta_list[i].aid = 0; ar->sta_list[i].flags = 0; ar->sta_list_index = ar->sta_list_index & ~(1 << i); } u8 remove_sta(struct ar6_softc *ar, u8 *mac, u16 reason) { u8 i, removed=0; if(IS_MAC_NULL(mac)) 
{ return removed; } if(IS_MAC_BCAST(mac)) { A_PRINTF("DEL ALL STA\n"); for(i=0; i < AP_MAX_NUM_STA; i++) { if(!IS_MAC_NULL(ar->sta_list[i].mac)) { sta_cleanup(ar, i); removed = 1; } } } else { for(i=0; i < AP_MAX_NUM_STA; i++) { if(memcmp(ar->sta_list[i].mac, mac, ATH_MAC_LEN)==0) { A_PRINTF("DEL STA %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " " aid=%d REASON=%d\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], ar->sta_list[i].aid, reason); sta_cleanup(ar, i); removed = 1; break; } } } return removed; } void ar6000_disconnect_event(struct ar6_softc *ar, u8 reason, u8 *bssid, u8 assocRespLen, u8 *assocInfo, u16 protocolReasonStatus) { u8 i; unsigned long flags; union iwreq_data wrqu; if(ar->arNetworkType & AP_NETWORK) { union iwreq_data wrqu; struct sk_buff *skb; if(!remove_sta(ar, bssid, protocolReasonStatus)) { return; } /* If there are no more associated STAs, empty the mcast PS q */ if (ar->sta_list_index == 0) { A_MUTEX_LOCK(&ar->mcastpsqLock); while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) { skb = A_NETBUF_DEQUEUE(&ar->mcastpsq); A_NETBUF_FREE(skb); } A_MUTEX_UNLOCK(&ar->mcastpsqLock); /* Clear the LSB of the BitMapCtl field of the TIM IE */ if (ar->arWmiReady) { wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 0); } } if(!IS_MAC_BCAST(bssid)) { /* Send event to application */ A_MEMZERO(&wrqu, sizeof(wrqu)); memcpy(wrqu.addr.sa_data, bssid, ATH_MAC_LEN); wireless_send_event(ar->arNetDev, IWEVEXPIRED, &wrqu, NULL); } ar->arConnected = false; return; } ar6k_cfg80211_disconnect_event(ar, reason, bssid, assocRespLen, assocInfo, protocolReasonStatus); /* Send disconnect event to supplicant */ A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.addr.sa_family = ARPHRD_ETHER; wireless_send_event(ar->arNetDev, SIOCGIWAP, &wrqu, NULL); /* it is necessary to clear the host-side rx aggregation state */ aggr_reset_state(ar->aggr_cntxt); A_UNTIMEOUT(&ar->disconnect_timer); A_PRINTF("AR6000 disconnected"); if (bssid[0] || bssid[1] || bssid[2] || bssid[3] || bssid[4] || bssid[5]) { A_PRINTF(" from %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ", bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]); } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nDisconnect Reason is %d", reason)); AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nProtocol Reason/Status Code is %d", protocolReasonStatus)); AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nAssocResp Frame = %s", assocRespLen ? " " : "NULL")); for (i = 0; i < assocRespLen; i++) { if (!(i % 0x10)) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n")); } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i])); } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n")); /* * If the event is due to disconnect cmd from the host, only they the target * would stop trying to connect. Under any other condition, target would * keep trying to connect. 
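 * (This is why arConnectPending is set again below for every reason other
 * than a host-issued DISCONNECT_CMD.)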
* */ if( reason == DISCONNECT_CMD) { if ((!ar->arUserBssFilter) && (ar->arWmiReady)) { wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0); } } else { ar->arConnectPending = true; if (((reason == ASSOC_FAILED) && (protocolReasonStatus == 0x11)) || ((reason == ASSOC_FAILED) && (protocolReasonStatus == 0x0) && (reconnect_flag == 1))) { ar->arConnected = true; return; } } if ((reason == NO_NETWORK_AVAIL) && (ar->arWmiReady)) { bss_t *pWmiSsidnode = NULL; /* remove the current associated bssid node */ wmi_free_node (ar->arWmi, bssid); /* * In case any other same SSID nodes are present * remove it, since those nodes also not available now */ do { /* * Find the nodes based on SSID and remove it * NOTE :: This case will not work out for Hidden-SSID */ pWmiSsidnode = wmi_find_Ssidnode (ar->arWmi, ar->arSsid, ar->arSsidLen, false, true); if (pWmiSsidnode) { wmi_free_node (ar->arWmi, pWmiSsidnode->ni_macaddr); } } while (pWmiSsidnode); } /* Update connect & link status atomically */ spin_lock_irqsave(&ar->arLock, flags); ar->arConnected = false; netif_carrier_off(ar->arNetDev); spin_unlock_irqrestore(&ar->arLock, flags); if( (reason != CSERV_DISCONNECT) || (reconnect_flag != 1) ) { reconnect_flag = 0; } if (reason != CSERV_DISCONNECT) { ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT; ar->user_key_ctrl = 0; } netif_stop_queue(ar->arNetDev); A_MEMZERO(ar->arBssid, sizeof(ar->arBssid)); ar->arBssChannel = 0; ar->arBeaconInterval = 0; ar6000_TxDataCleanup(ar); } void ar6000_regDomain_event(struct ar6_softc *ar, u32 regCode) { A_PRINTF("AR6000 Reg Code = 0x%x\n", regCode); ar->arRegCode = regCode; } void ar6000_aggr_rcv_addba_req_evt(struct ar6_softc *ar, WMI_ADDBA_REQ_EVENT *evt) { if(evt->status == 0) { aggr_recv_addba_req_evt(ar->aggr_cntxt, evt->tid, evt->st_seq_no, evt->win_sz); } } void ar6000_aggr_rcv_addba_resp_evt(struct ar6_softc *ar, WMI_ADDBA_RESP_EVENT *evt) { A_PRINTF("ADDBA RESP. 
tid %d status %d, sz %d\n", evt->tid, evt->status, evt->amsdu_sz); if(evt->status == 0) { } } void ar6000_aggr_rcv_delba_req_evt(struct ar6_softc *ar, WMI_DELBA_EVENT *evt) { aggr_recv_delba_req_evt(ar->aggr_cntxt, evt->tid); } void register_pal_cb(ar6k_pal_config_t *palConfig_p) { ar6k_pal_config_g = *palConfig_p; } void ar6000_hci_event_rcv_evt(struct ar6_softc *ar, WMI_HCI_EVENT *cmd) { void *osbuf = NULL; s8 i; u8 size, *buf; int ret = 0; size = cmd->evt_buf_sz + 4; osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { ret = A_NO_MEMORY; A_PRINTF("Error in allocating netbuf \n"); return; } A_NETBUF_PUT(osbuf, size); buf = (u8 *)A_NETBUF_DATA(osbuf); /* First 2-bytes carry HCI event/ACL data type * the next 2 are free */ *((short *)buf) = WMI_HCI_EVENT_EVENTID; buf += sizeof(int); memcpy(buf, cmd->buf, cmd->evt_buf_sz); ar6000_deliver_frames_to_nw_stack(ar->arNetDev, osbuf); if(loghci) { A_PRINTF_LOG("HCI Event From PAL <-- \n"); for(i = 0; i < cmd->evt_buf_sz; i++) { A_PRINTF_LOG("0x%02x ", cmd->buf[i]); if((i % 10) == 0) { A_PRINTF_LOG("\n"); } } A_PRINTF_LOG("\n"); A_PRINTF_LOG("==================================\n"); } } void ar6000_neighborReport_event(struct ar6_softc *ar, int numAps, WMI_NEIGHBOR_INFO *info) { #if WIRELESS_EXT >= 18 struct iw_pmkid_cand *pmkcand; #else /* WIRELESS_EXT >= 18 */ static const char *tag = "PRE-AUTH"; char buf[128]; #endif /* WIRELESS_EXT >= 18 */ union iwreq_data wrqu; int i; AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("AR6000 Neighbor Report Event\n")); for (i=0; i < numAps; info++, i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ", info->bssid[0], info->bssid[1], info->bssid[2], info->bssid[3], info->bssid[4], info->bssid[5])); if (info->bssFlags & WMI_PREAUTH_CAPABLE_BSS) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("preauth-cap")); } if (info->bssFlags & WMI_PMKID_VALID_BSS) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,(" pmkid-valid\n")); continue; /* we skip bss if the pmkid is already valid */ } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("\n")); A_MEMZERO(&wrqu, sizeof(wrqu)); #if WIRELESS_EXT >= 18 pmkcand = A_MALLOC_NOWAIT(sizeof(struct iw_pmkid_cand)); A_MEMZERO(pmkcand, sizeof(struct iw_pmkid_cand)); pmkcand->index = i; pmkcand->flags = info->bssFlags; memcpy(pmkcand->bssid.sa_data, info->bssid, ATH_MAC_LEN); wrqu.data.length = sizeof(struct iw_pmkid_cand); wireless_send_event(ar->arNetDev, IWEVPMKIDCAND, &wrqu, (char *)pmkcand); kfree(pmkcand); #else /* WIRELESS_EXT >= 18 */ snprintf(buf, sizeof(buf), "%s%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x", tag, info->bssid[0], info->bssid[1], info->bssid[2], info->bssid[3], info->bssid[4], info->bssid[5], i, info->bssFlags); wrqu.data.length = strlen(buf); wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); #endif /* WIRELESS_EXT >= 18 */ } } void ar6000_tkip_micerr_event(struct ar6_softc *ar, u8 keyid, bool ismcast) { static const char *tag = "MLME-MICHAELMICFAILURE.indication"; char buf[128]; union iwreq_data wrqu; /* * For AP case, keyid will have aid of STA which sent pkt with * MIC error. Use this aid to get MAC & send it to hostapd. 
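 * (The low two bits of keyid carry the key index itself; the aid is
 * recovered with keyid >> 2 below.)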
*/ if (ar->arNetworkType == AP_NETWORK) { sta_t *s = ieee80211_find_conn_for_aid(ar, (keyid >> 2)); if(!s){ A_PRINTF("AP TKIP MIC error received from Invalid aid / STA not found =%d\n", keyid); return; } A_PRINTF("AP TKIP MIC error received from aid=%d\n", keyid); snprintf(buf,sizeof(buf), "%s addr=%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", tag, s->mac[0],s->mac[1],s->mac[2],s->mac[3],s->mac[4],s->mac[5]); } else { ar6k_cfg80211_tkip_micerr_event(ar, keyid, ismcast); A_PRINTF("AR6000 TKIP MIC error received for keyid %d %scast\n", keyid & 0x3, ismcast ? "multi": "uni"); snprintf(buf, sizeof(buf), "%s(keyid=%d %sicast)", tag, keyid & 0x3, ismcast ? "mult" : "un"); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = strlen(buf); wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); } void ar6000_scanComplete_event(struct ar6_softc *ar, int status) { ar6k_cfg80211_scanComplete_event(ar, status); if (!ar->arUserBssFilter) { wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0); } if (ar->scan_triggered) { if (status== 0) { union iwreq_data wrqu; A_MEMZERO(&wrqu, sizeof(wrqu)); wireless_send_event(ar->arNetDev, SIOCGIWSCAN, &wrqu, NULL); } ar->scan_triggered = 0; } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,( "AR6000 scan complete: %d\n", status)); } void ar6000_targetStats_event(struct ar6_softc *ar, u8 *ptr, u32 len) { u8 ac; if(ar->arNetworkType == AP_NETWORK) { WMI_AP_MODE_STAT *p = (WMI_AP_MODE_STAT *)ptr; WMI_AP_MODE_STAT *ap = &ar->arAPStats; if (len < sizeof(*p)) { return; } for(ac=0;ac<AP_MAX_NUM_STA;ac++) { ap->sta[ac].tx_bytes += p->sta[ac].tx_bytes; ap->sta[ac].tx_pkts += p->sta[ac].tx_pkts; ap->sta[ac].tx_error += p->sta[ac].tx_error; ap->sta[ac].tx_discard += p->sta[ac].tx_discard; ap->sta[ac].rx_bytes += p->sta[ac].rx_bytes; ap->sta[ac].rx_pkts += p->sta[ac].rx_pkts; ap->sta[ac].rx_error += p->sta[ac].rx_error; ap->sta[ac].rx_discard += p->sta[ac].rx_discard; } } else { WMI_TARGET_STATS *pTarget = (WMI_TARGET_STATS *)ptr; TARGET_STATS *pStats = &ar->arTargetStats; if (len < sizeof(*pTarget)) { return; } // Update the RSSI of the connected bss. 
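        // (The node is looked up by arBssid and handed back via
        // wmi_node_return() once the averaged beacon RSSI/SNR reported in the
        // target stats have been copied in.)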
if (ar->arConnected) { bss_t *pConnBss = NULL; pConnBss = wmi_find_node(ar->arWmi,ar->arBssid); if (pConnBss) { pConnBss->ni_rssi = pTarget->cservStats.cs_aveBeacon_rssi; pConnBss->ni_snr = pTarget->cservStats.cs_aveBeacon_snr; wmi_node_return(ar->arWmi, pConnBss); } } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 updating target stats\n")); pStats->tx_packets += pTarget->txrxStats.tx_stats.tx_packets; pStats->tx_bytes += pTarget->txrxStats.tx_stats.tx_bytes; pStats->tx_unicast_pkts += pTarget->txrxStats.tx_stats.tx_unicast_pkts; pStats->tx_unicast_bytes += pTarget->txrxStats.tx_stats.tx_unicast_bytes; pStats->tx_multicast_pkts += pTarget->txrxStats.tx_stats.tx_multicast_pkts; pStats->tx_multicast_bytes += pTarget->txrxStats.tx_stats.tx_multicast_bytes; pStats->tx_broadcast_pkts += pTarget->txrxStats.tx_stats.tx_broadcast_pkts; pStats->tx_broadcast_bytes += pTarget->txrxStats.tx_stats.tx_broadcast_bytes; pStats->tx_rts_success_cnt += pTarget->txrxStats.tx_stats.tx_rts_success_cnt; for(ac = 0; ac < WMM_NUM_AC; ac++) pStats->tx_packet_per_ac[ac] += pTarget->txrxStats.tx_stats.tx_packet_per_ac[ac]; pStats->tx_errors += pTarget->txrxStats.tx_stats.tx_errors; pStats->tx_failed_cnt += pTarget->txrxStats.tx_stats.tx_failed_cnt; pStats->tx_retry_cnt += pTarget->txrxStats.tx_stats.tx_retry_cnt; pStats->tx_mult_retry_cnt += pTarget->txrxStats.tx_stats.tx_mult_retry_cnt; pStats->tx_rts_fail_cnt += pTarget->txrxStats.tx_stats.tx_rts_fail_cnt; pStats->tx_unicast_rate = wmi_get_rate(pTarget->txrxStats.tx_stats.tx_unicast_rate); pStats->rx_packets += pTarget->txrxStats.rx_stats.rx_packets; pStats->rx_bytes += pTarget->txrxStats.rx_stats.rx_bytes; pStats->rx_unicast_pkts += pTarget->txrxStats.rx_stats.rx_unicast_pkts; pStats->rx_unicast_bytes += pTarget->txrxStats.rx_stats.rx_unicast_bytes; pStats->rx_multicast_pkts += pTarget->txrxStats.rx_stats.rx_multicast_pkts; pStats->rx_multicast_bytes += pTarget->txrxStats.rx_stats.rx_multicast_bytes; pStats->rx_broadcast_pkts += pTarget->txrxStats.rx_stats.rx_broadcast_pkts; pStats->rx_broadcast_bytes += pTarget->txrxStats.rx_stats.rx_broadcast_bytes; pStats->rx_fragment_pkt += pTarget->txrxStats.rx_stats.rx_fragment_pkt; pStats->rx_errors += pTarget->txrxStats.rx_stats.rx_errors; pStats->rx_crcerr += pTarget->txrxStats.rx_stats.rx_crcerr; pStats->rx_key_cache_miss += pTarget->txrxStats.rx_stats.rx_key_cache_miss; pStats->rx_decrypt_err += pTarget->txrxStats.rx_stats.rx_decrypt_err; pStats->rx_duplicate_frames += pTarget->txrxStats.rx_stats.rx_duplicate_frames; pStats->rx_unicast_rate = wmi_get_rate(pTarget->txrxStats.rx_stats.rx_unicast_rate); pStats->tkip_local_mic_failure += pTarget->txrxStats.tkipCcmpStats.tkip_local_mic_failure; pStats->tkip_counter_measures_invoked += pTarget->txrxStats.tkipCcmpStats.tkip_counter_measures_invoked; pStats->tkip_replays += pTarget->txrxStats.tkipCcmpStats.tkip_replays; pStats->tkip_format_errors += pTarget->txrxStats.tkipCcmpStats.tkip_format_errors; pStats->ccmp_format_errors += pTarget->txrxStats.tkipCcmpStats.ccmp_format_errors; pStats->ccmp_replays += pTarget->txrxStats.tkipCcmpStats.ccmp_replays; pStats->power_save_failure_cnt += pTarget->pmStats.power_save_failure_cnt; pStats->noise_floor_calibation = pTarget->noise_floor_calibation; pStats->cs_bmiss_cnt += pTarget->cservStats.cs_bmiss_cnt; pStats->cs_lowRssi_cnt += pTarget->cservStats.cs_lowRssi_cnt; pStats->cs_connect_cnt += pTarget->cservStats.cs_connect_cnt; pStats->cs_disconnect_cnt += pTarget->cservStats.cs_disconnect_cnt; pStats->cs_aveBeacon_snr = 
pTarget->cservStats.cs_aveBeacon_snr; pStats->cs_aveBeacon_rssi = pTarget->cservStats.cs_aveBeacon_rssi; if (enablerssicompensation) { pStats->cs_aveBeacon_rssi = rssi_compensation_calc(ar, pStats->cs_aveBeacon_rssi); } pStats->cs_lastRoam_msec = pTarget->cservStats.cs_lastRoam_msec; pStats->cs_snr = pTarget->cservStats.cs_snr; pStats->cs_rssi = pTarget->cservStats.cs_rssi; pStats->lq_val = pTarget->lqVal; pStats->wow_num_pkts_dropped += pTarget->wowStats.wow_num_pkts_dropped; pStats->wow_num_host_pkt_wakeups += pTarget->wowStats.wow_num_host_pkt_wakeups; pStats->wow_num_host_event_wakeups += pTarget->wowStats.wow_num_host_event_wakeups; pStats->wow_num_events_discarded += pTarget->wowStats.wow_num_events_discarded; pStats->arp_received += pTarget->arpStats.arp_received; pStats->arp_matched += pTarget->arpStats.arp_matched; pStats->arp_replied += pTarget->arpStats.arp_replied; if (ar->statsUpdatePending) { ar->statsUpdatePending = false; wake_up(&arEvent); } } } void ar6000_rssiThreshold_event(struct ar6_softc *ar, WMI_RSSI_THRESHOLD_VAL newThreshold, s16 rssi) { USER_RSSI_THOLD userRssiThold; rssi = rssi + SIGNAL_QUALITY_NOISE_FLOOR; if (enablerssicompensation) { rssi = rssi_compensation_calc(ar, rssi); } /* Send an event to the app */ userRssiThold.tag = ar->rssi_map[newThreshold].tag; userRssiThold.rssi = rssi; A_PRINTF("rssi Threshold range = %d tag = %d rssi = %d\n", newThreshold, userRssiThold.tag, userRssiThold.rssi); } void ar6000_hbChallengeResp_event(struct ar6_softc *ar, u32 cookie, u32 source) { if (source != APP_HB_CHALLENGE) { /* This would ignore the replys that come in after their due time */ if (cookie == ar->arHBChallengeResp.seqNum) { ar->arHBChallengeResp.outstanding = false; } } } void ar6000_reportError_event(struct ar6_softc *ar, WMI_TARGET_ERROR_VAL errorVal) { static const char * const errString[] = { [WMI_TARGET_PM_ERR_FAIL] "WMI_TARGET_PM_ERR_FAIL", [WMI_TARGET_KEY_NOT_FOUND] "WMI_TARGET_KEY_NOT_FOUND", [WMI_TARGET_DECRYPTION_ERR] "WMI_TARGET_DECRYPTION_ERR", [WMI_TARGET_BMISS] "WMI_TARGET_BMISS", [WMI_PSDISABLE_NODE_JOIN] "WMI_PSDISABLE_NODE_JOIN" }; A_PRINTF("AR6000 Error on Target. Error = 0x%x\n", errorVal); /* One error is reported at a time, and errorval is a bitmask */ if(errorVal & (errorVal - 1)) return; A_PRINTF("AR6000 Error type = "); switch(errorVal) { case WMI_TARGET_PM_ERR_FAIL: case WMI_TARGET_KEY_NOT_FOUND: case WMI_TARGET_DECRYPTION_ERR: case WMI_TARGET_BMISS: case WMI_PSDISABLE_NODE_JOIN: A_PRINTF("%s\n", errString[errorVal]); break; default: A_PRINTF("INVALID\n"); break; } } void ar6000_cac_event(struct ar6_softc *ar, u8 ac, u8 cacIndication, u8 statusCode, u8 *tspecSuggestion) { WMM_TSPEC_IE *tspecIe; /* * This is the TSPEC IE suggestion from AP. * Suggestion provided by AP under some error * cases, could be helpful for the host app. * Check documentation. */ tspecIe = (WMM_TSPEC_IE *)tspecSuggestion; /* * What do we do, if we get TSPEC rejection? One thought * that comes to mind is implictly delete the pstream... */ A_PRINTF("AR6000 CAC notification. 
" "AC = %d, cacIndication = 0x%x, statusCode = 0x%x\n", ac, cacIndication, statusCode); } void ar6000_channel_change_event(struct ar6_softc *ar, u16 oldChannel, u16 newChannel) { A_PRINTF("Channel Change notification\nOld Channel: %d, New Channel: %d\n", oldChannel, newChannel); } #define AR6000_PRINT_BSSID(_pBss) do { \ A_PRINTF("%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",\ (_pBss)[0],(_pBss)[1],(_pBss)[2],(_pBss)[3],\ (_pBss)[4],(_pBss)[5]); \ } while(0) void ar6000_roam_tbl_event(struct ar6_softc *ar, WMI_TARGET_ROAM_TBL *pTbl) { u8 i; A_PRINTF("ROAM TABLE NO OF ENTRIES is %d ROAM MODE is %d\n", pTbl->numEntries, pTbl->roamMode); for (i= 0; i < pTbl->numEntries; i++) { A_PRINTF("[%d]bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ", i, pTbl->bssRoamInfo[i].bssid[0], pTbl->bssRoamInfo[i].bssid[1], pTbl->bssRoamInfo[i].bssid[2], pTbl->bssRoamInfo[i].bssid[3], pTbl->bssRoamInfo[i].bssid[4], pTbl->bssRoamInfo[i].bssid[5]); A_PRINTF("RSSI %d RSSIDT %d LAST RSSI %d UTIL %d ROAM_UTIL %d" " BIAS %d\n", pTbl->bssRoamInfo[i].rssi, pTbl->bssRoamInfo[i].rssidt, pTbl->bssRoamInfo[i].last_rssi, pTbl->bssRoamInfo[i].util, pTbl->bssRoamInfo[i].roam_util, pTbl->bssRoamInfo[i].bias); } } void ar6000_wow_list_event(struct ar6_softc *ar, u8 num_filters, WMI_GET_WOW_LIST_REPLY *wow_reply) { u8 i,j; /*Each event now contains exactly one filter, see bug 26613*/ A_PRINTF("WOW pattern %d of %d patterns\n", wow_reply->this_filter_num, wow_reply->num_filters); A_PRINTF("wow mode = %s host mode = %s\n", (wow_reply->wow_mode == 0? "disabled":"enabled"), (wow_reply->host_mode == 1 ? "awake":"asleep")); /*If there are no patterns, the reply will only contain generic WoW information. Pattern information will exist only if there are patterns present. Bug 26716*/ /* If this event contains pattern information, display it*/ if (wow_reply->this_filter_num) { i=0; A_PRINTF("id=%d size=%d offset=%d\n", wow_reply->wow_filters[i].wow_filter_id, wow_reply->wow_filters[i].wow_filter_size, wow_reply->wow_filters[i].wow_filter_offset); A_PRINTF("wow pattern = "); for (j=0; j< wow_reply->wow_filters[i].wow_filter_size; j++) { A_PRINTF("%2.2x",wow_reply->wow_filters[i].wow_filter_pattern[j]); } A_PRINTF("\nwow mask = "); for (j=0; j< wow_reply->wow_filters[i].wow_filter_size; j++) { A_PRINTF("%2.2x",wow_reply->wow_filters[i].wow_filter_mask[j]); } A_PRINTF("\n"); } } /* * Report the Roaming related data collected on the target */ void ar6000_display_roam_time(WMI_TARGET_ROAM_TIME *p) { A_PRINTF("Disconnect Data : BSSID: "); AR6000_PRINT_BSSID(p->disassoc_bssid); A_PRINTF(" RSSI %d DISASSOC Time %d NO_TXRX_TIME %d\n", p->disassoc_bss_rssi,p->disassoc_time, p->no_txrx_time); A_PRINTF("Connect Data: BSSID: "); AR6000_PRINT_BSSID(p->assoc_bssid); A_PRINTF(" RSSI %d ASSOC Time %d TXRX_TIME %d\n", p->assoc_bss_rssi,p->assoc_time, p->allow_txrx_time); } void ar6000_roam_data_event(struct ar6_softc *ar, WMI_TARGET_ROAM_DATA *p) { switch (p->roamDataType) { case ROAM_DATA_TIME: ar6000_display_roam_time(&p->u.roamTime); break; default: break; } } void ar6000_bssInfo_event_rx(struct ar6_softc *ar, u8 *datap, int len) { struct sk_buff *skb; WMI_BSS_INFO_HDR *bih = (WMI_BSS_INFO_HDR *)datap; if (!ar->arMgmtFilter) { return; } if (((ar->arMgmtFilter & IEEE80211_FILTER_TYPE_BEACON) && (bih->frameType != BEACON_FTYPE)) || ((ar->arMgmtFilter & IEEE80211_FILTER_TYPE_PROBE_RESP) && (bih->frameType != PROBERESP_FTYPE))) { return; } if ((skb = A_NETBUF_ALLOC_RAW(len)) != NULL) { A_NETBUF_PUT(skb, len); memcpy(A_NETBUF_DATA(skb), datap, len); skb->dev = 
ar->arNetDev; memcpy(skb_mac_header(skb), A_NETBUF_DATA(skb), 6); skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(0x0019); netif_rx(skb); } } u32 wmiSendCmdNum; int ar6000_control_tx(void *devt, void *osbuf, HTC_ENDPOINT_ID eid) { struct ar6_softc *ar = (struct ar6_softc *)devt; int status = 0; struct ar_cookie *cookie = NULL; int i; #ifdef CONFIG_PM if (ar->arWowState != WLAN_WOW_STATE_NONE) { A_NETBUF_FREE(osbuf); return A_EACCES; } #endif /* CONFIG_PM */ /* take lock to protect ar6000_alloc_cookie() */ AR6000_SPIN_LOCK(&ar->arLock, 0); do { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar_contrstatus = ol_tx: skb=0x%lx, len=0x%x eid =%d\n", (unsigned long)osbuf, A_NETBUF_LEN(osbuf), eid)); if (ar->arWMIControlEpFull && (eid == ar->arControlEp)) { /* control endpoint is full, don't allocate resources, we * are just going to drop this packet */ cookie = NULL; AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" WMI Control EP full, dropping packet : 0x%lX, len:%d \n", (unsigned long)osbuf, A_NETBUF_LEN(osbuf))); } else { cookie = ar6000_alloc_cookie(ar); } if (cookie == NULL) { status = A_NO_MEMORY; break; } if(logWmiRawMsgs) { A_PRINTF("WMI cmd send, msgNo %d :", wmiSendCmdNum); for(i = 0; i < a_netbuf_to_len(osbuf); i++) A_PRINTF("%x ", ((u8 *)a_netbuf_to_data(osbuf))[i]); A_PRINTF("\n"); } wmiSendCmdNum++; } while (false); if (cookie != NULL) { /* got a structure to send it out on */ ar->arTxPending[eid]++; if (eid != ar->arControlEp) { ar->arTotalTxDataPending++; } } AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (cookie != NULL) { cookie->arc_bp[0] = (unsigned long)osbuf; cookie->arc_bp[1] = 0; SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, cookie, A_NETBUF_DATA(osbuf), A_NETBUF_LEN(osbuf), eid, AR6K_CONTROL_PKT_TAG); /* this interface is asynchronous, if there is an error, cleanup will happen in the * TX completion callback */ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt); status = 0; } if (status) { A_NETBUF_FREE(osbuf); } return status; } /* indicate tx activity or inactivity on a WMI stream */ void ar6000_indicate_tx_activity(void *devt, u8 TrafficClass, bool Active) { struct ar6_softc *ar = (struct ar6_softc *)devt; HTC_ENDPOINT_ID eid ; int i; if (ar->arWmiEnabled) { eid = arAc2EndpointID(ar, TrafficClass); AR6000_SPIN_LOCK(&ar->arLock, 0); ar->arAcStreamActive[TrafficClass] = Active; if (Active) { /* when a stream goes active, keep track of the active stream with the highest priority */ if (ar->arAcStreamPriMap[TrafficClass] > ar->arHiAcStreamActivePri) { /* set the new highest active priority */ ar->arHiAcStreamActivePri = ar->arAcStreamPriMap[TrafficClass]; } } else { /* when a stream goes inactive, we may have to search for the next active stream * that is the highest priority */ if (ar->arHiAcStreamActivePri == ar->arAcStreamPriMap[TrafficClass]) { /* the highest priority stream just went inactive */ /* reset and search for the "next" highest "active" priority stream */ ar->arHiAcStreamActivePri = 0; for (i = 0; i < WMM_NUM_AC; i++) { if (ar->arAcStreamActive[i]) { if (ar->arAcStreamPriMap[i] > ar->arHiAcStreamActivePri) { /* set the new highest active priority */ ar->arHiAcStreamActivePri = ar->arAcStreamPriMap[i]; } } } } } AR6000_SPIN_UNLOCK(&ar->arLock, 0); } else { /* for mbox ping testing, the traffic class is mapped directly as a stream ID, * see handling of AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE in ioctl.c * convert the stream ID to a endpoint */ eid = arAc2EndpointID(ar, TrafficClass); } /* notify HTC, this may cause credit distribution changes */ 
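    /* (The endpoints share a common pool of TX credits; reporting a stream
     * as active or idle lets HTC's credit distribution shift credits toward
     * the endpoints that are currently busy.) */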
HTCIndicateActivityChange(ar->arHtcTarget, eid, Active); } void ar6000_btcoex_config_event(struct ar6_softc *ar, u8 *ptr, u32 len) { WMI_BTCOEX_CONFIG_EVENT *pBtcoexConfig = (WMI_BTCOEX_CONFIG_EVENT *)ptr; WMI_BTCOEX_CONFIG_EVENT *pArbtcoexConfig =&ar->arBtcoexConfig; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 BTCOEX CONFIG EVENT \n")); A_PRINTF("received config event\n"); pArbtcoexConfig->btProfileType = pBtcoexConfig->btProfileType; pArbtcoexConfig->linkId = pBtcoexConfig->linkId; switch (pBtcoexConfig->btProfileType) { case WMI_BTCOEX_BT_PROFILE_SCO: memcpy(&pArbtcoexConfig->info.scoConfigCmd, &pBtcoexConfig->info.scoConfigCmd, sizeof(WMI_SET_BTCOEX_SCO_CONFIG_CMD)); break; case WMI_BTCOEX_BT_PROFILE_A2DP: memcpy(&pArbtcoexConfig->info.a2dpConfigCmd, &pBtcoexConfig->info.a2dpConfigCmd, sizeof(WMI_SET_BTCOEX_A2DP_CONFIG_CMD)); break; case WMI_BTCOEX_BT_PROFILE_ACLCOEX: memcpy(&pArbtcoexConfig->info.aclcoexConfig, &pBtcoexConfig->info.aclcoexConfig, sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD)); break; case WMI_BTCOEX_BT_PROFILE_INQUIRY_PAGE: memcpy(&pArbtcoexConfig->info.btinquiryPageConfigCmd, &pBtcoexConfig->info.btinquiryPageConfigCmd, sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD)); break; } if (ar->statsUpdatePending) { ar->statsUpdatePending = false; wake_up(&arEvent); } } void ar6000_btcoex_stats_event(struct ar6_softc *ar, u8 *ptr, u32 len) { WMI_BTCOEX_STATS_EVENT *pBtcoexStats = (WMI_BTCOEX_STATS_EVENT *)ptr; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 BTCOEX CONFIG EVENT \n")); memcpy(&ar->arBtcoexStats, pBtcoexStats, sizeof(WMI_BTCOEX_STATS_EVENT)); if (ar->statsUpdatePending) { ar->statsUpdatePending = false; wake_up(&arEvent); } } module_init(ar6000_init_module); module_exit(ar6000_cleanup_module); /* Init cookie queue */ static void ar6000_cookie_init(struct ar6_softc *ar) { u32 i; ar->arCookieList = NULL; ar->arCookieCount = 0; A_MEMZERO(s_ar_cookie_mem, sizeof(s_ar_cookie_mem)); for (i = 0; i < MAX_COOKIE_NUM; i++) { ar6000_free_cookie(ar, &s_ar_cookie_mem[i]); } } /* cleanup cookie queue */ static void ar6000_cookie_cleanup(struct ar6_softc *ar) { /* It is gone .... 
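 * (All cookies live in the static s_ar_cookie_mem[] array that
 * ar6000_cookie_init() zeroed, so there is nothing to free here; dropping
 * the list head and count is sufficient.)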
 */
    ar->arCookieList = NULL;
    ar->arCookieCount = 0;
}

/* Return a cookie to the head of the free list */
static void
ar6000_free_cookie(struct ar6_softc *ar, struct ar_cookie * cookie)
{
    /* Insert first */
    A_ASSERT(ar != NULL);
    A_ASSERT(cookie != NULL);

    cookie->arc_list_next = ar->arCookieList;
    ar->arCookieList = cookie;
    ar->arCookieCount++;
}

/* Take a cookie from the head of the free list */
static struct ar_cookie *
ar6000_alloc_cookie(struct ar6_softc *ar)
{
    struct ar_cookie *cookie;

    cookie = ar->arCookieList;
    if(cookie != NULL) {
        ar->arCookieList = cookie->arc_list_next;
        ar->arCookieCount--;
    }

    return cookie;
}

void ar6000_tx_retry_err_event(void *devt)
{
    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Tx retries reach maximum!\n"));
}

void ar6000_snrThresholdEvent_rx(void *devt, WMI_SNR_THRESHOLD_VAL newThreshold, u8 snr)
{
    WMI_SNR_THRESHOLD_EVENT event;

    event.range = newThreshold;
    event.snr = snr;
}

void ar6000_lqThresholdEvent_rx(void *devt, WMI_LQ_THRESHOLD_VAL newThreshold, u8 lq)
{
    AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("lq threshold range %d, lq %d\n", newThreshold, lq));
}

u32 a_copy_to_user(void *to, const void *from, u32 n)
{
    return(copy_to_user(to, from, n));
}

u32 a_copy_from_user(void *to, const void *from, u32 n)
{
    return(copy_from_user(to, from, n));
}

int
ar6000_get_driver_cfg(struct net_device *dev, u16 cfgParam, void *result)
{
    int ret = 0;

    switch(cfgParam)
    {
        case AR6000_DRIVER_CFG_GET_WLANNODECACHING:
            *((u32 *)result) = wlanNodeCaching;
            break;
        case AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS:
            *((u32 *)result) = logWmiRawMsgs;
            break;
        default:
            ret = EINVAL;
            break;
    }

    return ret;
}

void ar6000_keepalive_rx(void *devt, u8 configured)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    ar->arKeepaliveConfigured = configured;
    wake_up(&arEvent);
}

void ar6000_pmkid_list_event(void *devt, u8 numPMKID, WMI_PMKID *pmkidList,
                             u8 *bssidList)
{
    u8 i, j;

    A_PRINTF("Number of Cached PMKIDs is %d\n", numPMKID);

    for (i = 0; i < numPMKID; i++) {
        A_PRINTF("\nBSSID %d ", i);
        for (j = 0; j < ATH_MAC_LEN; j++) {
            A_PRINTF("%2.2x", bssidList[j]);
        }
        bssidList += (ATH_MAC_LEN + WMI_PMKID_LEN);
        A_PRINTF("\nPMKID %d ", i);
        for (j = 0; j < WMI_PMKID_LEN; j++) {
            A_PRINTF("%2.2x", pmkidList->pmkid[j]);
        }
        pmkidList = (WMI_PMKID *)((u8 *)pmkidList + ATH_MAC_LEN +
                                  WMI_PMKID_LEN);
    }
}

void ar6000_pspoll_event(struct ar6_softc *ar,u8 aid)
{
    sta_t *conn=NULL;
    bool isPsqEmpty = false;

    conn = ieee80211_find_conn_for_aid(ar, aid);

    /* ieee80211_find_conn_for_aid() returns NULL for an unknown aid; bail
     * out here instead of dereferencing it below */
    if (conn == NULL) {
        return;
    }

    /* If the PS q for this STA is not empty, dequeue and send a pkt from
     * the head of the q. Also update the More data bit in the WMI_DATA_HDR
     * if there are more pkts for this STA in the PS q. If there are no more
     * pkts for this STA, update the PVB for this STA.
     */
    A_MUTEX_LOCK(&conn->psqLock);
    isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
    A_MUTEX_UNLOCK(&conn->psqLock);

    if (isPsqEmpty) {
        /* TODO: No buffered pkts for this STA. Send out a NULL data frame */
    } else {
        struct sk_buff *skb = NULL;

        A_MUTEX_LOCK(&conn->psqLock);
        skb = A_NETBUF_DEQUEUE(&conn->psq);
        A_MUTEX_UNLOCK(&conn->psqLock);

        /* Set the STA flag to PSPolled, so that the frame will go out */
        STA_SET_PS_POLLED(conn);
        ar6000_data_tx(skb, ar->arNetDev);
        STA_CLR_PS_POLLED(conn);

        /* Clear the PVB for this STA if the queue has become empty */
        A_MUTEX_LOCK(&conn->psqLock);
        isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
        A_MUTEX_UNLOCK(&conn->psqLock);

        if (isPsqEmpty) {
            wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0);
        }
    }
}

void ar6000_dtimexpiry_event(struct ar6_softc *ar)
{
    bool isMcastQueued = false;
    struct sk_buff *skb = NULL;

    /* If there are no associated STAs, ignore the DTIM expiry event.
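 * (ar->sta_list_index, checked below, is a bitmap of in-use AP client
 * slots, so zero means no STA is currently associated.)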
* There can be potential race conditions where the last associated * STA may disconnect & before the host could clear the 'Indicate DTIM' * request to the firmware, the firmware would have just indicated a DTIM * expiry event. The race is between 'clear DTIM expiry cmd' going * from the host to the firmware & the DTIM expiry event happening from * the firmware to the host. */ if (ar->sta_list_index == 0) { return; } A_MUTEX_LOCK(&ar->mcastpsqLock); isMcastQueued = A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq); A_MUTEX_UNLOCK(&ar->mcastpsqLock); A_ASSERT(isMcastQueued == false); /* Flush the mcast psq to the target */ /* Set the STA flag to DTIMExpired, so that the frame will go out */ ar->DTIMExpired = true; A_MUTEX_LOCK(&ar->mcastpsqLock); while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) { skb = A_NETBUF_DEQUEUE(&ar->mcastpsq); A_MUTEX_UNLOCK(&ar->mcastpsqLock); ar6000_data_tx(skb, ar->arNetDev); A_MUTEX_LOCK(&ar->mcastpsqLock); } A_MUTEX_UNLOCK(&ar->mcastpsqLock); /* Reset the DTIMExpired flag back to 0 */ ar->DTIMExpired = false; /* Clear the LSB of the BitMapCtl field of the TIM IE */ wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 0); } void read_rssi_compensation_param(struct ar6_softc *ar) { u8 *cust_data_ptr; //#define RSSICOMPENSATION_PRINT #ifdef RSSICOMPENSATION_PRINT s16 i; cust_data_ptr = ar6000_get_cust_data_buffer(ar->arTargetType); for (i=0; i<16; i++) { A_PRINTF("cust_data_%d = %x \n", i, *(u8 *)cust_data_ptr); cust_data_ptr += 1; } #endif cust_data_ptr = ar6000_get_cust_data_buffer(ar->arTargetType); rssi_compensation_param.customerID = *(u16 *)cust_data_ptr & 0xffff; rssi_compensation_param.enable = *(u16 *)(cust_data_ptr+2) & 0xffff; rssi_compensation_param.bg_param_a = *(u16 *)(cust_data_ptr+4) & 0xffff; rssi_compensation_param.bg_param_b = *(u16 *)(cust_data_ptr+6) & 0xffff; rssi_compensation_param.a_param_a = *(u16 *)(cust_data_ptr+8) & 0xffff; rssi_compensation_param.a_param_b = *(u16 *)(cust_data_ptr+10) &0xffff; rssi_compensation_param.reserved = *(u32 *)(cust_data_ptr+12); #ifdef RSSICOMPENSATION_PRINT A_PRINTF("customerID = 0x%x \n", rssi_compensation_param.customerID); A_PRINTF("enable = 0x%x \n", rssi_compensation_param.enable); A_PRINTF("bg_param_a = 0x%x and %d \n", rssi_compensation_param.bg_param_a, rssi_compensation_param.bg_param_a); A_PRINTF("bg_param_b = 0x%x and %d \n", rssi_compensation_param.bg_param_b, rssi_compensation_param.bg_param_b); A_PRINTF("a_param_a = 0x%x and %d \n", rssi_compensation_param.a_param_a, rssi_compensation_param.a_param_a); A_PRINTF("a_param_b = 0x%x and %d \n", rssi_compensation_param.a_param_b, rssi_compensation_param.a_param_b); A_PRINTF("Last 4 bytes = 0x%x \n", rssi_compensation_param.reserved); #endif if (rssi_compensation_param.enable != 0x1) { rssi_compensation_param.enable = 0; } return; } s32 rssi_compensation_calc_tcmd(u32 freq, s32 rssi, u32 totalPkt) { if (freq > 5000) { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d, totalPkt = %d\n", rssi,totalPkt)); rssi = rssi * rssi_compensation_param.a_param_a + totalPkt * rssi_compensation_param.a_param_b; rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } else { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d, totalPkt = %d\n", rssi,totalPkt)); rssi = rssi * rssi_compensation_param.bg_param_a + totalPkt * rssi_compensation_param.bg_param_b; 
rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } return rssi; } s16 rssi_compensation_calc(struct ar6_softc *ar, s16 rssi) { if (ar->arBssChannel > 5000) { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi)); rssi = rssi * rssi_compensation_param.a_param_a + rssi_compensation_param.a_param_b; rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } else { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi)); rssi = rssi * rssi_compensation_param.bg_param_a + rssi_compensation_param.bg_param_b; rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } return rssi; } s16 rssi_compensation_reverse_calc(struct ar6_softc *ar, s16 rssi, bool Above) { s16 i; if (ar->arBssChannel > 5000) { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before rev compensation = %d\n", rssi)); rssi = rssi * 100; rssi = (rssi - rssi_compensation_param.a_param_b) / rssi_compensation_param.a_param_a; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after rev compensation = %d\n", rssi)); } } else { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before rev compensation = %d\n", rssi)); if (Above) { for (i=95; i>=0; i--) { if (rssi <= rssi_compensation_table[i]) { rssi = 0 - i; break; } } } else { for (i=0; i<=95; i++) { if (rssi >= rssi_compensation_table[i]) { rssi = 0 - i; break; } } } AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after rev compensation = %d\n", rssi)); } } return rssi; } #ifdef WAPI_ENABLE void ap_wapi_rekey_event(struct ar6_softc *ar, u8 type, u8 *mac) { union iwreq_data wrqu; char buf[20]; A_MEMZERO(buf, sizeof(buf)); strcpy(buf, "WAPI_REKEY"); buf[10] = type; memcpy(&buf[11], mac, ATH_MAC_LEN); A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.data.length = 10+1+ATH_MAC_LEN; wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); A_PRINTF("WAPI REKEY - %d - %02x:%02x\n", type, mac[4], mac[5]); } #endif static int ar6000_reinstall_keys(struct ar6_softc *ar, u8 key_op_ctrl) { int status = 0; struct ieee80211req_key *uik = &ar->user_saved_keys.ucast_ik; struct ieee80211req_key *bik = &ar->user_saved_keys.bcast_ik; CRYPTO_TYPE keyType = ar->user_saved_keys.keyType; if (IEEE80211_CIPHER_CCKM_KRK != uik->ik_type) { if (NONE_CRYPT == keyType) { goto _reinstall_keys_out; } if (uik->ik_keylen) { status = wmi_addKey_cmd(ar->arWmi, uik->ik_keyix, ar->user_saved_keys.keyType, PAIRWISE_USAGE, uik->ik_keylen, (u8 *)&uik->ik_keyrsc, uik->ik_keydata, key_op_ctrl, uik->ik_macaddr, SYNC_BEFORE_WMIFLAG); } } else { status = wmi_add_krk_cmd(ar->arWmi, uik->ik_keydata); } if (IEEE80211_CIPHER_CCKM_KRK != bik->ik_type) { if (NONE_CRYPT == keyType) { goto _reinstall_keys_out; } if (bik->ik_keylen) { status = wmi_addKey_cmd(ar->arWmi, bik->ik_keyix, ar->user_saved_keys.keyType, GROUP_USAGE, bik->ik_keylen, (u8 *)&bik->ik_keyrsc, bik->ik_keydata, key_op_ctrl, bik->ik_macaddr, NO_SYNC_WMIFLAG); } } else { status = wmi_add_krk_cmd(ar->arWmi, bik->ik_keydata); } _reinstall_keys_out: ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT; ar->user_key_ctrl = 0; return status; } void ar6000_dset_open_req( void *context, u32 id, u32 targHandle, u32 
targReplyFn, u32 targReplyArg) { } void ar6000_dset_close( void *context, u32 access_cookie) { return; } void ar6000_dset_data_req( void *context, u32 accessCookie, u32 offset, u32 length, u32 targBuf, u32 targReplyFn, u32 targReplyArg) { } int ar6000_ap_mode_profile_commit(struct ar6_softc *ar) { WMI_CONNECT_CMD p; unsigned long flags; /* No change in AP's profile configuration */ if(ar->ap_profile_flag==0) { A_PRINTF("COMMIT: No change in profile!!!\n"); return -ENODATA; } if(!ar->arSsidLen) { A_PRINTF("SSID not set!!!\n"); return -ECHRNG; } switch(ar->arAuthMode) { case NONE_AUTH: if((ar->arPairwiseCrypto != NONE_CRYPT) && #ifdef WAPI_ENABLE (ar->arPairwiseCrypto != WAPI_CRYPT) && #endif (ar->arPairwiseCrypto != WEP_CRYPT)) { A_PRINTF("Cipher not supported in AP mode Open auth\n"); return -EOPNOTSUPP; } break; case WPA_PSK_AUTH: case WPA2_PSK_AUTH: case (WPA_PSK_AUTH|WPA2_PSK_AUTH): break; default: A_PRINTF("This key mgmt type not supported in AP mode\n"); return -EOPNOTSUPP; } /* Update the arNetworkType */ ar->arNetworkType = ar->arNextMode; A_MEMZERO(&p,sizeof(p)); p.ssidLength = ar->arSsidLen; memcpy(p.ssid,ar->arSsid,p.ssidLength); p.channel = ar->arChannelHint; p.networkType = ar->arNetworkType; p.dot11AuthMode = ar->arDot11AuthMode; p.authMode = ar->arAuthMode; p.pairwiseCryptoType = ar->arPairwiseCrypto; p.pairwiseCryptoLen = ar->arPairwiseCryptoLen; p.groupCryptoType = ar->arGroupCrypto; p.groupCryptoLen = ar->arGroupCryptoLen; p.ctrl_flags = ar->arConnectCtrlFlags; wmi_ap_profile_commit(ar->arWmi, &p); spin_lock_irqsave(&ar->arLock, flags); ar->arConnected = true; netif_carrier_on(ar->arNetDev); spin_unlock_irqrestore(&ar->arLock, flags); ar->ap_profile_flag = 0; return 0; } int ar6000_connect_to_ap(struct ar6_softc *ar) { /* The ssid length check prevents second "essid off" from the user, to be treated as a connect cmd. The second "essid off" is ignored. */ if((ar->arWmiReady == true) && (ar->arSsidLen > 0) && ar->arNetworkType!=AP_NETWORK) { int status; if((ADHOC_NETWORK != ar->arNetworkType) && (NONE_AUTH==ar->arAuthMode) && (WEP_CRYPT==ar->arPairwiseCrypto)) { ar6000_install_static_wep_keys(ar); } if (!ar->arUserBssFilter) { if (wmi_bssfilter_cmd(ar->arWmi, ALL_BSS_FILTER, 0) != 0) { return -EIO; } } #ifdef WAPI_ENABLE if (ar->arWapiEnable) { ar->arPairwiseCrypto = WAPI_CRYPT; ar->arPairwiseCryptoLen = 0; ar->arGroupCrypto = WAPI_CRYPT; ar->arGroupCryptoLen = 0; ar->arAuthMode = NONE_AUTH; ar->arConnectCtrlFlags |= CONNECT_IGNORE_WPAx_GROUP_CIPHER; } #endif AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("Connect called with authmode %d dot11 auth %d"\ " PW crypto %d PW crypto Len %d GRP crypto %d"\ " GRP crypto Len %d\n", ar->arAuthMode, ar->arDot11AuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto, ar->arGroupCryptoLen)); reconnect_flag = 0; /* Set the listen interval into 1000TUs or more. This value will be indicated to Ap in the conn. 
later set it back locally at the STA to 100/1000 TUs depending on the power mode */ if ((ar->arNetworkType == INFRA_NETWORK)) { wmi_listeninterval_cmd(ar->arWmi, max(ar->arListenIntervalT, (u16)A_MAX_WOW_LISTEN_INTERVAL), 0); } status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType, ar->arDot11AuthMode, ar->arAuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto,ar->arGroupCryptoLen, ar->arSsidLen, ar->arSsid, ar->arReqBssid, ar->arChannelHint, ar->arConnectCtrlFlags); if (status) { wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB); if (!ar->arUserBssFilter) { wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0); } return status; } if ((!(ar->arConnectCtrlFlags & CONNECT_DO_WPA_OFFLOAD)) && ((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode))) { A_TIMEOUT_MS(&ar->disconnect_timer, A_DISCONNECT_TIMER_INTERVAL, 0); } ar->arConnectCtrlFlags &= ~CONNECT_DO_WPA_OFFLOAD; ar->arConnectPending = true; return status; } return A_ERROR; } int ar6000_disconnect(struct ar6_softc *ar) { if ((ar->arConnected == true) || (ar->arConnectPending == true)) { wmi_disconnect_cmd(ar->arWmi); /* * Disconnect cmd is issued, clear connectPending. * arConnected will be cleared in disconnect_event notification. */ ar->arConnectPending = false; } return 0; } int ar6000_ap_mode_get_wpa_ie(struct ar6_softc *ar, struct ieee80211req_wpaie *wpaie) { sta_t *conn = NULL; conn = ieee80211_find_conn(ar, wpaie->wpa_macaddr); A_MEMZERO(wpaie->wpa_ie, IEEE80211_MAX_IE); A_MEMZERO(wpaie->rsn_ie, IEEE80211_MAX_IE); if(conn) { memcpy(wpaie->wpa_ie, conn->wpa_ie, IEEE80211_MAX_IE); } return 0; } int is_iwioctl_allowed(u8 mode, u16 cmd) { if(cmd >= SIOCSIWCOMMIT && cmd <= SIOCGIWPOWER) { cmd -= SIOCSIWCOMMIT; if(sioctl_filter[cmd] == 0xFF) return 0; if(sioctl_filter[cmd] & mode) return 0; } else if(cmd >= SIOCIWFIRSTPRIV && cmd <= (SIOCIWFIRSTPRIV+30)) { cmd -= SIOCIWFIRSTPRIV; if(pioctl_filter[cmd] == 0xFF) return 0; if(pioctl_filter[cmd] & mode) return 0; } else { return A_ERROR; } return A_ENOTSUP; } int is_xioctl_allowed(u8 mode, int cmd) { if(sizeof(xioctl_filter)-1 < cmd) { A_PRINTF("Filter for this cmd=%d not defined\n",cmd); return 0; } if(xioctl_filter[cmd] == 0xFF) return 0; if(xioctl_filter[cmd] & mode) return 0; return A_ERROR; } #ifdef WAPI_ENABLE int ap_set_wapi_key(struct ar6_softc *ar, void *ikey) { struct ieee80211req_key *ik = (struct ieee80211req_key *)ikey; KEY_USAGE keyUsage = 0; int status; if (memcmp(ik->ik_macaddr, bcast_mac, IEEE80211_ADDR_LEN) == 0) { keyUsage = GROUP_USAGE; } else { keyUsage = PAIRWISE_USAGE; } A_PRINTF("WAPI_KEY: Type:%d ix:%d mac:%02x:%02x len:%d\n", keyUsage, ik->ik_keyix, ik->ik_macaddr[4], ik->ik_macaddr[5], ik->ik_keylen); status = wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, WAPI_CRYPT, keyUsage, ik->ik_keylen, (u8 *)&ik->ik_keyrsc, ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr, SYNC_BOTH_WMIFLAG); if (0 != status) { return -EIO; } return 0; } #endif void ar6000_peer_event( void *context, u8 eventCode, u8 *macAddr) { u8 pos; for (pos=0;pos<6;pos++) printk("%02x: ",*(macAddr+pos)); printk("\n"); } #ifdef HTC_TEST_SEND_PKTS #define HTC_TEST_DUPLICATE 8 static void DoHTCSendPktsTest(struct ar6_softc *ar, int MapNo, HTC_ENDPOINT_ID eid, struct sk_buff *dupskb) { struct ar_cookie *cookie; struct ar_cookie *cookieArray[HTC_TEST_DUPLICATE]; struct sk_buff *new_skb; int i; int pkts = 0; struct htc_packet_queue pktQueue; EPPING_HEADER *eppingHdr; eppingHdr = A_NETBUF_DATA(dupskb); if (eppingHdr->Cmd_h == EPPING_CMD_NO_ECHO) { /* skip
test if this is already a tx perf test */ return; } for (i = 0; i < HTC_TEST_DUPLICATE; i++,pkts++) { AR6000_SPIN_LOCK(&ar->arLock, 0); cookie = ar6000_alloc_cookie(ar); if (cookie != NULL) { ar->arTxPending[eid]++; ar->arTotalTxDataPending++; } AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (NULL == cookie) { break; } new_skb = A_NETBUF_ALLOC(A_NETBUF_LEN(dupskb)); if (new_skb == NULL) { AR6000_SPIN_LOCK(&ar->arLock, 0); ar6000_free_cookie(ar,cookie); AR6000_SPIN_UNLOCK(&ar->arLock, 0); break; } A_NETBUF_PUT_DATA(new_skb, A_NETBUF_DATA(dupskb), A_NETBUF_LEN(dupskb)); cookie->arc_bp[0] = (unsigned long)new_skb; cookie->arc_bp[1] = MapNo; SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, cookie, A_NETBUF_DATA(new_skb), A_NETBUF_LEN(new_skb), eid, AR6K_DATA_PKT_TAG); cookieArray[i] = cookie; { EPPING_HEADER *pHdr = (EPPING_HEADER *)A_NETBUF_DATA(new_skb); pHdr->Cmd_h = EPPING_CMD_NO_ECHO; /* do not echo the packet */ } } if (pkts == 0) { return; } INIT_HTC_PACKET_QUEUE(&pktQueue); for (i = 0; i < pkts; i++) { HTC_PACKET_ENQUEUE(&pktQueue,&cookieArray[i]->HtcPkt); } HTCSendPktsMultiple(ar->arHtcTarget, &pktQueue); } #endif #ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT /* * Add support for adding and removing a virtual adapter for soft AP. * Some OSes require different adapter names for station and soft AP mode. * To support this requirement, create and destroy a netdevice instance * when the AP mode is operational. Full-fledged support for a virtual * device is not implemented. Rather, a virtual interface is created and * linked with the existing physical device instance during the operation * of the AP mode. */ int ar6000_start_ap_interface(struct ar6_softc *ar) { struct ar_virtual_interface *arApDev; /* Change net_device to point to AP instance */ arApDev = (struct ar_virtual_interface *)ar->arApDev; ar->arNetDev = arApDev->arNetDev; return 0; } int ar6000_stop_ap_interface(struct ar6_softc *ar) { struct ar_virtual_interface *arApDev; /* Change net_device to point to sta instance */ arApDev = (struct ar_virtual_interface *)ar->arApDev; if (arApDev) { ar->arNetDev = arApDev->arStaNetDev; } return 0; } int ar6000_create_ap_interface(struct ar6_softc *ar, char *ap_ifname) { struct net_device *dev; struct ar_virtual_interface *arApDev; dev = alloc_etherdev(sizeof(struct ar_virtual_interface)); if (dev == NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: can't alloc etherdev\n")); return A_ERROR; } ether_setup(dev); init_netdev(dev, ap_ifname); dev->priv_flags &= ~IFF_TX_SKB_SHARING; if (register_netdev(dev)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: register_netdev failed\n")); return A_ERROR; } arApDev = netdev_priv(dev); arApDev->arDev = ar; arApDev->arNetDev = dev; arApDev->arStaNetDev = ar->arNetDev; ar->arApDev = arApDev; arApNetDev = dev; /* Copy the MAC address */ memcpy(dev->dev_addr, ar->arNetDev->dev_addr, AR6000_ETH_ADDR_LEN); return 0; } int ar6000_add_ap_interface(struct ar6_softc *ar, char *ap_ifname) { /* Interface already added, need not proceed further */ if (ar->arApDev != NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_add_ap_interface: interface already present \n")); return 0; } if (ar6000_create_ap_interface(ar, ap_ifname) != 0) { return A_ERROR; } A_PRINTF("Add AP interface %s \n",ap_ifname); return ar6000_start_ap_interface(ar); } int ar6000_remove_ap_interface(struct ar6_softc *ar) { if (arApNetDev) { ar6000_stop_ap_interface(ar); unregister_netdev(arApNetDev); free_netdev(arApNetDev); A_PRINTF("Remove AP interface\n"); } ar->arApDev = NULL; arApNetDev =
NULL; return 0; } #endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */ #ifdef EXPORT_HCI_BRIDGE_INTERFACE EXPORT_SYMBOL(setupbtdev); #endif
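/*
 * Editorial addition, not part of the original driver: the cookie helpers
 * above implement a singly-linked free list pre-filled from the static
 * s_ar_cookie_mem[] array. The sketch below restates that pattern in a
 * self-contained form; all ex_* names are hypothetical, and the locking
 * the driver performs around these helpers (arLock) is omitted here.
 */
struct ex_cookie {
	struct ex_cookie *next;
};

struct ex_cookie_pool {
	struct ex_cookie *head;
	unsigned int count;
};

/* Push a cookie onto the head of the free list (cf. ar6000_free_cookie). */
static void ex_cookie_put(struct ex_cookie_pool *pool, struct ex_cookie *c)
{
	c->next = pool->head;
	pool->head = c;
	pool->count++;
}

/* Pop a cookie, or return NULL when exhausted (cf. ar6000_alloc_cookie). */
static struct ex_cookie *ex_cookie_get(struct ex_cookie_pool *pool)
{
	struct ex_cookie *c = pool->head;

	if (c != NULL) {
		pool->head = c->next;
		pool->count--;
	}
	return c;
}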
./CrossVul/dataset_final_sorted/CWE-264/c/good_3524_9
crossvul-cpp_data_bad_3524_2
/* drivers/net/ifb.c: The purpose of this driver is to provide a device that allows for sharing of resources: 1) qdiscs/policies that are per device as opposed to system wide. ifb allows for a device which can be redirected to thus providing an impression of sharing. 2) Allows for queueing incoming traffic for shaping instead of dropping. The original concept is based on what is known as the IMQ driver initially written by Martin Devera, later rewritten by Patrick McHardy and then maintained by Andre Correa. You need the tc action mirror or redirect to feed this device packets. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Authors: Jamal Hadi Salim (2005) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <net/pkt_sched.h> #include <net/net_namespace.h> #define TX_Q_LIMIT 32 struct ifb_private { struct tasklet_struct ifb_tasklet; int tasklet_pending; struct u64_stats_sync rsync; struct sk_buff_head rq; u64 rx_packets; u64 rx_bytes; struct u64_stats_sync tsync; struct sk_buff_head tq; u64 tx_packets; u64 tx_bytes; }; static int numifbs = 2; static void ri_tasklet(unsigned long dev); static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev); static int ifb_open(struct net_device *dev); static int ifb_close(struct net_device *dev); static void ri_tasklet(unsigned long dev) { struct net_device *_dev = (struct net_device *)dev; struct ifb_private *dp = netdev_priv(_dev); struct netdev_queue *txq; struct sk_buff *skb; txq = netdev_get_tx_queue(_dev, 0); if ((skb = skb_peek(&dp->tq)) == NULL) { if (__netif_tx_trylock(txq)) { skb_queue_splice_tail_init(&dp->rq, &dp->tq); __netif_tx_unlock(txq); } else { /* reschedule */ goto resched; } } while ((skb = __skb_dequeue(&dp->tq)) != NULL) { u32 from = G_TC_FROM(skb->tc_verd); skb->tc_verd = 0; skb->tc_verd = SET_TC_NCLS(skb->tc_verd); u64_stats_update_begin(&dp->tsync); dp->tx_packets++; dp->tx_bytes += skb->len; u64_stats_update_end(&dp->tsync); rcu_read_lock(); skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif); if (!skb->dev) { rcu_read_unlock(); dev_kfree_skb(skb); _dev->stats.tx_dropped++; if (skb_queue_len(&dp->tq) != 0) goto resched; break; } rcu_read_unlock(); skb->skb_iif = _dev->ifindex; if (from & AT_EGRESS) { dev_queue_xmit(skb); } else if (from & AT_INGRESS) { skb_pull(skb, skb->dev->hard_header_len); netif_receive_skb(skb); } else BUG(); } if (__netif_tx_trylock(txq)) { if ((skb = skb_peek(&dp->rq)) == NULL) { dp->tasklet_pending = 0; if (netif_queue_stopped(_dev)) netif_wake_queue(_dev); } else { __netif_tx_unlock(txq); goto resched; } __netif_tx_unlock(txq); } else { resched: dp->tasklet_pending = 1; tasklet_schedule(&dp->ifb_tasklet); } } static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct ifb_private *dp = netdev_priv(dev); unsigned int start; do { start = u64_stats_fetch_begin_bh(&dp->rsync); stats->rx_packets = dp->rx_packets; stats->rx_bytes = dp->rx_bytes; } while (u64_stats_fetch_retry_bh(&dp->rsync, start)); do { start = u64_stats_fetch_begin_bh(&dp->tsync); stats->tx_packets = dp->tx_packets; stats->tx_bytes = dp->tx_bytes; } while (u64_stats_fetch_retry_bh(&dp->tsync, start)); stats->rx_dropped = 
dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; return stats; } static const struct net_device_ops ifb_netdev_ops = { .ndo_open = ifb_open, .ndo_stop = ifb_close, .ndo_get_stats64 = ifb_stats64, .ndo_start_xmit = ifb_xmit, .ndo_validate_addr = eth_validate_addr, }; #define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX) static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. */ dev->destructor = free_netdev; dev->netdev_ops = &ifb_netdev_ops; /* Fill in device structure with ethernet-generic values. */ ether_setup(dev); dev->tx_queue_len = TX_Q_LIMIT; dev->features |= IFB_FEATURES; dev->vlan_features |= IFB_FEATURES; dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; random_ether_addr(dev->dev_addr); } static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) { struct ifb_private *dp = netdev_priv(dev); u32 from = G_TC_FROM(skb->tc_verd); u64_stats_update_begin(&dp->rsync); dp->rx_packets++; dp->rx_bytes += skb->len; u64_stats_update_end(&dp->rsync); if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) { dev_kfree_skb(skb); dev->stats.rx_dropped++; return NETDEV_TX_OK; } if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) { netif_stop_queue(dev); } __skb_queue_tail(&dp->rq, skb); if (!dp->tasklet_pending) { dp->tasklet_pending = 1; tasklet_schedule(&dp->ifb_tasklet); } return NETDEV_TX_OK; } static int ifb_close(struct net_device *dev) { struct ifb_private *dp = netdev_priv(dev); tasklet_kill(&dp->ifb_tasklet); netif_stop_queue(dev); __skb_queue_purge(&dp->rq); __skb_queue_purge(&dp->tq); return 0; } static int ifb_open(struct net_device *dev) { struct ifb_private *dp = netdev_priv(dev); tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); __skb_queue_head_init(&dp->rq); __skb_queue_head_init(&dp->tq); netif_start_queue(dev); return 0; } static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } return 0; } static struct rtnl_link_ops ifb_link_ops __read_mostly = { .kind = "ifb", .priv_size = sizeof(struct ifb_private), .setup = ifb_setup, .validate = ifb_validate, }; /* Number of ifb devices to be set up by this module. */ module_param(numifbs, int, 0); MODULE_PARM_DESC(numifbs, "Number of ifb devices"); static int __init ifb_init_one(int index) { struct net_device *dev_ifb; int err; dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d", ifb_setup); if (!dev_ifb) return -ENOMEM; dev_ifb->rtnl_link_ops = &ifb_link_ops; err = register_netdevice(dev_ifb); if (err < 0) goto err; return 0; err: free_netdev(dev_ifb); return err; } static int __init ifb_init_module(void) { int i, err; rtnl_lock(); err = __rtnl_link_register(&ifb_link_ops); for (i = 0; i < numifbs && !err; i++) err = ifb_init_one(i); if (err) __rtnl_link_unregister(&ifb_link_ops); rtnl_unlock(); return err; } static void __exit ifb_cleanup_module(void) { rtnl_link_unregister(&ifb_link_ops); } module_init(ifb_init_module); module_exit(ifb_cleanup_module); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamal Hadi Salim"); MODULE_ALIAS_RTNL_LINK("ifb");
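/*
 * Editorial addition, not part of the original driver: ifb_xmit() and
 * ifb_stats64() above rely on the u64_stats_sync seqcount pattern so that
 * a 32-bit reader never observes a torn 64-bit counter. A minimal sketch
 * of that pattern, assuming <linux/u64_stats_sync.h>; the ex_* names are
 * hypothetical.
 */
struct ex_counters {
	struct u64_stats_sync sync;
	u64 packets;
	u64 bytes;
};

/* Writer side: bracket every update (cf. ifb_xmit). */
static void ex_counters_add(struct ex_counters *c, unsigned int len)
{
	u64_stats_update_begin(&c->sync);
	c->packets++;
	c->bytes += len;
	u64_stats_update_end(&c->sync);
}

/* Reader side: retry until a consistent snapshot is seen (cf. ifb_stats64). */
static void ex_counters_read(const struct ex_counters *c, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&c->sync);
		*packets = c->packets;
		*bytes = c->bytes;
	} while (u64_stats_fetch_retry_bh(&c->sync, start));
}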
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3524_2
crossvul-cpp_data_bad_5861_48
/* * Scatterlist Cryptographic API. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2002 David S. Miller (davem@redhat.com) * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> * * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> * and Nettle, by Niels Möller. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/err.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/param.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/string.h> #include "internal.h" LIST_HEAD(crypto_alg_list); EXPORT_SYMBOL_GPL(crypto_alg_list); DECLARE_RWSEM(crypto_alg_sem); EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; } EXPORT_SYMBOL_GPL(crypto_mod_get); void crypto_mod_put(struct crypto_alg *alg) { struct module *module = alg->cra_module; crypto_alg_put(alg); module_put(module); } EXPORT_SYMBOL_GPL(crypto_mod_put); static inline int crypto_is_test_larval(struct crypto_larval *larval) { return larval->alg.cra_driver_name[0]; } static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *q, *alg = NULL; int best = -2; list_for_each_entry(q, &crypto_alg_list, cra_list) { int exact, fuzzy; if (crypto_is_moribund(q)) continue; if ((q->cra_flags ^ type) & mask) continue; if (crypto_is_larval(q) && !crypto_is_test_larval((struct crypto_larval *)q) && ((struct crypto_larval *)q)->mask != mask) continue; exact = !strcmp(q->cra_driver_name, name); fuzzy = !strcmp(q->cra_name, name); if (!exact && !(fuzzy && q->cra_priority > best)) continue; if (unlikely(!crypto_mod_get(q))) continue; best = q->cra_priority; if (alg) crypto_mod_put(alg); alg = q; if (exact) break; } return alg; } static void crypto_larval_destroy(struct crypto_alg *alg) { struct crypto_larval *larval = (void *)alg; BUG_ON(!crypto_is_larval(alg)); if (larval->adult) crypto_mod_put(larval->adult); kfree(larval); } struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) { struct crypto_larval *larval; larval = kzalloc(sizeof(*larval), GFP_KERNEL); if (!larval) return ERR_PTR(-ENOMEM); larval->mask = mask; larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; larval->alg.cra_priority = -1; larval->alg.cra_destroy = crypto_larval_destroy; strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); init_completion(&larval->completion); return larval; } EXPORT_SYMBOL_GPL(crypto_larval_alloc); static struct crypto_alg *crypto_larval_add(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; struct crypto_larval *larval; larval = crypto_larval_alloc(name, type, mask); if (IS_ERR(larval)) return ERR_CAST(larval); atomic_set(&larval->alg.cra_refcnt, 2); down_write(&crypto_alg_sem); alg = __crypto_alg_lookup(name, type, mask); if (!alg) { alg = &larval->alg; list_add(&alg->cra_list, &crypto_alg_list); } up_write(&crypto_alg_sem); if (alg != &larval->alg) { kfree(larval); if (crypto_is_larval(alg)) alg = crypto_larval_wait(alg); } return alg; } void crypto_larval_kill(struct crypto_alg 
*alg) { struct crypto_larval *larval = (void *)alg; down_write(&crypto_alg_sem); list_del(&alg->cra_list); up_write(&crypto_alg_sem); complete_all(&larval->completion); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_larval_kill); static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) { struct crypto_larval *larval = (void *)alg; long timeout; timeout = wait_for_completion_interruptible_timeout( &larval->completion, 60 * HZ); alg = larval->adult; if (timeout < 0) alg = ERR_PTR(-EINTR); else if (!timeout) alg = ERR_PTR(-ETIMEDOUT); else if (!alg) alg = ERR_PTR(-ENOENT); else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) alg = ERR_PTR(-EAGAIN); else if (!crypto_mod_get(alg)) alg = ERR_PTR(-EAGAIN); crypto_mod_put(&larval->alg); return alg; } struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; down_read(&crypto_alg_sem); alg = __crypto_alg_lookup(name, type, mask); up_read(&crypto_alg_sem); return alg; } EXPORT_SYMBOL_GPL(crypto_alg_lookup); struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; if (!name) return ERR_PTR(-ENOENT); mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); type &= mask; alg = crypto_alg_lookup(name, type, mask); if (!alg) { request_module("%s", name); if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & CRYPTO_ALG_NEED_FALLBACK)) request_module("%s-all", name); alg = crypto_alg_lookup(name, type, mask); } if (alg) return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; return crypto_larval_add(name, type, mask); } EXPORT_SYMBOL_GPL(crypto_larval_lookup); int crypto_probing_notify(unsigned long val, void *v) { int ok; ok = blocking_notifier_call_chain(&crypto_chain, val, v); if (ok == NOTIFY_DONE) { request_module("cryptomgr"); ok = blocking_notifier_call_chain(&crypto_chain, val, v); } return ok; } EXPORT_SYMBOL_GPL(crypto_probing_notify); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; struct crypto_alg *larval; int ok; if (!((type | mask) & CRYPTO_ALG_TESTED)) { type |= CRYPTO_ALG_TESTED; mask |= CRYPTO_ALG_TESTED; } larval = crypto_larval_lookup(name, type, mask); if (IS_ERR(larval) || !crypto_is_larval(larval)) return larval; ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); if (ok == NOTIFY_STOP) alg = crypto_larval_wait(larval); else { crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); } crypto_larval_kill(larval); return alg; } EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { const struct crypto_type *type_obj = tfm->__crt_alg->cra_type; if (type_obj) return type_obj->init(tfm, type, mask); switch (crypto_tfm_alg_type(tfm)) { case CRYPTO_ALG_TYPE_CIPHER: return crypto_init_cipher_ops(tfm); case CRYPTO_ALG_TYPE_COMPRESS: return crypto_init_compress_ops(tfm); default: break; } BUG(); return -EINVAL; } static void crypto_exit_ops(struct crypto_tfm *tfm) { const struct crypto_type *type = tfm->__crt_alg->cra_type; if (type) { if (tfm->exit) tfm->exit(tfm); return; } switch (crypto_tfm_alg_type(tfm)) { case CRYPTO_ALG_TYPE_CIPHER: crypto_exit_cipher_ops(tfm); break; case CRYPTO_ALG_TYPE_COMPRESS: crypto_exit_compress_ops(tfm); break; default: BUG(); } } static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { const struct crypto_type *type_obj = alg->cra_type; unsigned int len; len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1); if (type_obj) return len + 
type_obj->ctxsize(alg, type, mask); switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { default: BUG(); case CRYPTO_ALG_TYPE_CIPHER: len += crypto_cipher_ctxsize(alg); break; case CRYPTO_ALG_TYPE_COMPRESS: len += crypto_compress_ctxsize(alg); break; } return len; } void crypto_shoot_alg(struct crypto_alg *alg) { down_write(&crypto_alg_sem); alg->cra_flags |= CRYPTO_ALG_DYING; up_write(&crypto_alg_sem); } EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask) { struct crypto_tfm *tfm = NULL; unsigned int tfm_size; int err = -ENOMEM; tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask); tfm = kzalloc(tfm_size, GFP_KERNEL); if (tfm == NULL) goto out_err; tfm->__crt_alg = alg; err = crypto_init_ops(tfm, type, mask); if (err) goto out_free_tfm; if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) goto cra_init_failed; goto out; cra_init_failed: crypto_exit_ops(tfm); out_free_tfm: if (err == -EAGAIN) crypto_shoot_alg(alg); kfree(tfm); out_err: tfm = ERR_PTR(err); out: return tfm; } EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); /* * crypto_alloc_base - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @type: Type of algorithm * @mask: Mask for type comparison * * This function should not be used by new algorithm types. * Please use crypto_alloc_tfm instead. * * crypto_alloc_base() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable * modules, it will then attempt to load a module of the same name or * alias. If that fails it will send a query to any loaded crypto manager * to construct an algorithm on the fly. A refcount is grabbed on the * algorithm which is then associated with the new transform. * * The returned transform is of a non-determinate type. Most people * should use one of the more specific allocation functions such as * crypto_alloc_blkcipher. * * In case of error the return value is an error pointer. 
*/ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask) { struct crypto_tfm *tfm; int err; for (;;) { struct crypto_alg *alg; alg = crypto_alg_mod_lookup(alg_name, type, mask); if (IS_ERR(alg)) { err = PTR_ERR(alg); goto err; } tfm = __crypto_alloc_tfm(alg, type, mask); if (!IS_ERR(tfm)) return tfm; crypto_mod_put(alg); err = PTR_ERR(tfm); err: if (err != -EAGAIN) break; if (signal_pending(current)) { err = -EINTR; break; } } return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_alloc_base); void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) { char *mem; struct crypto_tfm *tfm = NULL; unsigned int tfmsize; unsigned int total; int err = -ENOMEM; tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); mem = kzalloc(total, GFP_KERNEL); if (mem == NULL) goto out_err; tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; err = frontend->init_tfm(tfm); if (err) goto out_free_tfm; if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) goto cra_init_failed; goto out; cra_init_failed: crypto_exit_ops(tfm); out_free_tfm: if (err == -EAGAIN) crypto_shoot_alg(alg); kfree(mem); out_err: mem = ERR_PTR(err); out: return mem; } EXPORT_SYMBOL_GPL(crypto_create_tfm); struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask) { struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) = crypto_alg_mod_lookup; if (frontend) { type &= frontend->maskclear; mask &= frontend->maskclear; type |= frontend->type; mask |= frontend->maskset; if (frontend->lookup) lookup = frontend->lookup; } return lookup(alg_name, type, mask); } EXPORT_SYMBOL_GPL(crypto_find_alg); /* * crypto_alloc_tfm - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @frontend: Frontend algorithm type * @type: Type of algorithm * @mask: Mask for type comparison * * crypto_alloc_tfm() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable * modules, it will then attempt to load a module of the same name or * alias. If that fails it will send a query to any loaded crypto manager * to construct an algorithm on the fly. A refcount is grabbed on the * algorithm which is then associated with the new transform. * * The returned transform is of a non-determinate type. Most people * should use one of the more specific allocation functions such as * crypto_alloc_blkcipher. * * In case of error the return value is an error pointer. */ void *crypto_alloc_tfm(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask) { void *tfm; int err; for (;;) { struct crypto_alg *alg; alg = crypto_find_alg(alg_name, frontend, type, mask); if (IS_ERR(alg)) { err = PTR_ERR(alg); goto err; } tfm = crypto_create_tfm(alg, frontend); if (!IS_ERR(tfm)) return tfm; crypto_mod_put(alg); err = PTR_ERR(tfm); err: if (err != -EAGAIN) break; if (signal_pending(current)) { err = -EINTR; break; } } return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_alloc_tfm); /* * crypto_destroy_tfm - Free crypto transform * @mem: Start of tfm slab * @tfm: Transform to free * * This function frees up the transform and any associated resources, * then drops the refcount on the associated algorithm. 
*/ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) { struct crypto_alg *alg; if (unlikely(!mem)) return; alg = tfm->__crt_alg; if (!tfm->exit && alg->cra_exit) alg->cra_exit(tfm); crypto_exit_ops(tfm); crypto_mod_put(alg); kzfree(mem); } EXPORT_SYMBOL_GPL(crypto_destroy_tfm); int crypto_has_alg(const char *name, u32 type, u32 mask) { int ret = 0; struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); if (!IS_ERR(alg)) { crypto_mod_put(alg); ret = 1; } return ret; } EXPORT_SYMBOL_GPL(crypto_has_alg); MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL");
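/*
 * Editorial addition, not part of the original file: a minimal usage
 * sketch for crypto_alloc_base() as documented above. "sha1" is only an
 * example algorithm name, and releasing through crypto_free_tfm() (the
 * <linux/crypto.h> wrapper around crypto_destroy_tfm()) is an assumption
 * about the caller's context rather than something this file mandates.
 */
static int ex_use_transform(void)
{
	struct crypto_tfm *tfm;

	tfm = crypto_alloc_base("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* ERR_PTR convention, per the comment above */

	/* ... drive the transform through a type-specific wrapper here ... */

	crypto_free_tfm(tfm);
	return 0;
}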
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_48
crossvul-cpp_data_good_2190_0
/* * linux/fs/attr.c * * Copyright (C) 1991, 1992 Linus Torvalds * changes by Thomas Schoebel-Theuer */ #include <linux/export.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/capability.h> #include <linux/fsnotify.h> #include <linux/fcntl.h> #include <linux/security.h> #include <linux/evm.h> #include <linux/ima.h> /** * inode_change_ok - check if attribute changes to an inode are allowed * @inode: inode to check * @attr: attributes to change * * Check if we are allowed to change the attributes contained in @attr * in the given inode. This includes the normal unix access permission * checks, as well as checks for rlimits and others. * * Should be called as the first thing in ->setattr implementations, * possibly after taking additional locks. */ int inode_change_ok(const struct inode *inode, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overridden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) return 0; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (!uid_eq(current_fsuid(), inode->i_uid) || !uid_eq(attr->ia_uid, inode->i_uid)) && !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (!uid_eq(current_fsuid(), inode->i_uid) || (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) && !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!inode_owner_or_capable(inode)) return -EPERM; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !capable_wrt_inode_uidgid(inode, CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(inode)) return -EPERM; } return 0; } EXPORT_SYMBOL(inode_change_ok); /** * inode_newsize_ok - may this inode be truncated to a given size * @inode: the inode to be truncated * @offset: the new size to assign to the inode * @Returns: 0 on success, -ve errno on failure * * inode_newsize_ok must be called with i_mutex held. * * inode_newsize_ok will check filesystem limits and ulimits to check that the * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ * when necessary. Caller must not proceed with inode size change if failure is * returned. @inode must be a file (not directory), with appropriate * permissions to allow truncate (inode_newsize_ok does NOT check these * conditions). */ int inode_newsize_ok(const struct inode *inode, loff_t offset) { if (inode->i_size < offset) { unsigned long limit; limit = rlimit(RLIMIT_FSIZE); if (limit != RLIM_INFINITY && offset > limit) goto out_sig; if (offset > inode->i_sb->s_maxbytes) goto out_big; } else { /* * truncation of in-use swapfiles is disallowed - it would * cause subsequent swapout to scribble on the now-freed * blocks. */ if (IS_SWAPFILE(inode)) return -ETXTBSY; } return 0; out_sig: send_sig(SIGXFSZ, current, 0); out_big: return -EFBIG; } EXPORT_SYMBOL(inode_newsize_ok); /** * setattr_copy - copy simple metadata updates into the generic inode * @inode: the inode to be updated * @attr: the new attributes * * setattr_copy must be called with i_mutex held.
* * setattr_copy updates the inode's metadata with that specified * in attr. Noticeably missing is inode size update, which is more complex * as it requires pagecache updates. * * The inode is not marked as dirty after this operation. The rationale is * that for "simple" filesystems, the struct inode is the inode storage. * The caller is free to mark the inode dirty afterwards if needed. */ void setattr_copy(struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable_wrt_inode_uidgid(inode, CAP_FSETID)) mode &= ~S_ISGID; inode->i_mode = mode; } } EXPORT_SYMBOL(setattr_copy); /** * notify_change - modify attributes of a filesystem object * @dentry: object affected * @iattr: new attributes * @delegated_inode: returns inode, if the inode is delegated * * The caller must hold the i_mutex on the affected object. * * If notify_change discovers a delegation in need of breaking, * it will return -EWOULDBLOCK and return a reference to the inode in * delegated_inode. The caller should then break the delegation and * retry. Because breaking a delegation may take a long time, the * caller should drop the i_mutex before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. Also, passing NULL is fine for callers holding * the file open for write, as there can be no conflicting delegation in * that case. */ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; umode_t mode = inode->i_mode; int error; struct timespec now; unsigned int ia_valid = attr->ia_valid; WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) { if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; } if ((ia_valid & ATTR_MODE)) { umode_t amode = attr->ia_mode; /* Flag setting protected by i_mutex */ if (is_sxid(amode)) inode->i_flags &= ~S_NOSEC; } now = current_fs_time(inode->i_sb); attr->ia_ctime = now; if (!(ia_valid & ATTR_ATIME_SET)) attr->ia_atime = now; if (!(ia_valid & ATTR_MTIME_SET)) attr->ia_mtime = now; if (ia_valid & ATTR_KILL_PRIV) { attr->ia_valid &= ~ATTR_KILL_PRIV; ia_valid &= ~ATTR_KILL_PRIV; error = security_inode_need_killpriv(dentry); if (error > 0) error = security_inode_killpriv(dentry); if (error) return error; } /* * We now pass ATTR_KILL_S*ID to the lower level setattr function so * that the function has the ability to reinterpret a mode change * that's due to these bits. This adds an implicit restriction that * no function will ever call notify_change with both ATTR_MODE and * ATTR_KILL_S*ID set.
*/ if ((ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) && (ia_valid & ATTR_MODE)) BUG(); if (ia_valid & ATTR_KILL_SUID) { if (mode & S_ISUID) { ia_valid = attr->ia_valid |= ATTR_MODE; attr->ia_mode = (inode->i_mode & ~S_ISUID); } } if (ia_valid & ATTR_KILL_SGID) { if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { if (!(ia_valid & ATTR_MODE)) { ia_valid = attr->ia_valid |= ATTR_MODE; attr->ia_mode = inode->i_mode; } attr->ia_mode &= ~S_ISGID; } } if (!(attr->ia_valid & ~(ATTR_KILL_SUID | ATTR_KILL_SGID))) return 0; error = security_inode_setattr(dentry, attr); if (error) return error; error = try_break_deleg(inode, delegated_inode); if (error) return error; if (inode->i_op->setattr) error = inode->i_op->setattr(dentry, attr); else error = simple_setattr(dentry, attr); if (!error) { fsnotify_change(dentry, ia_valid); ima_inode_post_setattr(dentry); evm_inode_post_setattr(dentry, ia_valid); } return error; } EXPORT_SYMBOL(notify_change);
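/*
 * Editorial addition, not part of the original file: a sketch of calling
 * notify_change() under the locking rule its kernel-doc states (i_mutex
 * held on the affected inode). Passing NULL for delegated_inode follows
 * the "not NFS exported" case described above; the ex_* name is
 * hypothetical.
 */
static int ex_set_mode(struct dentry *dentry, umode_t mode)
{
	struct iattr attr = {
		.ia_valid = ATTR_MODE,
		.ia_mode  = mode,
	};
	struct inode *inode = dentry->d_inode;
	int error;

	mutex_lock(&inode->i_mutex);
	error = notify_change(dentry, &attr, NULL);
	mutex_unlock(&inode->i_mutex);
	return error;
}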
./CrossVul/dataset_final_sorted/CWE-264/c/good_2190_0
crossvul-cpp_data_good_5861_48
/* * Scatterlist Cryptographic API. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2002 David S. Miller (davem@redhat.com) * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> * * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> * and Nettle, by Niels Möller. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/err.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/param.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/string.h> #include "internal.h" LIST_HEAD(crypto_alg_list); EXPORT_SYMBOL_GPL(crypto_alg_list); DECLARE_RWSEM(crypto_alg_sem); EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; } EXPORT_SYMBOL_GPL(crypto_mod_get); void crypto_mod_put(struct crypto_alg *alg) { struct module *module = alg->cra_module; crypto_alg_put(alg); module_put(module); } EXPORT_SYMBOL_GPL(crypto_mod_put); static inline int crypto_is_test_larval(struct crypto_larval *larval) { return larval->alg.cra_driver_name[0]; } static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *q, *alg = NULL; int best = -2; list_for_each_entry(q, &crypto_alg_list, cra_list) { int exact, fuzzy; if (crypto_is_moribund(q)) continue; if ((q->cra_flags ^ type) & mask) continue; if (crypto_is_larval(q) && !crypto_is_test_larval((struct crypto_larval *)q) && ((struct crypto_larval *)q)->mask != mask) continue; exact = !strcmp(q->cra_driver_name, name); fuzzy = !strcmp(q->cra_name, name); if (!exact && !(fuzzy && q->cra_priority > best)) continue; if (unlikely(!crypto_mod_get(q))) continue; best = q->cra_priority; if (alg) crypto_mod_put(alg); alg = q; if (exact) break; } return alg; } static void crypto_larval_destroy(struct crypto_alg *alg) { struct crypto_larval *larval = (void *)alg; BUG_ON(!crypto_is_larval(alg)); if (larval->adult) crypto_mod_put(larval->adult); kfree(larval); } struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) { struct crypto_larval *larval; larval = kzalloc(sizeof(*larval), GFP_KERNEL); if (!larval) return ERR_PTR(-ENOMEM); larval->mask = mask; larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; larval->alg.cra_priority = -1; larval->alg.cra_destroy = crypto_larval_destroy; strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); init_completion(&larval->completion); return larval; } EXPORT_SYMBOL_GPL(crypto_larval_alloc); static struct crypto_alg *crypto_larval_add(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; struct crypto_larval *larval; larval = crypto_larval_alloc(name, type, mask); if (IS_ERR(larval)) return ERR_CAST(larval); atomic_set(&larval->alg.cra_refcnt, 2); down_write(&crypto_alg_sem); alg = __crypto_alg_lookup(name, type, mask); if (!alg) { alg = &larval->alg; list_add(&alg->cra_list, &crypto_alg_list); } up_write(&crypto_alg_sem); if (alg != &larval->alg) { kfree(larval); if (crypto_is_larval(alg)) alg = crypto_larval_wait(alg); } return alg; } void crypto_larval_kill(struct crypto_alg 
*alg) { struct crypto_larval *larval = (void *)alg; down_write(&crypto_alg_sem); list_del(&alg->cra_list); up_write(&crypto_alg_sem); complete_all(&larval->completion); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_larval_kill); static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) { struct crypto_larval *larval = (void *)alg; long timeout; timeout = wait_for_completion_interruptible_timeout( &larval->completion, 60 * HZ); alg = larval->adult; if (timeout < 0) alg = ERR_PTR(-EINTR); else if (!timeout) alg = ERR_PTR(-ETIMEDOUT); else if (!alg) alg = ERR_PTR(-ENOENT); else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) alg = ERR_PTR(-EAGAIN); else if (!crypto_mod_get(alg)) alg = ERR_PTR(-EAGAIN); crypto_mod_put(&larval->alg); return alg; } struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; down_read(&crypto_alg_sem); alg = __crypto_alg_lookup(name, type, mask); up_read(&crypto_alg_sem); return alg; } EXPORT_SYMBOL_GPL(crypto_alg_lookup); struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; if (!name) return ERR_PTR(-ENOENT); mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); type &= mask; alg = crypto_alg_lookup(name, type, mask); if (!alg) { request_module("crypto-%s", name); if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & CRYPTO_ALG_NEED_FALLBACK)) request_module("crypto-%s-all", name); alg = crypto_alg_lookup(name, type, mask); } if (alg) return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; return crypto_larval_add(name, type, mask); } EXPORT_SYMBOL_GPL(crypto_larval_lookup); int crypto_probing_notify(unsigned long val, void *v) { int ok; ok = blocking_notifier_call_chain(&crypto_chain, val, v); if (ok == NOTIFY_DONE) { request_module("cryptomgr"); ok = blocking_notifier_call_chain(&crypto_chain, val, v); } return ok; } EXPORT_SYMBOL_GPL(crypto_probing_notify); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; struct crypto_alg *larval; int ok; if (!((type | mask) & CRYPTO_ALG_TESTED)) { type |= CRYPTO_ALG_TESTED; mask |= CRYPTO_ALG_TESTED; } larval = crypto_larval_lookup(name, type, mask); if (IS_ERR(larval) || !crypto_is_larval(larval)) return larval; ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); if (ok == NOTIFY_STOP) alg = crypto_larval_wait(larval); else { crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); } crypto_larval_kill(larval); return alg; } EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { const struct crypto_type *type_obj = tfm->__crt_alg->cra_type; if (type_obj) return type_obj->init(tfm, type, mask); switch (crypto_tfm_alg_type(tfm)) { case CRYPTO_ALG_TYPE_CIPHER: return crypto_init_cipher_ops(tfm); case CRYPTO_ALG_TYPE_COMPRESS: return crypto_init_compress_ops(tfm); default: break; } BUG(); return -EINVAL; } static void crypto_exit_ops(struct crypto_tfm *tfm) { const struct crypto_type *type = tfm->__crt_alg->cra_type; if (type) { if (tfm->exit) tfm->exit(tfm); return; } switch (crypto_tfm_alg_type(tfm)) { case CRYPTO_ALG_TYPE_CIPHER: crypto_exit_cipher_ops(tfm); break; case CRYPTO_ALG_TYPE_COMPRESS: crypto_exit_compress_ops(tfm); break; default: BUG(); } } static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { const struct crypto_type *type_obj = alg->cra_type; unsigned int len; len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1); if (type_obj) return len 
+ type_obj->ctxsize(alg, type, mask); switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { default: BUG(); case CRYPTO_ALG_TYPE_CIPHER: len += crypto_cipher_ctxsize(alg); break; case CRYPTO_ALG_TYPE_COMPRESS: len += crypto_compress_ctxsize(alg); break; } return len; } void crypto_shoot_alg(struct crypto_alg *alg) { down_write(&crypto_alg_sem); alg->cra_flags |= CRYPTO_ALG_DYING; up_write(&crypto_alg_sem); } EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask) { struct crypto_tfm *tfm = NULL; unsigned int tfm_size; int err = -ENOMEM; tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask); tfm = kzalloc(tfm_size, GFP_KERNEL); if (tfm == NULL) goto out_err; tfm->__crt_alg = alg; err = crypto_init_ops(tfm, type, mask); if (err) goto out_free_tfm; if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) goto cra_init_failed; goto out; cra_init_failed: crypto_exit_ops(tfm); out_free_tfm: if (err == -EAGAIN) crypto_shoot_alg(alg); kfree(tfm); out_err: tfm = ERR_PTR(err); out: return tfm; } EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); /* * crypto_alloc_base - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @type: Type of algorithm * @mask: Mask for type comparison * * This function should not be used by new algorithm types. * Please use crypto_alloc_tfm instead. * * crypto_alloc_base() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable * modules, it will then attempt to load a module of the same name or * alias. If that fails it will send a query to any loaded crypto manager * to construct an algorithm on the fly. A refcount is grabbed on the * algorithm which is then associated with the new transform. * * The returned transform is of a non-determinate type. Most people * should use one of the more specific allocation functions such as * crypto_alloc_blkcipher. * * In case of error the return value is an error pointer. 
*/ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask) { struct crypto_tfm *tfm; int err; for (;;) { struct crypto_alg *alg; alg = crypto_alg_mod_lookup(alg_name, type, mask); if (IS_ERR(alg)) { err = PTR_ERR(alg); goto err; } tfm = __crypto_alloc_tfm(alg, type, mask); if (!IS_ERR(tfm)) return tfm; crypto_mod_put(alg); err = PTR_ERR(tfm); err: if (err != -EAGAIN) break; if (signal_pending(current)) { err = -EINTR; break; } } return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_alloc_base); void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) { char *mem; struct crypto_tfm *tfm = NULL; unsigned int tfmsize; unsigned int total; int err = -ENOMEM; tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); mem = kzalloc(total, GFP_KERNEL); if (mem == NULL) goto out_err; tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; err = frontend->init_tfm(tfm); if (err) goto out_free_tfm; if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) goto cra_init_failed; goto out; cra_init_failed: crypto_exit_ops(tfm); out_free_tfm: if (err == -EAGAIN) crypto_shoot_alg(alg); kfree(mem); out_err: mem = ERR_PTR(err); out: return mem; } EXPORT_SYMBOL_GPL(crypto_create_tfm); struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask) { struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) = crypto_alg_mod_lookup; if (frontend) { type &= frontend->maskclear; mask &= frontend->maskclear; type |= frontend->type; mask |= frontend->maskset; if (frontend->lookup) lookup = frontend->lookup; } return lookup(alg_name, type, mask); } EXPORT_SYMBOL_GPL(crypto_find_alg); /* * crypto_alloc_tfm - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @frontend: Frontend algorithm type * @type: Type of algorithm * @mask: Mask for type comparison * * crypto_alloc_tfm() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable * modules, it will then attempt to load a module of the same name or * alias. If that fails it will send a query to any loaded crypto manager * to construct an algorithm on the fly. A refcount is grabbed on the * algorithm which is then associated with the new transform. * * The returned transform is of a non-determinate type. Most people * should use one of the more specific allocation functions such as * crypto_alloc_blkcipher. * * In case of error the return value is an error pointer. */ void *crypto_alloc_tfm(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask) { void *tfm; int err; for (;;) { struct crypto_alg *alg; alg = crypto_find_alg(alg_name, frontend, type, mask); if (IS_ERR(alg)) { err = PTR_ERR(alg); goto err; } tfm = crypto_create_tfm(alg, frontend); if (!IS_ERR(tfm)) return tfm; crypto_mod_put(alg); err = PTR_ERR(tfm); err: if (err != -EAGAIN) break; if (signal_pending(current)) { err = -EINTR; break; } } return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_alloc_tfm); /* * crypto_destroy_tfm - Free crypto transform * @mem: Start of tfm slab * @tfm: Transform to free * * This function frees up the transform and any associated resources, * then drops the refcount on the associated algorithm. 
*/ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) { struct crypto_alg *alg; if (unlikely(!mem)) return; alg = tfm->__crt_alg; if (!tfm->exit && alg->cra_exit) alg->cra_exit(tfm); crypto_exit_ops(tfm); crypto_mod_put(alg); kzfree(mem); } EXPORT_SYMBOL_GPL(crypto_destroy_tfm); int crypto_has_alg(const char *name, u32 type, u32 mask) { int ret = 0; struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); if (!IS_ERR(alg)) { crypto_mod_put(alg); ret = 1; } return ret; } EXPORT_SYMBOL_GPL(crypto_has_alg); MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL");
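/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * the crypto_alloc_base()/crypto_free_tfm() pair exported above. The
 * function name example_cipher_tfm_user is hypothetical; the type/mask
 * pair is the classic single-block cipher request, which lands in the
 * CRYPTO_ALG_TYPE_CIPHER branch of crypto_ctxsize() above.
 */
static int __maybe_unused example_cipher_tfm_user(void)
{
	struct crypto_tfm *tfm;

	/* Looks up "aes", loading a module on demand if necessary. */
	tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
				CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* ERR_PTR() convention, see above */

	/* ... use the transform via the crypto_cipher_*() wrappers ... */

	crypto_free_tfm(tfm);		/* drops the algorithm refcount */
	return 0;
}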
/* End of sample: crypto/api.c (./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_48) */

/* Next sample: net/ipv6/sit.c (crossvul-cpp_data_bad_3438_4) */
/* * IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT) * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * Roger Venning <r.venning@telstra.com>: 6to4 support * Nate Thompson <nate@thebog.net>: 6to4 support * Fred Templin <fred.l.templin@boeing.com>: isatap support */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/icmp.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/init.h> #include <linux/netfilter_ipv4.h> #include <linux/if_ether.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/ip6_fib.h> #include <net/ip6_route.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/ip.h> #include <net/udp.h> #include <net/icmp.h> #include <net/ipip.h> #include <net/inet_ecn.h> #include <net/xfrm.h> #include <net/dsfield.h> #include <net/net_namespace.h> #include <net/netns/generic.h> /* This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c For comments look at net/ipv4/ip_gre.c --ANK */ #define HASH_SIZE 16 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) static int ipip6_tunnel_init(struct net_device *dev); static void ipip6_tunnel_setup(struct net_device *dev); static void ipip6_dev_free(struct net_device *dev); static int sit_net_id __read_mostly; struct sit_net { struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE]; struct ip_tunnel __rcu *tunnels_r[HASH_SIZE]; struct ip_tunnel __rcu *tunnels_l[HASH_SIZE]; struct ip_tunnel __rcu *tunnels_wc[1]; struct ip_tunnel __rcu **tunnels[4]; struct net_device *fb_tunnel_dev; }; /* * Locking : hash tables are protected by RCU and RTNL */ #define for_each_ip_tunnel_rcu(start) \ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) /* often modified stats are per cpu, other are shared (netdev->stats) */ struct pcpu_tstats { unsigned long rx_packets; unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; }; static struct net_device_stats *ipip6_get_stats(struct net_device *dev) { struct pcpu_tstats sum = { 0 }; int i; for_each_possible_cpu(i) { const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); sum.rx_packets += tstats->rx_packets; sum.rx_bytes += tstats->rx_bytes; sum.tx_packets += tstats->tx_packets; sum.tx_bytes += tstats->tx_bytes; } dev->stats.rx_packets = sum.rx_packets; dev->stats.rx_bytes = sum.rx_bytes; dev->stats.tx_packets = sum.tx_packets; dev->stats.tx_bytes = sum.tx_bytes; return &dev->stats; } /* * Must be invoked with rcu_read_lock */ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, struct net_device *dev, __be32 remote, __be32 local) { unsigned int h0 = HASH(remote); unsigned int h1 = HASH(local); struct ip_tunnel *t; struct sit_net *sitn = net_generic(net, sit_net_id); for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) { if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr && (!dev || !t->parms.link || dev->iflink == t->parms.link) && (t->dev->flags & IFF_UP)) return t; } 
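	/*
	 * The (remote, local) bucket scanned above is the most specific
	 * match; the loops below fall back to remote-only, then local-only,
	 * then the single wildcard slot, mirroring the prio encoding built
	 * in __ipip6_bucket() below.
	 */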
for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) { if (remote == t->parms.iph.daddr && (!dev || !t->parms.link || dev->iflink == t->parms.link) && (t->dev->flags & IFF_UP)) return t; } for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) { if (local == t->parms.iph.saddr && (!dev || !t->parms.link || dev->iflink == t->parms.link) && (t->dev->flags & IFF_UP)) return t; } t = rcu_dereference(sitn->tunnels_wc[0]); if ((t != NULL) && (t->dev->flags & IFF_UP)) return t; return NULL; } static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn, struct ip_tunnel_parm *parms) { __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; unsigned int h = 0; int prio = 0; if (remote) { prio |= 2; h ^= HASH(remote); } if (local) { prio |= 1; h ^= HASH(local); } return &sitn->tunnels[prio][h]; } static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn, struct ip_tunnel *t) { return __ipip6_bucket(sitn, &t->parms); } static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t) { struct ip_tunnel __rcu **tp; struct ip_tunnel *iter; for (tp = ipip6_bucket(sitn, t); (iter = rtnl_dereference(*tp)) != NULL; tp = &iter->next) { if (t == iter) { rcu_assign_pointer(*tp, t->next); break; } } } static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t) { struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t); rcu_assign_pointer(t->next, rtnl_dereference(*tp)); rcu_assign_pointer(*tp, t); } static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) { #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel *t = netdev_priv(dev); if (t->dev == sitn->fb_tunnel_dev) { ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); t->ip6rd.relay_prefix = 0; t->ip6rd.prefixlen = 16; t->ip6rd.relay_prefixlen = 0; } else { struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev); memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd)); } #endif } static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, struct ip_tunnel_parm *parms, int create) { __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; struct ip_tunnel *t, *nt; struct ip_tunnel __rcu **tp; struct net_device *dev; char name[IFNAMSIZ]; struct sit_net *sitn = net_generic(net, sit_net_id); for (tp = __ipip6_bucket(sitn, parms); (t = rtnl_dereference(*tp)) != NULL; tp = &t->next) { if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr && parms->link == t->parms.link) { if (create) return NULL; else return t; } } if (!create) goto failed; if (parms->name[0]) strlcpy(name, parms->name, IFNAMSIZ); else strcpy(name, "sit%d"); dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup); if (dev == NULL) return NULL; dev_net_set(dev, net); if (strchr(name, '%')) { if (dev_alloc_name(dev, name) < 0) goto failed_free; } nt = netdev_priv(dev); nt->parms = *parms; if (ipip6_tunnel_init(dev) < 0) goto failed_free; ipip6_tunnel_clone_6rd(dev, sitn); if (parms->i_flags & SIT_ISATAP) dev->priv_flags |= IFF_ISATAP; if (register_netdevice(dev) < 0) goto failed_free; dev_hold(dev); ipip6_tunnel_link(sitn, nt); return nt; failed_free: ipip6_dev_free(dev); failed: return NULL; } #define for_each_prl_rcu(start) \ for (prl = rcu_dereference(start); \ prl; \ prl = rcu_dereference(prl->next)) static struct ip_tunnel_prl_entry * __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) { struct ip_tunnel_prl_entry *prl; for_each_prl_rcu(t->prl) if (prl->addr == addr) break; return prl; } static int ipip6_tunnel_get_prl(struct ip_tunnel *t, struct ip_tunnel_prl __user *a) { struct ip_tunnel_prl kprl, *kp; struct 
ip_tunnel_prl_entry *prl; unsigned int cmax, c = 0, ca, len; int ret = 0; if (copy_from_user(&kprl, a, sizeof(kprl))) return -EFAULT; cmax = kprl.datalen / sizeof(kprl); if (cmax > 1 && kprl.addr != htonl(INADDR_ANY)) cmax = 1; /* For simple GET or for root users, * we try harder to allocate. */ kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ? kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : NULL; rcu_read_lock(); ca = t->prl_count < cmax ? t->prl_count : cmax; if (!kp) { /* We don't try hard to allocate much memory for * non-root users. * For root users, retry allocating enough memory for * the answer. */ kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC); if (!kp) { ret = -ENOMEM; goto out; } } c = 0; for_each_prl_rcu(t->prl) { if (c >= cmax) break; if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) continue; kp[c].addr = prl->addr; kp[c].flags = prl->flags; c++; if (kprl.addr != htonl(INADDR_ANY)) break; } out: rcu_read_unlock(); len = sizeof(*kp) * c; ret = 0; if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen)) ret = -EFAULT; kfree(kp); return ret; } static int ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) { struct ip_tunnel_prl_entry *p; int err = 0; if (a->addr == htonl(INADDR_ANY)) return -EINVAL; ASSERT_RTNL(); for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) { if (p->addr == a->addr) { if (chg) { p->flags = a->flags; goto out; } err = -EEXIST; goto out; } } if (chg) { err = -ENXIO; goto out; } p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL); if (!p) { err = -ENOBUFS; goto out; } p->next = t->prl; p->addr = a->addr; p->flags = a->flags; t->prl_count++; rcu_assign_pointer(t->prl, p); out: return err; } static void prl_entry_destroy_rcu(struct rcu_head *head) { kfree(container_of(head, struct ip_tunnel_prl_entry, rcu_head)); } static void prl_list_destroy_rcu(struct rcu_head *head) { struct ip_tunnel_prl_entry *p, *n; p = container_of(head, struct ip_tunnel_prl_entry, rcu_head); do { n = p->next; kfree(p); p = n; } while (p); } static int ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) { struct ip_tunnel_prl_entry *x, **p; int err = 0; ASSERT_RTNL(); if (a && a->addr != htonl(INADDR_ANY)) { for (p = &t->prl; *p; p = &(*p)->next) { if ((*p)->addr == a->addr) { x = *p; *p = x->next; call_rcu(&x->rcu_head, prl_entry_destroy_rcu); t->prl_count--; goto out; } } err = -ENXIO; } else { if (t->prl) { t->prl_count = 0; x = t->prl; call_rcu(&x->rcu_head, prl_list_destroy_rcu); t->prl = NULL; } } out: return err; } static int isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t) { struct ip_tunnel_prl_entry *p; int ok = 1; rcu_read_lock(); p = __ipip6_tunnel_locate_prl(t, iph->saddr); if (p) { if (p->flags & PRL_DEFAULT) skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT; else skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT; } else { struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr; if (ipv6_addr_is_isatap(addr6) && (addr6->s6_addr32[3] == iph->saddr) && ipv6_chk_prefix(addr6, t->dev)) skb->ndisc_nodetype = NDISC_NODETYPE_HOST; else ok = 0; } rcu_read_unlock(); return ok; } static void ipip6_tunnel_uninit(struct net_device *dev) { struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); if (dev == sitn->fb_tunnel_dev) { rcu_assign_pointer(sitn->tunnels_wc[0], NULL); } else { ipip6_tunnel_unlink(sitn, netdev_priv(dev)); ipip6_tunnel_del_prl(netdev_priv(dev), NULL); } dev_put(dev); } static int ipip6_err(struct sk_buff *skb, u32 info) { /* All the routers (except for 
Linux) return only 8 bytes of packet payload. It means, that precise relaying of ICMP in the real Internet is absolutely infeasible. */ struct iphdr *iph = (struct iphdr*)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; int err; switch (type) { default: case ICMP_PARAMETERPROB: return 0; case ICMP_DEST_UNREACH: switch (code) { case ICMP_SR_FAILED: case ICMP_PORT_UNREACH: /* Impossible event. */ return 0; case ICMP_FRAG_NEEDED: /* Soft state for pmtu is maintained by IP core. */ return 0; default: /* All others are translated to HOST_UNREACH. rfc2003 contains "deep thoughts" about NET_UNREACH, I believe they are just ether pollution. --ANK */ break; } break; case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return 0; break; } err = -ENOENT; rcu_read_lock(); t = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, iph->daddr, iph->saddr); if (t == NULL || t->parms.iph.daddr == 0) goto out; err = 0; if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) goto out; if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) t->err_count++; else t->err_count = 1; t->err_time = jiffies; out: rcu_read_unlock(); return err; } static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) { if (INET_ECN_is_ce(iph->tos)) IP6_ECN_set_ce(ipv6_hdr(skb)); } static int ipip6_rcv(struct sk_buff *skb) { struct iphdr *iph; struct ip_tunnel *tunnel; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto out; iph = ip_hdr(skb); rcu_read_lock(); tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, iph->saddr, iph->daddr); if (tunnel != NULL) { struct pcpu_tstats *tstats; secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); skb->pkt_type = PACKET_HOST; if ((tunnel->dev->priv_flags & IFF_ISATAP) && !isatap_chksrc(skb, iph, tunnel)) { tunnel->dev->stats.rx_errors++; rcu_read_unlock(); kfree_skb(skb); return 0; } tstats = this_cpu_ptr(tunnel->dev->tstats); tstats->rx_packets++; tstats->rx_bytes += skb->len; __skb_tunnel_rx(skb, tunnel->dev); ipip6_ecn_decapsulate(iph, skb); netif_rx(skb); rcu_read_unlock(); return 0; } /* no tunnel matched, let upstream know, ipsec may handle it */ rcu_read_unlock(); return 1; out: kfree_skb(skb); return 0; } /* * Returns the embedded IPv4 address if the IPv6 address * comes from 6rd / 6to4 (RFC 3056) addr space. */ static inline __be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel) { __be32 dst = 0; #ifdef CONFIG_IPV6_SIT_6RD if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix, tunnel->ip6rd.prefixlen)) { unsigned int pbw0, pbi0; int pbi1; u32 d; pbw0 = tunnel->ip6rd.prefixlen >> 5; pbi0 = tunnel->ip6rd.prefixlen & 0x1f; d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >> tunnel->ip6rd.relay_prefixlen; pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen; if (pbi1 > 0) d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >> (32 - pbi1); dst = tunnel->ip6rd.relay_prefix | htonl(d); } #else if (v6dst->s6_addr16[0] == htons(0x2002)) { /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ memcpy(&dst, &v6dst->s6_addr16[1], 4); } #endif return dst; } /* * This function assumes it is being called from dev_queue_xmit() * and that skb is filled properly by that function. 
*/ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct pcpu_tstats *tstats; struct iphdr *tiph = &tunnel->parms.iph; struct ipv6hdr *iph6 = ipv6_hdr(skb); u8 tos = tunnel->parms.iph.tos; __be16 df = tiph->frag_off; struct rtable *rt; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ struct iphdr *iph; /* Our new IP header */ unsigned int max_headroom; /* The extra header space needed */ __be32 dst = tiph->daddr; int mtu; struct in6_addr *addr6; int addr_type; if (skb->protocol != htons(ETH_P_IPV6)) goto tx_error; /* ISATAP (RFC4214) - must come before 6to4 */ if (dev->priv_flags & IFF_ISATAP) { struct neighbour *neigh = NULL; if (skb_dst(skb)) neigh = skb_dst(skb)->neighbour; if (neigh == NULL) { if (net_ratelimit()) printk(KERN_DEBUG "sit: nexthop == NULL\n"); goto tx_error; } addr6 = (struct in6_addr*)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if ((addr_type & IPV6_ADDR_UNICAST) && ipv6_addr_is_isatap(addr6)) dst = addr6->s6_addr32[3]; else goto tx_error; } if (!dst) dst = try_6rd(&iph6->daddr, tunnel); if (!dst) { struct neighbour *neigh = NULL; if (skb_dst(skb)) neigh = skb_dst(skb)->neighbour; if (neigh == NULL) { if (net_ratelimit()) printk(KERN_DEBUG "sit: nexthop == NULL\n"); goto tx_error; } addr6 = (struct in6_addr*)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if (addr_type == IPV6_ADDR_ANY) { addr6 = &ipv6_hdr(skb)->daddr; addr_type = ipv6_addr_type(addr6); } if ((addr_type & IPV6_ADDR_COMPATv4) == 0) goto tx_error_icmp; dst = addr6->s6_addr32[3]; } { struct flowi fl = { .fl4_dst = dst, .fl4_src = tiph->saddr, .fl4_tos = RT_TOS(tos), .oif = tunnel->parms.link, .proto = IPPROTO_IPV6 }; if (ip_route_output_key(dev_net(dev), &rt, &fl)) { dev->stats.tx_carrier_errors++; goto tx_error_icmp; } } if (rt->rt_type != RTN_UNICAST) { ip_rt_put(rt); dev->stats.tx_carrier_errors++; goto tx_error_icmp; } tdev = rt->dst.dev; if (tdev == dev) { ip_rt_put(rt); dev->stats.collisions++; goto tx_error; } if (df) { mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); if (mtu < 68) { dev->stats.collisions++; ip_rt_put(rt); goto tx_error; } if (mtu < IPV6_MIN_MTU) { mtu = IPV6_MIN_MTU; df = 0; } if (tunnel->parms.iph.daddr && skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); if (skb->len > mtu) { icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ip_rt_put(rt); goto tx_error; } } if (tunnel->err_count > 0) { if (time_before(jiffies, tunnel->err_time + IPTUNNEL_ERR_TIMEO)) { tunnel->err_count--; dst_link_failure(skb); } else tunnel->err_count = 0; } /* * Okay, now see if we can stuff it in the buffer as-is. */ max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr); if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) { ip_rt_put(rt); dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } if (skb->sk) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; iph6 = ipv6_hdr(skb); } skb->transport_header = skb->network_header; skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags = 0; skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); /* * Push down and install the IPIP header. 
*/ iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = df; iph->protocol = IPPROTO_IPV6; iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); iph->daddr = rt->rt_dst; iph->saddr = rt->rt_src; if ((iph->ttl = tiph->ttl) == 0) iph->ttl = iph6->hop_limit; nf_reset(skb); tstats = this_cpu_ptr(dev->tstats); __IPTUNNEL_XMIT(tstats, &dev->stats); return NETDEV_TX_OK; tx_error_icmp: dst_link_failure(skb); tx_error: dev->stats.tx_errors++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static void ipip6_tunnel_bind_dev(struct net_device *dev) { struct net_device *tdev = NULL; struct ip_tunnel *tunnel; struct iphdr *iph; tunnel = netdev_priv(dev); iph = &tunnel->parms.iph; if (iph->daddr) { struct flowi fl = { .fl4_dst = iph->daddr, .fl4_src = iph->saddr, .fl4_tos = RT_TOS(iph->tos), .oif = tunnel->parms.link, .proto = IPPROTO_IPV6 }; struct rtable *rt; if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { tdev = rt->dst.dev; ip_rt_put(rt); } dev->flags |= IFF_POINTOPOINT; } if (!tdev && tunnel->parms.link) tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link); if (tdev) { dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); dev->mtu = tdev->mtu - sizeof(struct iphdr); if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; } dev->iflink = tunnel->parms.link; } static int ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip_tunnel_parm p; struct ip_tunnel_prl prl; struct ip_tunnel *t; struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif switch (cmd) { case SIOCGETTUNNEL: #ifdef CONFIG_IPV6_SIT_6RD case SIOCGET6RD: #endif t = NULL; if (dev == sitn->fb_tunnel_dev) { if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { err = -EFAULT; break; } t = ipip6_tunnel_locate(net, &p, 0); } if (t == NULL) t = netdev_priv(dev); err = -EFAULT; if (cmd == SIOCGETTUNNEL) { memcpy(&p, &t->parms, sizeof(p)); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) goto done; #ifdef CONFIG_IPV6_SIT_6RD } else { ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix); ip6rd.relay_prefix = t->ip6rd.relay_prefix; ip6rd.prefixlen = t->ip6rd.prefixlen; ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen; if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd, sizeof(ip6rd))) goto done; #endif } err = 0; break; case SIOCADDTUNNEL: case SIOCCHGTUNNEL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) goto done; err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) goto done; err = -EINVAL; if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 || p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF))) goto done; if (p.iph.ttl) p.iph.frag_off |= htons(IP_DF); t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL); if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { if (t != NULL) { if (t->dev != dev) { err = -EEXIST; break; } } else { if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) || (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) { err = -EINVAL; break; } t = netdev_priv(dev); ipip6_tunnel_unlink(sitn, t); synchronize_net(); t->parms.iph.saddr = p.iph.saddr; t->parms.iph.daddr = p.iph.daddr; memcpy(dev->dev_addr, &p.iph.saddr, 4); memcpy(dev->broadcast, &p.iph.daddr, 4); ipip6_tunnel_link(sitn, t); netdev_state_change(dev); } } if (t) { err = 0; if (cmd == SIOCCHGTUNNEL) { t->parms.iph.ttl = p.iph.ttl; t->parms.iph.tos = p.iph.tos; if (t->parms.link != p.link) { t->parms.link = p.link; ipip6_tunnel_bind_dev(dev); 
netdev_state_change(dev); } } if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) err = -EFAULT; } else err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); break; case SIOCDELTUNNEL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) goto done; if (dev == sitn->fb_tunnel_dev) { err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) goto done; err = -ENOENT; if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL) goto done; err = -EPERM; if (t == netdev_priv(sitn->fb_tunnel_dev)) goto done; dev = t->dev; } unregister_netdevice(dev); err = 0; break; case SIOCGETPRL: err = -EINVAL; if (dev == sitn->fb_tunnel_dev) goto done; err = -ENOENT; if (!(t = netdev_priv(dev))) goto done; err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data); break; case SIOCADDPRL: case SIOCDELPRL: case SIOCCHGPRL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) goto done; err = -EINVAL; if (dev == sitn->fb_tunnel_dev) goto done; err = -EFAULT; if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl))) goto done; err = -ENOENT; if (!(t = netdev_priv(dev))) goto done; switch (cmd) { case SIOCDELPRL: err = ipip6_tunnel_del_prl(t, &prl); break; case SIOCADDPRL: case SIOCCHGPRL: err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL); break; } netdev_state_change(dev); break; #ifdef CONFIG_IPV6_SIT_6RD case SIOCADD6RD: case SIOCCHG6RD: case SIOCDEL6RD: err = -EPERM; if (!capable(CAP_NET_ADMIN)) goto done; err = -EFAULT; if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data, sizeof(ip6rd))) goto done; t = netdev_priv(dev); if (cmd != SIOCDEL6RD) { struct in6_addr prefix; __be32 relay_prefix; err = -EINVAL; if (ip6rd.relay_prefixlen > 32 || ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64) goto done; ipv6_addr_prefix(&prefix, &ip6rd.prefix, ip6rd.prefixlen); if (!ipv6_addr_equal(&prefix, &ip6rd.prefix)) goto done; if (ip6rd.relay_prefixlen) relay_prefix = ip6rd.relay_prefix & htonl(0xffffffffUL << (32 - ip6rd.relay_prefixlen)); else relay_prefix = 0; if (relay_prefix != ip6rd.relay_prefix) goto done; ipv6_addr_copy(&t->ip6rd.prefix, &prefix); t->ip6rd.relay_prefix = relay_prefix; t->ip6rd.prefixlen = ip6rd.prefixlen; t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen; } else ipip6_tunnel_clone_6rd(dev, sitn); err = 0; break; #endif default: err = -EINVAL; } done: return err; } static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops ipip6_netdev_ops = { .ndo_uninit = ipip6_tunnel_uninit, .ndo_start_xmit = ipip6_tunnel_xmit, .ndo_do_ioctl = ipip6_tunnel_ioctl, .ndo_change_mtu = ipip6_tunnel_change_mtu, .ndo_get_stats = ipip6_get_stats, }; static void ipip6_dev_free(struct net_device *dev) { free_percpu(dev->tstats); free_netdev(dev); } static void ipip6_tunnel_setup(struct net_device *dev) { dev->netdev_ops = &ipip6_netdev_ops; dev->destructor = ipip6_dev_free; dev->type = ARPHRD_SIT; dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr); dev->flags = IFF_NOARP; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; dev->iflink = 0; dev->addr_len = 4; dev->features |= NETIF_F_NETNS_LOCAL; dev->features |= NETIF_F_LLTX; } static int ipip6_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); tunnel->dev = dev; strcpy(tunnel->parms.name, dev->name); memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); 
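	/* The 4-byte dev_addr/broadcast expose the tunnel's IPv4 endpoints (dev->addr_len == 4). */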
ipip6_tunnel_bind_dev(dev); dev->tstats = alloc_percpu(struct pcpu_tstats); if (!dev->tstats) return -ENOMEM; return 0; } static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct iphdr *iph = &tunnel->parms.iph; struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); tunnel->dev = dev; strcpy(tunnel->parms.name, dev->name); iph->version = 4; iph->protocol = IPPROTO_IPV6; iph->ihl = 5; iph->ttl = 64; dev->tstats = alloc_percpu(struct pcpu_tstats); if (!dev->tstats) return -ENOMEM; dev_hold(dev); sitn->tunnels_wc[0] = tunnel; return 0; } static struct xfrm_tunnel sit_handler __read_mostly = { .handler = ipip6_rcv, .err_handler = ipip6_err, .priority = 1, }; static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) { int prio; for (prio = 1; prio < 4; prio++) { int h; for (h = 0; h < HASH_SIZE; h++) { struct ip_tunnel *t = sitn->tunnels[prio][h]; while (t != NULL) { unregister_netdevice_queue(t->dev, head); t = t->next; } } } } static int __net_init sit_init_net(struct net *net) { struct sit_net *sitn = net_generic(net, sit_net_id); int err; sitn->tunnels[0] = sitn->tunnels_wc; sitn->tunnels[1] = sitn->tunnels_l; sitn->tunnels[2] = sitn->tunnels_r; sitn->tunnels[3] = sitn->tunnels_r_l; sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0", ipip6_tunnel_setup); if (!sitn->fb_tunnel_dev) { err = -ENOMEM; goto err_alloc_dev; } dev_net_set(sitn->fb_tunnel_dev, net); err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev); if (err) goto err_dev_free; ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn); if ((err = register_netdev(sitn->fb_tunnel_dev))) goto err_reg_dev; return 0; err_reg_dev: dev_put(sitn->fb_tunnel_dev); err_dev_free: ipip6_dev_free(sitn->fb_tunnel_dev); err_alloc_dev: return err; } static void __net_exit sit_exit_net(struct net *net) { struct sit_net *sitn = net_generic(net, sit_net_id); LIST_HEAD(list); rtnl_lock(); sit_destroy_tunnels(sitn, &list); unregister_netdevice_queue(sitn->fb_tunnel_dev, &list); unregister_netdevice_many(&list); rtnl_unlock(); } static struct pernet_operations sit_net_ops = { .init = sit_init_net, .exit = sit_exit_net, .id = &sit_net_id, .size = sizeof(struct sit_net), }; static void __exit sit_cleanup(void) { xfrm4_tunnel_deregister(&sit_handler, AF_INET6); unregister_pernet_device(&sit_net_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ } static int __init sit_init(void) { int err; printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n"); err = register_pernet_device(&sit_net_ops); if (err < 0) return err; err = xfrm4_tunnel_register(&sit_handler, AF_INET6); if (err < 0) { unregister_pernet_device(&sit_net_ops); printk(KERN_INFO "sit init: Can't add protocol\n"); } return err; } module_init(sit_init); module_exit(sit_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS("sit0");
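/*
 * Illustrative userspace sketch, not part of the kernel file above: the
 * shortest path into the SIOCADDTUNNEL branch of ipip6_tunnel_ioctl().
 * The helper name add_sit_tunnel and the "sit1" device name are made up;
 * the iph constraints (version 4, ihl 5, IPPROTO_IPV6, no non-DF frag
 * bits) are the ones the handler checks, and CAP_NET_ADMIN is required
 * as enforced above. Assumes a kernel/libc header combination where
 * <linux/if.h> and <linux/if_tunnel.h> can be used from userspace.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <linux/if.h>
#include <linux/if_tunnel.h>

static int add_sit_tunnel(int fd, const char *v4_local, const char *v4_remote)
{
	struct ip_tunnel_parm p;
	struct ifreq ifr;

	memset(&p, 0, sizeof(p));
	strncpy(p.name, "sit1", IFNAMSIZ - 1);
	p.iph.version = 4;			/* rejected otherwise */
	p.iph.ihl = 5;
	p.iph.protocol = IPPROTO_IPV6;
	p.iph.ttl = 64;				/* non-zero ttl forces IP_DF */
	p.iph.saddr = inet_addr(v4_local);
	p.iph.daddr = inet_addr(v4_remote);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "sit0", IFNAMSIZ - 1);	/* fallback device */
	ifr.ifr_data = (void *)&p;

	return ioctl(fd, SIOCADDTUNNEL, &ifr);	/* fd: any AF_INET socket */
}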
/* End of sample: net/ipv6/sit.c (./CrossVul/dataset_final_sorted/CWE-264/c/bad_3438_4) */

/* Next sample: fs/xfs/xfs_file.c (crossvul-cpp_data_bad_2287_13) */
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_error.h" #include "xfs_dir2.h" #include "xfs_dir2_priv.h" #include "xfs_ioctl.h" #include "xfs_trace.h" #include "xfs_log.h" #include "xfs_dinode.h" #include <linux/aio.h> #include <linux/dcache.h> #include <linux/falloc.h> #include <linux/pagevec.h> static const struct vm_operations_struct xfs_file_vm_ops; /* * Locking primitives for read and write IO paths to ensure we consistently use * and order the inode->i_mutex, ip->i_lock and ip->i_iolock. */ static inline void xfs_rw_ilock( struct xfs_inode *ip, int type) { if (type & XFS_IOLOCK_EXCL) mutex_lock(&VFS_I(ip)->i_mutex); xfs_ilock(ip, type); } static inline void xfs_rw_iunlock( struct xfs_inode *ip, int type) { xfs_iunlock(ip, type); if (type & XFS_IOLOCK_EXCL) mutex_unlock(&VFS_I(ip)->i_mutex); } static inline void xfs_rw_ilock_demote( struct xfs_inode *ip, int type) { xfs_ilock_demote(ip, type); if (type & XFS_IOLOCK_EXCL) mutex_unlock(&VFS_I(ip)->i_mutex); } /* * xfs_iozero * * xfs_iozero clears the specified range of buffer supplied, * and marks all the affected blocks as valid and modified. If * an affected block is not allocated, it will be allocated. If * an affected block is not completely overwritten, and is not * valid before the operation, it will be read from disk before * being partially zeroed. */ int xfs_iozero( struct xfs_inode *ip, /* inode */ loff_t pos, /* offset in file */ size_t count) /* size of data to zero */ { struct page *page; struct address_space *mapping; int status; mapping = VFS_I(ip)->i_mapping; do { unsigned offset, bytes; void *fsdata; offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ bytes = PAGE_CACHE_SIZE - offset; if (bytes > count) bytes = count; status = pagecache_write_begin(NULL, mapping, pos, bytes, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); if (status) break; zero_user(page, offset, bytes); status = pagecache_write_end(NULL, mapping, pos, bytes, bytes, page, fsdata); WARN_ON(status <= 0); /* can't return less than zero! */ pos += bytes; count -= bytes; status = 0; } while (count); return (-status); } /* * Fsync operations on directories are much simpler than on regular files, * as there is no file data to flush, and thus also no need for explicit * cache flush operations, and there are no non-transaction metadata updates * on directories either. 
*/ STATIC int xfs_dir_fsync( struct file *file, loff_t start, loff_t end, int datasync) { struct xfs_inode *ip = XFS_I(file->f_mapping->host); struct xfs_mount *mp = ip->i_mount; xfs_lsn_t lsn = 0; trace_xfs_dir_fsync(ip); xfs_ilock(ip, XFS_ILOCK_SHARED); if (xfs_ipincount(ip)) lsn = ip->i_itemp->ili_last_lsn; xfs_iunlock(ip, XFS_ILOCK_SHARED); if (!lsn) return 0; return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); } STATIC int xfs_file_fsync( struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; int error = 0; int log_flushed = 0; xfs_lsn_t lsn = 0; trace_xfs_file_fsync(ip); error = filemap_write_and_wait_range(inode->i_mapping, start, end); if (error) return error; if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); xfs_iflags_clear(ip, XFS_ITRUNCATED); if (mp->m_flags & XFS_MOUNT_BARRIER) { /* * If we have an RT and/or log subvolume we need to make sure * to flush the write cache of the device used for file data * first. This is to ensure newly written file data makes * it to disk before logging the new inode size in case of * an extending write. */ if (XFS_IS_REALTIME_INODE(ip)) xfs_blkdev_issue_flush(mp->m_rtdev_targp); else if (mp->m_logdev_targp != mp->m_ddev_targp) xfs_blkdev_issue_flush(mp->m_ddev_targp); } /* * All metadata updates are logged, which means that we just have * to flush the log up to the latest LSN that touched the inode. */ xfs_ilock(ip, XFS_ILOCK_SHARED); if (xfs_ipincount(ip)) { if (!datasync || (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP)) lsn = ip->i_itemp->ili_last_lsn; } xfs_iunlock(ip, XFS_ILOCK_SHARED); if (lsn) error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed); /* * If we only have a single device, and the log force above was * a no-op, we might have to flush the data device cache here. * This can only happen for fdatasync/O_DSYNC if we were overwriting * an already allocated file and thus do not have any metadata to * commit. */ if ((mp->m_flags & XFS_MOUNT_BARRIER) && mp->m_logdev_targp == mp->m_ddev_targp && !XFS_IS_REALTIME_INODE(ip) && !log_flushed) xfs_blkdev_issue_flush(mp->m_ddev_targp); return -error; } STATIC ssize_t xfs_file_read_iter( struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; size_t size = iov_iter_count(to); ssize_t ret = 0; int ioflags = 0; xfs_fsize_t n; loff_t pos = iocb->ki_pos; XFS_STATS_INC(xs_read_calls); if (unlikely(file->f_flags & O_DIRECT)) ioflags |= IO_ISDIRECT; if (file->f_mode & FMODE_NOCMTIME) ioflags |= IO_INVIS; if (unlikely(ioflags & IO_ISDIRECT)) { xfs_buftarg_t *target = XFS_IS_REALTIME_INODE(ip) ? mp->m_rtdev_targp : mp->m_ddev_targp; /* DIO must be aligned to device logical sector size */ if ((pos | size) & target->bt_logical_sectormask) { if (pos == i_size_read(inode)) return 0; return -XFS_ERROR(EINVAL); } } n = mp->m_super->s_maxbytes - pos; if (n <= 0 || size == 0) return 0; if (n < size) size = n; if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; /* * Locking is a bit tricky here. If we take an exclusive lock * for direct IO, we effectively serialise all new concurrent * read IO to this file and block it behind IO that is currently in * progress because IO in progress holds the IO lock shared. We only * need to hold the lock exclusive to blow away the page cache, so * only take the lock exclusively if the page cache needs invalidation.
* This allows the normal direct IO case of no page cache pages to * proceed concurrently without serialisation. */ xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) { xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); if (inode->i_mapping->nrpages) { ret = -filemap_write_and_wait_range( VFS_I(ip)->i_mapping, pos, -1); if (ret) { xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); return ret; } truncate_pagecache_range(VFS_I(ip), pos, -1); } xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); } trace_xfs_file_read(ip, size, pos, ioflags); ret = generic_file_read_iter(iocb, to); if (ret > 0) XFS_STATS_ADD(xs_read_bytes, ret); xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); return ret; } STATIC ssize_t xfs_file_splice_read( struct file *infilp, loff_t *ppos, struct pipe_inode_info *pipe, size_t count, unsigned int flags) { struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); int ioflags = 0; ssize_t ret; XFS_STATS_INC(xs_read_calls); if (infilp->f_mode & FMODE_NOCMTIME) ioflags |= IO_INVIS; if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return -EIO; xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); trace_xfs_file_splice_read(ip, count, *ppos, ioflags); ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); if (ret > 0) XFS_STATS_ADD(xs_read_bytes, ret); xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); return ret; } /* * xfs_file_splice_write() does not use xfs_rw_ilock() because * generic_file_splice_write() takes the i_mutex itself. This, in theory, * could cause lock inversions between the aio_write path and the splice path * if someone is doing concurrent splice(2) based writes and write(2) based * writes to the same inode. The only real way to fix this is to re-implement * the generic code here with correct locking orders. */ STATIC ssize_t xfs_file_splice_write( struct pipe_inode_info *pipe, struct file *outfilp, loff_t *ppos, size_t count, unsigned int flags) { struct inode *inode = outfilp->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); int ioflags = 0; ssize_t ret; XFS_STATS_INC(xs_write_calls); if (outfilp->f_mode & FMODE_NOCMTIME) ioflags |= IO_INVIS; if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return -EIO; xfs_ilock(ip, XFS_IOLOCK_EXCL); trace_xfs_file_splice_write(ip, count, *ppos, ioflags); ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); if (ret > 0) XFS_STATS_ADD(xs_write_bytes, ret); xfs_iunlock(ip, XFS_IOLOCK_EXCL); return ret; } /* * This routine is called to handle zeroing any space in the last block of the * file that is beyond the EOF. We do this since the size is being increased * without writing anything to that block and we don't want to read the * garbage on the disk. */ STATIC int /* error (positive) */ xfs_zero_last_block( struct xfs_inode *ip, xfs_fsize_t offset, xfs_fsize_t isize) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); int zero_offset = XFS_B_FSB_OFFSET(mp, isize); int zero_len; int nimaps = 1; int error = 0; struct xfs_bmbt_irec imap; xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error) return error; ASSERT(nimaps > 0); /* * If the block underlying isize is just a hole, then there * is nothing to zero. */ if (imap.br_startblock == HOLESTARTBLOCK) return 0; zero_len = mp->m_sb.sb_blocksize - zero_offset; if (isize + zero_len > offset) zero_len = offset - isize; return xfs_iozero(ip, isize, zero_len); } /* * Zero any on disk space between the current EOF and the new, larger EOF.
* * This handles the normal case of zeroing the remainder of the last block in * the file and the unusual case of zeroing blocks out beyond the size of the * file. This second case only happens with fixed size extents and when the * system crashes before the inode size was updated but after blocks were * allocated. * * Expects the iolock to be held exclusive, and will take the ilock internally. */ int /* error (positive) */ xfs_zero_eof( struct xfs_inode *ip, xfs_off_t offset, /* starting I/O offset */ xfs_fsize_t isize) /* current inode size */ { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t start_zero_fsb; xfs_fileoff_t end_zero_fsb; xfs_fileoff_t zero_count_fsb; xfs_fileoff_t last_fsb; xfs_fileoff_t zero_off; xfs_fsize_t zero_len; int nimaps; int error = 0; struct xfs_bmbt_irec imap; ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); ASSERT(offset > isize); /* * First handle zeroing the block on which isize resides. * * We only zero a part of that block so it is handled specially. */ if (XFS_B_FSB_OFFSET(mp, isize) != 0) { error = xfs_zero_last_block(ip, offset, isize); if (error) return error; } /* * Calculate the range between the new size and the old where blocks * needing to be zeroed may exist. * * To get the block where the last byte in the file currently resides, * we need to subtract one from the size and truncate back to a block * boundary. We subtract 1 in case the size is exactly on a block * boundary. */ last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); if (last_fsb == end_zero_fsb) { /* * The size was only incremented on its last block. * We took care of that above, so just return. */ return 0; } ASSERT(start_zero_fsb <= end_zero_fsb); while (start_zero_fsb <= end_zero_fsb) { nimaps = 1; zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb, &imap, &nimaps, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error) return error; ASSERT(nimaps > 0); if (imap.br_state == XFS_EXT_UNWRITTEN || imap.br_startblock == HOLESTARTBLOCK) { start_zero_fsb = imap.br_startoff + imap.br_blockcount; ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); continue; } /* * There are blocks we need to zero. */ zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); if ((zero_off + zero_len) > offset) zero_len = offset - zero_off; error = xfs_iozero(ip, zero_off, zero_len); if (error) return error; start_zero_fsb = imap.br_startoff + imap.br_blockcount; ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); } return 0; } /* * Common pre-write limit and setup checks. * * Called with the iolock held either shared or exclusive according to * @iolock, and returns with it held. Might upgrade the iolock to exclusive * if called for a direct write beyond i_size. */ STATIC ssize_t xfs_file_aio_write_checks( struct file *file, loff_t *pos, size_t *count, int *iolock) { struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); int error = 0; restart: error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); if (error) return error; /* * If the offset is beyond the size of the file, we need to zero any * blocks that fall between the existing EOF and the start of this * write.
If zeroing is needed and we are currently holding the * iolock shared, we need to update it to exclusive which implies * having to redo all checks before. */ if (*pos > i_size_read(inode)) { if (*iolock == XFS_IOLOCK_SHARED) { xfs_rw_iunlock(ip, *iolock); *iolock = XFS_IOLOCK_EXCL; xfs_rw_ilock(ip, *iolock); goto restart; } error = -xfs_zero_eof(ip, *pos, i_size_read(inode)); if (error) return error; } /* * Updating the timestamps will grab the ilock again from * xfs_fs_dirty_inode, so we have to call it after dropping the * lock above. Eventually we should look into a way to avoid * the pointless lock roundtrip. */ if (likely(!(file->f_mode & FMODE_NOCMTIME))) { error = file_update_time(file); if (error) return error; } /* * If we're writing the file then make sure to clear the setuid and * setgid bits if the process is not being run by root. This keeps * people from modifying setuid and setgid binaries. */ return file_remove_suid(file); } /* * xfs_file_dio_aio_write - handle direct IO writes * * Lock the inode appropriately to prepare for and issue a direct IO write. * By separating it from the buffered write path we remove all the tricky to * follow locking changes and looping. * * If there are cached pages or we're extending the file, we need IOLOCK_EXCL * until we're sure the bytes at the new EOF have been zeroed and/or the cached * pages are flushed out. * * In most cases the direct IO writes will be done holding IOLOCK_SHARED * allowing them to be done in parallel with reads and other direct IO writes. * However, if the IO is not aligned to filesystem blocks, the direct IO layer * needs to do sub-block zeroing and that requires serialisation against other * direct IOs to the same block. In this case we need to serialise the * submission of the unaligned IOs so that we don't get racing block zeroing in * the dio layer. To avoid the problem with aio, we also need to wait for * outstanding IOs to complete so that unwritten extent conversion is completed * before we try to map the overlapping block. This is currently implemented by * hitting it with a big hammer (i.e. inode_dio_wait()). * * Returns with locks held indicated by @iolock and errors indicated by * negative return values. */ STATIC ssize_t xfs_file_dio_aio_write( struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; ssize_t ret = 0; int unaligned_io = 0; int iolock; size_t count = iov_iter_count(from); loff_t pos = iocb->ki_pos; struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? mp->m_rtdev_targp : mp->m_ddev_targp; /* DIO must be aligned to device logical sector size */ if ((pos | count) & target->bt_logical_sectormask) return -XFS_ERROR(EINVAL); /* "unaligned" here means not aligned to a filesystem block */ if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) unaligned_io = 1; /* * We don't need to take an exclusive lock unless the page cache needs * to be invalidated or unaligned IO is being executed. We don't need to * consider the EOF extension case here because * xfs_file_aio_write_checks() will relock the inode as necessary for * EOF zeroing cases and fill out the new inode size as appropriate.
*/ if (unaligned_io || mapping->nrpages) iolock = XFS_IOLOCK_EXCL; else iolock = XFS_IOLOCK_SHARED; xfs_rw_ilock(ip, iolock); /* * Recheck if there are cached pages that need invalidate after we got * the iolock to protect against other threads adding new pages while * we were waiting for the iolock. */ if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) { xfs_rw_iunlock(ip, iolock); iolock = XFS_IOLOCK_EXCL; xfs_rw_ilock(ip, iolock); } ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); if (ret) goto out; iov_iter_truncate(from, count); if (mapping->nrpages) { ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, pos, -1); if (ret) goto out; truncate_pagecache_range(VFS_I(ip), pos, -1); } /* * If we are doing unaligned IO, wait for all other IO to drain, * otherwise demote the lock if we had to flush cached pages */ if (unaligned_io) inode_dio_wait(inode); else if (iolock == XFS_IOLOCK_EXCL) { xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); iolock = XFS_IOLOCK_SHARED; } trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); ret = generic_file_direct_write(iocb, from, pos); out: xfs_rw_iunlock(ip, iolock); /* No fallback to buffered IO on errors for XFS. */ ASSERT(ret < 0 || ret == count); return ret; } STATIC ssize_t xfs_file_buffered_aio_write( struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; struct xfs_inode *ip = XFS_I(inode); ssize_t ret; int enospc = 0; int iolock = XFS_IOLOCK_EXCL; loff_t pos = iocb->ki_pos; size_t count = iov_iter_count(from); xfs_rw_ilock(ip, iolock); ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); if (ret) goto out; iov_iter_truncate(from, count); /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; write_retry: trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); ret = generic_perform_write(file, from, pos); if (likely(ret >= 0)) iocb->ki_pos = pos + ret; /* * If we just got an ENOSPC, try to write back all dirty inodes to * convert delalloc space to free up some of the excess reserved * metadata space. 
*/ if (ret == -ENOSPC && !enospc) { enospc = 1; xfs_flush_inodes(ip->i_mount); goto write_retry; } current->backing_dev_info = NULL; out: xfs_rw_iunlock(ip, iolock); return ret; } STATIC ssize_t xfs_file_write_iter( struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; struct xfs_inode *ip = XFS_I(inode); ssize_t ret; size_t ocount = iov_iter_count(from); XFS_STATS_INC(xs_write_calls); if (ocount == 0) return 0; if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return -EIO; if (unlikely(file->f_flags & O_DIRECT)) ret = xfs_file_dio_aio_write(iocb, from); else ret = xfs_file_buffered_aio_write(iocb, from); if (ret > 0) { ssize_t err; XFS_STATS_ADD(xs_write_bytes, ret); /* Handle various SYNC-type writes */ err = generic_write_sync(file, iocb->ki_pos - ret, ret); if (err < 0) ret = err; } return ret; } STATIC long xfs_file_fallocate( struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct xfs_inode *ip = XFS_I(inode); struct xfs_trans *tp; long error; loff_t new_size = 0; if (!S_ISREG(inode->i_mode)) return -EINVAL; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) return -EOPNOTSUPP; xfs_ilock(ip, XFS_IOLOCK_EXCL); if (mode & FALLOC_FL_PUNCH_HOLE) { error = xfs_free_file_space(ip, offset, len); if (error) goto out_unlock; } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { unsigned blksize_mask = (1 << inode->i_blkbits) - 1; if (offset & blksize_mask || len & blksize_mask) { error = -EINVAL; goto out_unlock; } /* * There is no need to overlap collapse range with EOF, * in which case it is effectively a truncate operation */ if (offset + len >= i_size_read(inode)) { error = -EINVAL; goto out_unlock; } new_size = i_size_read(inode) - len; error = xfs_collapse_file_space(ip, offset, len); if (error) goto out_unlock; } else { if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { new_size = offset + len; error = -inode_newsize_ok(inode, new_size); if (error) goto out_unlock; } if (mode & FALLOC_FL_ZERO_RANGE) error = xfs_zero_file_space(ip, offset, len); else error = xfs_alloc_file_space(ip, offset, len, XFS_BMAPI_PREALLOC); if (error) goto out_unlock; } tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID); error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0); if (error) { xfs_trans_cancel(tp, 0); goto out_unlock; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); ip->i_d.di_mode &= ~S_ISUID; if (ip->i_d.di_mode & S_IXGRP) ip->i_d.di_mode &= ~S_ISGID; if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE))) ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); if (file->f_flags & O_DSYNC) xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); if (error) goto out_unlock; /* Change file size if needed */ if (new_size) { struct iattr iattr; iattr.ia_valid = ATTR_SIZE; iattr.ia_size = new_size; error = xfs_setattr_size(ip, &iattr); } out_unlock: xfs_iunlock(ip, XFS_IOLOCK_EXCL); return -error; } STATIC int xfs_file_open( struct inode *inode, struct file *file) { if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) return -EFBIG; if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb))) return -EIO; return 0; } STATIC int xfs_dir_open( struct inode *inode, struct file *file) { struct xfs_inode *ip = XFS_I(inode); int mode; int error; 
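	/* Reuse the regular-file open checks (O_LARGEFILE handling, forced-shutdown test) before the readahead below. */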
error = xfs_file_open(inode, file); if (error) return error; /* * If there are any blocks, read-ahead block 0 as we're almost * certain to have the next operation be a read there. */ mode = xfs_ilock_data_map_shared(ip); if (ip->i_d.di_nextents > 0) xfs_dir3_data_readahead(NULL, ip, 0, -1); xfs_iunlock(ip, mode); return 0; } STATIC int xfs_file_release( struct inode *inode, struct file *filp) { return -xfs_release(XFS_I(inode)); } STATIC int xfs_file_readdir( struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); xfs_inode_t *ip = XFS_I(inode); int error; size_t bufsize; /* * The Linux API doesn't pass down the total size of the buffer * we read into down to the filesystem. With the filldir concept * it's not needed for correct information, but the XFS dir2 leaf * code wants an estimate of the buffer size to calculate it's * readahead window and size the buffers used for mapping to * physical blocks. * * Try to give it an estimate that's good enough, maybe at some * point we can change the ->readdir prototype to include the * buffer size. For now we use the current glibc buffer size. */ bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); error = xfs_readdir(ip, ctx, bufsize); if (error) return -error; return 0; } STATIC int xfs_file_mmap( struct file *filp, struct vm_area_struct *vma) { vma->vm_ops = &xfs_file_vm_ops; file_accessed(filp); return 0; } /* * mmap()d file has taken write protection fault and is being made * writable. We can set the page state up correctly for a writable * page, which means we can do correct delalloc accounting (ENOSPC * checking!) and unwritten extent mapping. */ STATIC int xfs_vm_page_mkwrite( struct vm_area_struct *vma, struct vm_fault *vmf) { return block_page_mkwrite(vma, vmf, xfs_get_blocks); } /* * This type is designed to indicate the type of offset we would like * to search from page cache for either xfs_seek_data() or xfs_seek_hole(). */ enum { HOLE_OFF = 0, DATA_OFF, }; /* * Lookup the desired type of offset from the given page. * * On success, return true and the offset argument will point to the * start of the region that was found. Otherwise this function will * return false and keep the offset argument unchanged. */ STATIC bool xfs_lookup_buffer_offset( struct page *page, loff_t *offset, unsigned int type) { loff_t lastoff = page_offset(page); bool found = false; struct buffer_head *bh, *head; bh = head = page_buffers(page); do { /* * Unwritten extents that have data in the page * cache covering them can be identified by the * BH_Unwritten state flag. Pages with multiple * buffers might have a mix of holes, data and * unwritten extents - any buffer with valid * data in it should have BH_Uptodate flag set * on it. */ if (buffer_unwritten(bh) || buffer_uptodate(bh)) { if (type == DATA_OFF) found = true; } else { if (type == HOLE_OFF) found = true; } if (found) { *offset = lastoff; break; } lastoff += bh->b_size; } while ((bh = bh->b_this_page) != head); return found; } /* * This routine is called to find out and return a data or hole offset * from the page cache for unwritten extents according to the desired * type for xfs_seek_data() or xfs_seek_hole(). * * The argument offset is used to tell where we start to search from the * page cache. Map is used to figure out the end points of the range to * lookup pages. * * Return true if the desired type of offset was found, and the argument * offset is filled with that address. Otherwise, return false and keep * offset unchanged. 
*/ STATIC bool xfs_find_get_desired_pgoff( struct inode *inode, struct xfs_bmbt_irec *map, unsigned int type, loff_t *offset) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; struct pagevec pvec; pgoff_t index; pgoff_t end; loff_t endoff; loff_t startoff = *offset; loff_t lastoff = startoff; bool found = false; pagevec_init(&pvec, 0); index = startoff >> PAGE_CACHE_SHIFT; endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); end = endoff >> PAGE_CACHE_SHIFT; do { int want; unsigned nr_pages; unsigned int i; want = min_t(pgoff_t, end - index, PAGEVEC_SIZE); nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, want); /* * No page mapped into given range. If we are searching holes * and if this is the first time we got into the loop, it means * that the given offset is landed in a hole, return it. * * If we have already stepped through some block buffers to find * holes but they all contains data. In this case, the last * offset is already updated and pointed to the end of the last * mapped page, if it does not reach the endpoint to search, * that means there should be a hole between them. */ if (nr_pages == 0) { /* Data search found nothing */ if (type == DATA_OFF) break; ASSERT(type == HOLE_OFF); if (lastoff == startoff || lastoff < endoff) { found = true; *offset = lastoff; } break; } /* * At lease we found one page. If this is the first time we * step into the loop, and if the first page index offset is * greater than the given search offset, a hole was found. */ if (type == HOLE_OFF && lastoff == startoff && lastoff < page_offset(pvec.pages[0])) { found = true; break; } for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; loff_t b_offset; /* * At this point, the page may be truncated or * invalidated (changing page->mapping to NULL), * or even swizzled back from swapper_space to tmpfs * file mapping. However, page->index will not change * because we have a reference on the page. * * Searching done if the page index is out of range. * If the current offset is not reaches the end of * the specified search range, there should be a hole * between them. */ if (page->index > end) { if (type == HOLE_OFF && lastoff < endoff) { *offset = lastoff; found = true; } goto out; } lock_page(page); /* * Page truncated or invalidated(page->mapping == NULL). * We can freely skip it and proceed to check the next * page. */ if (unlikely(page->mapping != inode->i_mapping)) { unlock_page(page); continue; } if (!page_has_buffers(page)) { unlock_page(page); continue; } found = xfs_lookup_buffer_offset(page, &b_offset, type); if (found) { /* * The found offset may be less than the start * point to search if this is the first time to * come here. */ *offset = max_t(loff_t, startoff, b_offset); unlock_page(page); goto out; } /* * We either searching data but nothing was found, or * searching hole but found a data buffer. In either * case, probably the next page contains the desired * things, update the last offset to it so. */ lastoff = page_offset(page) + PAGE_SIZE; unlock_page(page); } /* * The number of returned pages less than our desired, search * done. In this case, nothing was found for searching data, * but we found a hole behind the last offset. 
*/ if (nr_pages < want) { if (type == HOLE_OFF) { *offset = lastoff; found = true; } break; } index = pvec.pages[i - 1]->index + 1; pagevec_release(&pvec); } while (index <= end); out: pagevec_release(&pvec); return found; } STATIC loff_t xfs_seek_data( struct file *file, loff_t start) { struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; loff_t uninitialized_var(offset); xfs_fsize_t isize; xfs_fileoff_t fsbno; xfs_filblks_t end; uint lock; int error; lock = xfs_ilock_data_map_shared(ip); isize = i_size_read(inode); if (start >= isize) { error = ENXIO; goto out_unlock; } /* * Try to read extents from the first block indicated * by fsbno to the end block of the file. */ fsbno = XFS_B_TO_FSBT(mp, start); end = XFS_B_TO_FSB(mp, isize); for (;;) { struct xfs_bmbt_irec map[2]; int nmap = 2; unsigned int i; error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap, XFS_BMAPI_ENTIRE); if (error) goto out_unlock; /* No extents at the given offset, must be beyond EOF */ if (nmap == 0) { error = ENXIO; goto out_unlock; } for (i = 0; i < nmap; i++) { offset = max_t(loff_t, start, XFS_FSB_TO_B(mp, map[i].br_startoff)); /* Landed in a data extent */ if (map[i].br_startblock == DELAYSTARTBLOCK || (map[i].br_state == XFS_EXT_NORM && !isnullstartblock(map[i].br_startblock))) goto out; /* * Landed in an unwritten extent; try to search for data * in the page cache. */ if (map[i].br_state == XFS_EXT_UNWRITTEN) { if (xfs_find_get_desired_pgoff(inode, &map[i], DATA_OFF, &offset)) goto out; } } /* * map[0] is a hole, or it's an unwritten extent without * data in the page cache. If there is nothing in map[1], * that probably means we are reading after EOF. */ if (nmap == 1) { error = ENXIO; goto out_unlock; } ASSERT(i > 1); /* * Nothing was found; proceed to the next round of search * if the reading offset has not gone beyond or hit EOF. */ fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount; start = XFS_FSB_TO_B(mp, fsbno); if (start >= isize) { error = ENXIO; goto out_unlock; } } out: offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out_unlock: xfs_iunlock(ip, lock); if (error) return -error; return offset; } STATIC loff_t xfs_seek_hole( struct file *file, loff_t start) { struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; loff_t uninitialized_var(offset); xfs_fsize_t isize; xfs_fileoff_t fsbno; xfs_filblks_t end; uint lock; int error; if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); lock = xfs_ilock_data_map_shared(ip); isize = i_size_read(inode); if (start >= isize) { error = ENXIO; goto out_unlock; } fsbno = XFS_B_TO_FSBT(mp, start); end = XFS_B_TO_FSB(mp, isize); for (;;) { struct xfs_bmbt_irec map[2]; int nmap = 2; unsigned int i; error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap, XFS_BMAPI_ENTIRE); if (error) goto out_unlock; /* No extents at the given offset, must be beyond EOF */ if (nmap == 0) { error = ENXIO; goto out_unlock; } for (i = 0; i < nmap; i++) { offset = max_t(loff_t, start, XFS_FSB_TO_B(mp, map[i].br_startoff)); /* Landed in a hole */ if (map[i].br_startblock == HOLESTARTBLOCK) goto out; /* * Landed in an unwritten extent; try to search for a hole * in the page cache. */ if (map[i].br_state == XFS_EXT_UNWRITTEN) { if (xfs_find_get_desired_pgoff(inode, &map[i], HOLE_OFF, &offset)) goto out; } } /* * map[0] contains data, or it's unwritten but contains * data in the page cache; this probably means that we are * reading after EOF.
We should fix the offset to point * to the end of the file (i.e., there is an implicit * hole at the end of any file). */ if (nmap == 1) { offset = isize; break; } ASSERT(i > 1); /* * Both mappings contain data; proceed to the next round of * search if the current reading offset has not gone beyond or hit EOF. */ fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount; start = XFS_FSB_TO_B(mp, fsbno); if (start >= isize) { offset = isize; break; } } out: /* * At this point, we must have found a hole. However, the returned * offset may be bigger than the file size, as it may be aligned to * a page boundary for unwritten extents; we need to deal with this * situation in particular. */ offset = min_t(loff_t, offset, isize); offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out_unlock: xfs_iunlock(ip, lock); if (error) return -error; return offset; } STATIC loff_t xfs_file_llseek( struct file *file, loff_t offset, int origin) { switch (origin) { case SEEK_END: case SEEK_CUR: case SEEK_SET: return generic_file_llseek(file, offset, origin); case SEEK_DATA: return xfs_seek_data(file, offset); case SEEK_HOLE: return xfs_seek_hole(file, offset); default: return -EINVAL; } } const struct file_operations xfs_file_operations = { .llseek = xfs_file_llseek, .read = new_sync_read, .write = new_sync_write, .read_iter = xfs_file_read_iter, .write_iter = xfs_file_write_iter, .splice_read = xfs_file_splice_read, .splice_write = xfs_file_splice_write, .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = xfs_file_compat_ioctl, #endif .mmap = xfs_file_mmap, .open = xfs_file_open, .release = xfs_file_release, .fsync = xfs_file_fsync, .fallocate = xfs_file_fallocate, }; const struct file_operations xfs_dir_file_operations = { .open = xfs_dir_open, .read = generic_read_dir, .iterate = xfs_file_readdir, .llseek = generic_file_llseek, .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = xfs_file_compat_ioctl, #endif .fsync = xfs_dir_fsync, }; static const struct vm_operations_struct xfs_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = xfs_vm_page_mkwrite, .remap_pages = generic_file_remap_pages, };
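/*
 * Illustrative user-space sketch (not part of the original file): how the
 * SEEK_DATA/SEEK_HOLE support implemented above is consumed. The program
 * itself is an assumption made for demonstration; the ENXIO-at-EOF
 * convention follows the generic Linux lseek(2) contract.
 */
#define _GNU_SOURCE /* SEEK_DATA / SEEK_HOLE */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data, hole = 0;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Walk [data, hole) extents until SEEK_DATA runs off the end. */
	for (;;) {
		data = lseek(fd, hole, SEEK_DATA);
		if (data < 0)
			break; /* ENXIO: no more data past 'hole' */
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
	}
	if (errno != ENXIO)
		perror("lseek");
	close(fd);
	return 0;
}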
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2287_13
crossvul-cpp_data_bad_2399_17
/* * PCBC: Propagating Cipher Block Chaining mode * * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * Derived from cbc.c * - Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/algapi.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> struct crypto_pcbc_ctx { struct crypto_cipher *child; }; static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key, unsigned int keylen) { struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent); struct crypto_cipher *child = ctx->child; int err; crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_cipher_setkey(child, key, keylen); crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & CRYPTO_TFM_RES_MASK); return err; } static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, struct blkcipher_walk *walk, struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; u8 *iv = walk->iv; do { crypto_xor(iv, src, bsize); fn(crypto_cipher_tfm(tfm), dst, iv); memcpy(iv, dst, bsize); crypto_xor(iv, src, bsize); src += bsize; dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; } static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, struct blkcipher_walk *walk, struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *iv = walk->iv; u8 tmpbuf[bsize]; do { memcpy(tmpbuf, src, bsize); crypto_xor(iv, src, bsize); fn(crypto_cipher_tfm(tfm), src, iv); memcpy(iv, tmpbuf, bsize); crypto_xor(iv, src, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); memcpy(walk->iv, iv, bsize); return nbytes; } static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; struct crypto_blkcipher *tfm = desc->tfm; struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child); else nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, struct blkcipher_walk *walk, struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_decrypt; int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; u8 *iv = walk->iv; do { fn(crypto_cipher_tfm(tfm), dst, src); crypto_xor(dst, iv, bsize); memcpy(iv, src, bsize); 
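/* PCBC feedback: iv now holds C_i (saved just above); xoring in the recovered plaintext below makes the next IV equal to P_i ^ C_i. */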
crypto_xor(iv, dst, bsize); src += bsize; dst += bsize; } while ((nbytes -= bsize) >= bsize); memcpy(walk->iv, iv, bsize); return nbytes; } static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, struct blkcipher_walk *walk, struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_decrypt; int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *iv = walk->iv; u8 tmpbuf[bsize]; do { memcpy(tmpbuf, src, bsize); fn(crypto_cipher_tfm(tfm), src, src); crypto_xor(src, iv, bsize); memcpy(iv, tmpbuf, bsize); crypto_xor(iv, src, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); memcpy(walk->iv, iv, bsize); return nbytes; } static int crypto_pcbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; struct crypto_blkcipher *tfm = desc->tfm; struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child); else nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_cipher *cipher; cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; return 0; } static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) { struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_cipher(ctx->child); } static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); if (err) return ERR_PTR(err); alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_CAST(alg); inst = crypto_alloc_instance("pcbc", alg); if (IS_ERR(inst)) goto out_put_alg; inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = alg->cra_blocksize; inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_blkcipher_type; /* We access the data as u32s when xoring. 
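* Raising the alignmask below to u32 alignment makes that safe for any underlying cipher.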
*/ inst->alg.cra_alignmask |= __alignof__(u32) - 1; inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); inst->alg.cra_init = crypto_pcbc_init_tfm; inst->alg.cra_exit = crypto_pcbc_exit_tfm; inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey; inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt; inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt; out_put_alg: crypto_mod_put(alg); return inst; } static void crypto_pcbc_free(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); } static struct crypto_template crypto_pcbc_tmpl = { .name = "pcbc", .alloc = crypto_pcbc_alloc, .free = crypto_pcbc_free, .module = THIS_MODULE, }; static int __init crypto_pcbc_module_init(void) { return crypto_register_template(&crypto_pcbc_tmpl); } static void __exit crypto_pcbc_module_exit(void) { crypto_unregister_template(&crypto_pcbc_tmpl); } module_init(crypto_pcbc_module_init); module_exit(crypto_pcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PCBC block cipher algorithm");
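/*
 * Illustrative sketch (not part of the module above): the PCBC recurrence
 * that the segment/inplace helpers implement, shown in user space with a
 * toy one-byte "block cipher" (add/subtract a key byte -- an assumption
 * purely for demonstration, not a real cipher):
 *
 *   C_i = E(P_i ^ IV_i),  IV_{i+1} = P_i ^ C_i
 *   P_i = D(C_i) ^ IV_i,  IV_{i+1} = P_i ^ C_i
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t toy_e(uint8_t x, uint8_t k) { return (uint8_t)(x + k); }
static uint8_t toy_d(uint8_t x, uint8_t k) { return (uint8_t)(x - k); }

static void pcbc_encrypt(uint8_t *buf, size_t n, uint8_t key, uint8_t iv)
{
	for (size_t i = 0; i < n; i++) {
		uint8_t p = buf[i];

		buf[i] = toy_e(p ^ iv, key);
		iv = p ^ buf[i]; /* propagate plaintext ^ ciphertext */
	}
}

static void pcbc_decrypt(uint8_t *buf, size_t n, uint8_t key, uint8_t iv)
{
	for (size_t i = 0; i < n; i++) {
		uint8_t c = buf[i];

		buf[i] = toy_d(c, key) ^ iv;
		iv = buf[i] ^ c; /* same feedback as on the encrypt side */
	}
}

int main(void)
{
	uint8_t msg[] = "propagating cbc";
	size_t n = sizeof(msg) - 1;

	pcbc_encrypt(msg, n, 0x5a, 0xa5);
	pcbc_decrypt(msg, n, 0x5a, 0xa5);
	printf("round trip: %.*s\n", (int)n, msg); /* prints the original text */
	return 0;
}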
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2399_17
crossvul-cpp_data_bad_3524_1
/* * originally based on the dummy device. * * Copyright 1999, Thomas Davis, tadavis@lbl.gov. * Licensed under the GPL. Based on dummy.c, and eql.c devices. * * bonding.c: an Ethernet Bonding driver * * This is useful to talk to a Cisco EtherChannel compatible equipment: * Cisco 5500 * Sun Trunking (Solaris) * Alteon AceDirector Trunks * Linux Bonding * and probably many L2 switches ... * * How it works: * ifconfig bond0 ipaddress netmask up * will setup a network device, with an ip address. No mac address * will be assigned at this time. The hw mac address will come from * the first slave bonded to the channel. All slaves will then use * this hw mac address. * * ifconfig bond0 down * will release all slaves, marking them as down. * * ifenslave bond0 eth0 * will attach eth0 to bond0 as a slave. eth0 hw mac address will either * a: be used as initial mac address * b: if a hw mac address already is there, eth0's hw mac address * will then be set from bond0. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <net/ip.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/socket.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/bitops.h> #include <linux/io.h> #include <asm/system.h> #include <asm/dma.h> #include <linux/uaccess.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/rtnetlink.h> #include <linux/smp.h> #include <linux/if_ether.h> #include <net/arp.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/if_bonding.h> #include <linux/jiffies.h> #include <linux/preempt.h> #include <net/route.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "bonding.h" #include "bond_3ad.h" #include "bond_alb.h" /*---------------------------- Module parameters ----------------------------*/ /* monitor all links that often (in milliseconds). 
<=0 disables monitoring */ #define BOND_LINK_MON_INTERV 0 #define BOND_LINK_ARP_INTERV 0 static int max_bonds = BOND_DEFAULT_MAX_BONDS; static int tx_queues = BOND_DEFAULT_TX_QUEUES; static int num_peer_notif = 1; static int miimon = BOND_LINK_MON_INTERV; static int updelay; static int downdelay; static int use_carrier = 1; static char *mode; static char *primary; static char *primary_reselect; static char *lacp_rate; static int min_links; static char *ad_select; static char *xmit_hash_policy; static int arp_interval = BOND_LINK_ARP_INTERV; static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; static char *arp_validate; static char *fail_over_mac; static int all_slaves_active = 0; static struct bond_params bonding_defaults; static int resend_igmp = BOND_DEFAULT_RESEND_IGMP; module_param(max_bonds, int, 0); MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); module_param(tx_queues, int, 0); MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); module_param_named(num_grat_arp, num_peer_notif, int, 0644); MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on " "failover event (alias of num_unsol_na)"); module_param_named(num_unsol_na, num_peer_notif, int, 0644); MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on " "failover event (alias of num_grat_arp)"); module_param(miimon, int, 0); MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); module_param(updelay, int, 0); MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds"); module_param(downdelay, int, 0); MODULE_PARM_DESC(downdelay, "Delay before considering link down, " "in milliseconds"); module_param(use_carrier, int, 0); MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " "0 for off, 1 for on (default)"); module_param(mode, charp, 0); MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, " "1 for active-backup, 2 for balance-xor, " "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " "6 for balance-alb"); module_param(primary, charp, 0); MODULE_PARM_DESC(primary, "Primary network device to use"); module_param(primary_reselect, charp, 0); MODULE_PARM_DESC(primary_reselect, "Reselect primary slave " "once it comes up; " "0 for always (default), " "1 for only if speed of primary is " "better, " "2 for only on active slave " "failure"); module_param(lacp_rate, charp, 0); MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " "0 for slow, 1 for fast"); module_param(ad_select, charp, 0); MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " "0 for stable (default), 1 for bandwidth, " "2 for count"); module_param(min_links, int, 0); MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier"); module_param(xmit_hash_policy, charp, 0); MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; " "0 for layer 2 (default), 1 for layer 3+4, " "2 for layer 2+3"); module_param(arp_interval, int, 0); MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); module_param_array(arp_ip_target, charp, NULL, 0); MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); module_param(arp_validate, charp, 0); MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; " "0 for none (default), 1 for active, " "2 for backup, 3 for all"); module_param(fail_over_mac, charp, 0); MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " "the same MAC; 0 for none (default), " "1 for active, 2 for follow"); 
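/* Example (illustrative, not from the original source): a typical 802.3ad setup would load the driver with 'modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast', then enslave ports via ifenslave or the sysfs interface. */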
module_param(all_slaves_active, int, 0); MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" "by setting active flag for all slaves; " "0 for never (default), 1 for always."); module_param(resend_igmp, int, 0); MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on " "link failure"); /*----------------------------- Global variables ----------------------------*/ #ifdef CONFIG_NET_POLL_CONTROLLER atomic_t netpoll_block_tx = ATOMIC_INIT(0); #endif int bond_net_id __read_mostly; static __be32 arp_target[BOND_MAX_ARP_TARGETS]; static int arp_ip_count; static int bond_mode = BOND_MODE_ROUNDROBIN; static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; static int lacp_fast; const struct bond_parm_tbl bond_lacp_tbl[] = { { "slow", AD_LACP_SLOW}, { "fast", AD_LACP_FAST}, { NULL, -1}, }; const struct bond_parm_tbl bond_mode_tbl[] = { { "balance-rr", BOND_MODE_ROUNDROBIN}, { "active-backup", BOND_MODE_ACTIVEBACKUP}, { "balance-xor", BOND_MODE_XOR}, { "broadcast", BOND_MODE_BROADCAST}, { "802.3ad", BOND_MODE_8023AD}, { "balance-tlb", BOND_MODE_TLB}, { "balance-alb", BOND_MODE_ALB}, { NULL, -1}, }; const struct bond_parm_tbl xmit_hashtype_tbl[] = { { "layer2", BOND_XMIT_POLICY_LAYER2}, { "layer3+4", BOND_XMIT_POLICY_LAYER34}, { "layer2+3", BOND_XMIT_POLICY_LAYER23}, { NULL, -1}, }; const struct bond_parm_tbl arp_validate_tbl[] = { { "none", BOND_ARP_VALIDATE_NONE}, { "active", BOND_ARP_VALIDATE_ACTIVE}, { "backup", BOND_ARP_VALIDATE_BACKUP}, { "all", BOND_ARP_VALIDATE_ALL}, { NULL, -1}, }; const struct bond_parm_tbl fail_over_mac_tbl[] = { { "none", BOND_FOM_NONE}, { "active", BOND_FOM_ACTIVE}, { "follow", BOND_FOM_FOLLOW}, { NULL, -1}, }; const struct bond_parm_tbl pri_reselect_tbl[] = { { "always", BOND_PRI_RESELECT_ALWAYS}, { "better", BOND_PRI_RESELECT_BETTER}, { "failure", BOND_PRI_RESELECT_FAILURE}, { NULL, -1}, }; struct bond_parm_tbl ad_select_tbl[] = { { "stable", BOND_AD_STABLE}, { "bandwidth", BOND_AD_BANDWIDTH}, { "count", BOND_AD_COUNT}, { NULL, -1}, }; /*-------------------------- Forward declarations ---------------------------*/ static int bond_init(struct net_device *bond_dev); static void bond_uninit(struct net_device *bond_dev); /*---------------------------- General routines -----------------------------*/ const char *bond_mode_name(int mode) { static const char *names[] = { [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", [BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)", [BOND_MODE_XOR] = "load balancing (xor)", [BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)", [BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation", [BOND_MODE_TLB] = "transmit load balancing", [BOND_MODE_ALB] = "adaptive load balancing", }; if (mode < 0 || mode > BOND_MODE_ALB) return "unknown"; return names[mode]; } /*---------------------------------- VLAN -----------------------------------*/ /** * bond_add_vlan - add a new vlan id on bond * @bond: bond that got the notification * @vlan_id: the vlan id to add * * Returns -ENOMEM if allocation failed. */ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id) { struct vlan_entry *vlan; pr_debug("bond: %s, vlan id %d\n", (bond ? 
bond->dev->name : "None"), vlan_id); vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL); if (!vlan) return -ENOMEM; INIT_LIST_HEAD(&vlan->vlan_list); vlan->vlan_id = vlan_id; write_lock_bh(&bond->lock); list_add_tail(&vlan->vlan_list, &bond->vlan_list); write_unlock_bh(&bond->lock); pr_debug("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name); return 0; } /** * bond_del_vlan - delete a vlan id from bond * @bond: bond that got the notification * @vlan_id: the vlan id to delete * * returns -ENODEV if @vlan_id was not found in @bond. */ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) { struct vlan_entry *vlan; int res = -ENODEV; pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); block_netpoll_tx(); write_lock_bh(&bond->lock); list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { if (vlan->vlan_id == vlan_id) { list_del(&vlan->vlan_list); if (bond_is_lb(bond)) bond_alb_clear_vlan(bond, vlan_id); pr_debug("removed VLAN ID %d from bond %s\n", vlan_id, bond->dev->name); kfree(vlan); res = 0; goto out; } } pr_debug("couldn't find VLAN ID %d in bond %s\n", vlan_id, bond->dev->name); out: write_unlock_bh(&bond->lock); unblock_netpoll_tx(); return res; } /** * bond_next_vlan - safely skip to the next item in the vlans list. * @bond: the bond we're working on * @curr: item we're advancing from * * Returns %NULL if list is empty, bond->next_vlan if @curr is %NULL, * or @curr->next otherwise (even if it is @curr itself again). * * Caller must hold bond->lock */ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr) { struct vlan_entry *next, *last; if (list_empty(&bond->vlan_list)) return NULL; if (!curr) { next = list_entry(bond->vlan_list.next, struct vlan_entry, vlan_list); } else { last = list_entry(bond->vlan_list.prev, struct vlan_entry, vlan_list); if (last == curr) { next = list_entry(bond->vlan_list.next, struct vlan_entry, vlan_list); } else { next = list_entry(curr->vlan_list.next, struct vlan_entry, vlan_list); } } return next; } #define bond_queue_mapping(skb) (*(u16 *)((skb)->cb)) /** * bond_dev_queue_xmit - Prepare skb for xmit. * * @bond: bond device that got this skb for tx. * @skb: hw accel VLAN tagged skb to transmit * @slave_dev: slave that is supposed to xmit this skbuff */ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev) { skb->dev = slave_dev; skb->priority = 1; skb->queue_mapping = bond_queue_mapping(skb); if (unlikely(netpoll_tx_running(slave_dev))) bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); else dev_queue_xmit(skb); return 0; } /* * In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, * We don't protect the slave list iteration with a lock because: * a. This operation is performed in IOCTL context, * b. The operation is protected by the RTNL semaphore in the 8021q code, * c. Holding a lock with BH disabled while directly calling a base driver * entry point is generally a BAD idea. * * The design of synchronization/protection for this operation in the 8021q * module is good for one or more VLAN devices over a single physical device * and cannot be extended for a teaming solution like bonding, so there is a * potential race condition here where a net device from the vlan group might * be referenced (either by a base driver or the 8021q code) while it is being * removed from the system. However, it turns out we're not making matters * worse, and if it works for regular VLAN usage it will work here too. 
*/ /** * bond_vlan_rx_add_vid - Propagates adding an id to slaves * @bond_dev: bonding net device that got called * @vid: vlan id being added */ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; int i, res; bond_for_each_slave(bond, slave, i) { struct net_device *slave_dev = slave->dev; const struct net_device_ops *slave_ops = slave_dev->netdev_ops; if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) && slave_ops->ndo_vlan_rx_add_vid) { slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid); } } res = bond_add_vlan(bond, vid); if (res) { pr_err("%s: Error: Failed to add vlan id %d\n", bond_dev->name, vid); } } /** * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves * @bond_dev: bonding net device that got called * @vid: vlan id being removed */ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; int i, res; bond_for_each_slave(bond, slave, i) { struct net_device *slave_dev = slave->dev; const struct net_device_ops *slave_ops = slave_dev->netdev_ops; if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) && slave_ops->ndo_vlan_rx_kill_vid) { slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid); } } res = bond_del_vlan(bond, vid); if (res) { pr_err("%s: Error: Failed to remove vlan id %d\n", bond_dev->name, vid); } } static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev) { struct vlan_entry *vlan; const struct net_device_ops *slave_ops = slave_dev->netdev_ops; if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || !(slave_ops->ndo_vlan_rx_add_vid)) return; list_for_each_entry(vlan, &bond->vlan_list, vlan_list) slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id); } static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *slave_dev) { const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct vlan_entry *vlan; if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || !(slave_ops->ndo_vlan_rx_kill_vid)) return; list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { if (!vlan->vlan_id) continue; slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id); } } /*------------------------------- Link status -------------------------------*/ /* * Set the carrier state for the master according to the state of its * slaves. If any slaves are up, the master is up. In 802.3ad mode, * do special 802.3ad magic. * * Returns zero if carrier state does not change, nonzero if it does. */ static int bond_set_carrier(struct bonding *bond) { struct slave *slave; int i; if (bond->slave_cnt == 0) goto down; if (bond->params.mode == BOND_MODE_8023AD) return bond_3ad_set_carrier(bond); bond_for_each_slave(bond, slave, i) { if (slave->link == BOND_LINK_UP) { if (!netif_carrier_ok(bond->dev)) { netif_carrier_on(bond->dev); return 1; } return 0; } } down: if (netif_carrier_ok(bond->dev)) { netif_carrier_off(bond->dev); return 1; } return 0; } /* * Get link speed and duplex from the slave's base driver * using ethtool. If for some reason the call fails or the * values are invalid, fake speed and duplex to 100/Full * and return error. 
*/ static int bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET }; u32 slave_speed; int res; /* Fake speed and duplex */ slave->speed = SPEED_100; slave->duplex = DUPLEX_FULL; if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings) return -1; res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool); if (res < 0) return -1; slave_speed = ethtool_cmd_speed(&etool); if (slave_speed == 0 || slave_speed == ((__u32) -1)) return -1; switch (etool.duplex) { case DUPLEX_FULL: case DUPLEX_HALF: break; default: return -1; } slave->speed = slave_speed; slave->duplex = etool.duplex; return 0; } /* * if <dev> supports MII link status reporting, check its link status. * * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(), * depending upon the setting of the use_carrier parameter. * * Return either BMSR_LSTATUS, meaning that the link is up (or we * can't tell and just pretend it is), or 0, meaning that the link is * down. * * If reporting is non-zero, instead of faking link up, return -1 if * both ETHTOOL and MII ioctls fail (meaning the device does not * support them). If use_carrier is set, return whatever it says. * It'd be nice if there was a good way to tell if a driver supports * netif_carrier, but there really isn't. */ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting) { const struct net_device_ops *slave_ops = slave_dev->netdev_ops; int (*ioctl)(struct net_device *, struct ifreq *, int); struct ifreq ifr; struct mii_ioctl_data *mii; if (!reporting && !netif_running(slave_dev)) return 0; if (bond->params.use_carrier) return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; /* Try to get link status using Ethtool first. */ if (slave_dev->ethtool_ops) { if (slave_dev->ethtool_ops->get_link) { u32 link; link = slave_dev->ethtool_ops->get_link(slave_dev); return link ? BMSR_LSTATUS : 0; } } /* Ethtool can't be used, fallback to MII ioctls. */ ioctl = slave_ops->ndo_do_ioctl; if (ioctl) { /* TODO: set pointer to correct ioctl on a per team member */ /* bases to make this more efficient. that is, once */ /* we determine the correct ioctl, we will always */ /* call it and not the others for that team */ /* member. */ /* * We cannot assume that SIOCGMIIPHY will also read a * register; not all network drivers (e.g., e100) * support that. */ /* Yes, the mii is overlaid on the ifreq.ifr_ifru */ strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ); mii = if_mii(&ifr); if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) { mii->reg_num = MII_BMSR; if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0) return mii->val_out & BMSR_LSTATUS; } } /* * If reporting, report that either there's no dev->do_ioctl, * or both SIOCGMIIREG and get_link failed (meaning that we * cannot report link status). If not reporting, pretend * we're ok. */ return reporting ? 
-1 : BMSR_LSTATUS; } /*----------------------------- Multicast list ------------------------------*/ /* * Push the promiscuity flag down to appropriate slaves */ static int bond_set_promiscuity(struct bonding *bond, int inc) { int err = 0; if (USES_PRIMARY(bond->params.mode)) { /* write lock already acquired */ if (bond->curr_active_slave) { err = dev_set_promiscuity(bond->curr_active_slave->dev, inc); } } else { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) { err = dev_set_promiscuity(slave->dev, inc); if (err) return err; } } return err; } /* * Push the allmulti flag down to all slaves */ static int bond_set_allmulti(struct bonding *bond, int inc) { int err = 0; if (USES_PRIMARY(bond->params.mode)) { /* write lock already acquired */ if (bond->curr_active_slave) { err = dev_set_allmulti(bond->curr_active_slave->dev, inc); } } else { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) { err = dev_set_allmulti(slave->dev, inc); if (err) return err; } } return err; } /* * Add a Multicast address to slaves * according to mode */ static void bond_mc_add(struct bonding *bond, void *addr) { if (USES_PRIMARY(bond->params.mode)) { /* write lock already acquired */ if (bond->curr_active_slave) dev_mc_add(bond->curr_active_slave->dev, addr); } else { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) dev_mc_add(slave->dev, addr); } } /* * Remove a multicast address from slave * according to mode */ static void bond_mc_del(struct bonding *bond, void *addr) { if (USES_PRIMARY(bond->params.mode)) { /* write lock already acquired */ if (bond->curr_active_slave) dev_mc_del(bond->curr_active_slave->dev, addr); } else { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) { dev_mc_del(slave->dev, addr); } } } static void __bond_resend_igmp_join_requests(struct net_device *dev) { struct in_device *in_dev; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) ip_mc_rejoin_groups(in_dev); rcu_read_unlock(); } /* * Retrieve the list of registered multicast addresses for the bonding * device and retransmit an IGMP JOIN request to the current active * slave. 
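* The rejoin is repeated for any VLAN devices configured on top of the bond, and the delayed work re-queues itself until igmp_retrans is exhausted.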
*/ static void bond_resend_igmp_join_requests(struct bonding *bond) { struct net_device *vlan_dev; struct vlan_entry *vlan; read_lock(&bond->lock); /* rejoin all groups on bond device */ __bond_resend_igmp_join_requests(bond->dev); /* rejoin all groups on vlan devices */ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { rcu_read_lock(); vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id); rcu_read_unlock(); if (vlan_dev) __bond_resend_igmp_join_requests(vlan_dev); } if (--bond->igmp_retrans > 0) queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); read_unlock(&bond->lock); } static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, mcast_work.work); bond_resend_igmp_join_requests(bond); } /* * flush all members of flush->mc_list from device dev->mc_list */ static void bond_mc_list_flush(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, bond_dev) dev_mc_del(slave_dev, ha->addr); if (bond->params.mode == BOND_MODE_8023AD) { /* del lacpdu mc addr from mc list */ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; dev_mc_del(slave_dev, lacpdu_multicast); } } /*--------------------------- Active slave change ---------------------------*/ /* * Update the mc list and multicast-related flags for the new and * old active slaves (if any) according to the multicast mode, and * promiscuous flags unconditionally. */ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct slave *old_active) { struct netdev_hw_addr *ha; if (!USES_PRIMARY(bond->params.mode)) /* nothing to do - mc list is already up-to-date on * all slaves */ return; if (old_active) { if (bond->dev->flags & IFF_PROMISC) dev_set_promiscuity(old_active->dev, -1); if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1); netdev_for_each_mc_addr(ha, bond->dev) dev_mc_del(old_active->dev, ha->addr); } if (new_active) { /* FIXME: Signal errors upstream. */ if (bond->dev->flags & IFF_PROMISC) dev_set_promiscuity(new_active->dev, 1); if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1); netdev_for_each_mc_addr(ha, bond->dev) dev_mc_add(new_active->dev, ha->addr); } } /* * bond_do_fail_over_mac * * Perform special MAC address swapping for fail_over_mac settings * * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh. 
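* In the follow case both locks are dropped around dev_set_mac_address(), which may sleep, and re-taken before returning.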
*/ static void bond_do_fail_over_mac(struct bonding *bond, struct slave *new_active, struct slave *old_active) __releases(&bond->curr_slave_lock) __releases(&bond->lock) __acquires(&bond->lock) __acquires(&bond->curr_slave_lock) { u8 tmp_mac[ETH_ALEN]; struct sockaddr saddr; int rv; switch (bond->params.fail_over_mac) { case BOND_FOM_ACTIVE: if (new_active) memcpy(bond->dev->dev_addr, new_active->dev->dev_addr, new_active->dev->addr_len); break; case BOND_FOM_FOLLOW: /* * if new_active && old_active, swap them * if just old_active, do nothing (going to no active slave) * if just new_active, set new_active to bond's MAC */ if (!new_active) return; write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); if (old_active) { memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN); memcpy(saddr.sa_data, old_active->dev->dev_addr, ETH_ALEN); saddr.sa_family = new_active->dev->type; } else { memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN); saddr.sa_family = bond->dev->type; } rv = dev_set_mac_address(new_active->dev, &saddr); if (rv) { pr_err("%s: Error %d setting MAC of slave %s\n", bond->dev->name, -rv, new_active->dev->name); goto out; } if (!old_active) goto out; memcpy(saddr.sa_data, tmp_mac, ETH_ALEN); saddr.sa_family = old_active->dev->type; rv = dev_set_mac_address(old_active->dev, &saddr); if (rv) pr_err("%s: Error %d setting MAC of slave %s\n", bond->dev->name, -rv, new_active->dev->name); out: read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); break; default: pr_err("%s: bond_do_fail_over_mac impossible: bad policy %d\n", bond->dev->name, bond->params.fail_over_mac); break; } } static bool bond_should_change_active(struct bonding *bond) { struct slave *prim = bond->primary_slave; struct slave *curr = bond->curr_active_slave; if (!prim || !curr || curr->link != BOND_LINK_UP) return true; if (bond->force_primary) { bond->force_primary = false; return true; } if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER && (prim->speed < curr->speed || (prim->speed == curr->speed && prim->duplex <= curr->duplex))) return false; if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE) return false; return true; } /** * find_best_interface - select the best available slave to be the active one * @bond: our bonding struct * * Warning: Caller must hold curr_slave_lock for writing. */ static struct slave *bond_find_best_slave(struct bonding *bond) { struct slave *new_active, *old_active; struct slave *bestslave = NULL; int mintime = bond->params.updelay; int i; new_active = bond->curr_active_slave; if (!new_active) { /* there were no active slaves left */ if (bond->slave_cnt > 0) /* found one slave */ new_active = bond->first_slave; else return NULL; /* still no slave, return NULL */ } if ((bond->primary_slave) && bond->primary_slave->link == BOND_LINK_UP && bond_should_change_active(bond)) { new_active = bond->primary_slave; } /* remember where to stop iterating over the slaves */ old_active = new_active; bond_for_each_slave_from(bond, new_active, i, old_active) { if (new_active->link == BOND_LINK_UP) { return new_active; } else if (new_active->link == BOND_LINK_BACK && IS_UP(new_active->dev)) { /* link up, but waiting for stabilization */ if (new_active->delay < mintime) { mintime = new_active->delay; bestslave = new_active; } } } return bestslave; } static bool bond_should_notify_peers(struct bonding *bond) { struct slave *slave = bond->curr_active_slave; pr_debug("bond_should_notify_peers: bond %s slave %s\n", bond->dev->name, slave ? 
slave->dev->name : "NULL"); if (!slave || !bond->send_peer_notif || test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; bond->send_peer_notif--; return true; } /** * change_active_interface - change the active slave into the specified one * @bond: our bonding struct * @new: the new slave to make the active one * * Set the new slave to the bond's settings and unset them on the old * curr_active_slave. * Setting include flags, mc-list, promiscuity, allmulti, etc. * * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP, * because it is apparently the best available slave we have, even though its * updelay hasn't timed out yet. * * If new_active is not NULL, caller must hold bond->lock for read and * curr_slave_lock for write_bh. */ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) { struct slave *old_active = bond->curr_active_slave; if (old_active == new_active) return; if (new_active) { new_active->jiffies = jiffies; if (new_active->link == BOND_LINK_BACK) { if (USES_PRIMARY(bond->params.mode)) { pr_info("%s: making interface %s the new active one %d ms earlier.\n", bond->dev->name, new_active->dev->name, (bond->params.updelay - new_active->delay) * bond->params.miimon); } new_active->delay = 0; new_active->link = BOND_LINK_UP; if (bond->params.mode == BOND_MODE_8023AD) bond_3ad_handle_link_change(new_active, BOND_LINK_UP); if (bond_is_lb(bond)) bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); } else { if (USES_PRIMARY(bond->params.mode)) { pr_info("%s: making interface %s the new active one.\n", bond->dev->name, new_active->dev->name); } } } if (USES_PRIMARY(bond->params.mode)) bond_mc_swap(bond, new_active, old_active); if (bond_is_lb(bond)) { bond_alb_handle_active_change(bond, new_active); if (old_active) bond_set_slave_inactive_flags(old_active); if (new_active) bond_set_slave_active_flags(new_active); } else { bond->curr_active_slave = new_active; } if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { if (old_active) bond_set_slave_inactive_flags(old_active); if (new_active) { bool should_notify_peers = false; bond_set_slave_active_flags(new_active); if (bond->params.fail_over_mac) bond_do_fail_over_mac(bond, new_active, old_active); if (netif_running(bond->dev)) { bond->send_peer_notif = bond->params.num_peer_notif; should_notify_peers = bond_should_notify_peers(bond); } write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); if (should_notify_peers) netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); } } /* resend IGMP joins since active slave has changed or * all were sent on curr_active_slave. * resend only if bond is brought up with the affected * bonding modes and the retransmission is enabled */ if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && ((USES_PRIMARY(bond->params.mode) && new_active) || bond->params.mode == BOND_MODE_ROUNDROBIN)) { bond->igmp_retrans = bond->params.resend_igmp; queue_delayed_work(bond->wq, &bond->mcast_work, 0); } } /** * bond_select_active_slave - select a new active slave, if needed * @bond: our bonding struct * * This functions should be called when one of the following occurs: * - The old curr_active_slave has been released or lost its link. * - The primary_slave has got its link back. * - A slave has got its link back and there's no old curr_active_slave. 
* * Caller must hold bond->lock for read and curr_slave_lock for write_bh. */ void bond_select_active_slave(struct bonding *bond) { struct slave *best_slave; int rv; best_slave = bond_find_best_slave(bond); if (best_slave != bond->curr_active_slave) { bond_change_active_slave(bond, best_slave); rv = bond_set_carrier(bond); if (!rv) return; if (netif_carrier_ok(bond->dev)) { pr_info("%s: first active interface up!\n", bond->dev->name); } else { pr_info("%s: now running without any active interface !\n", bond->dev->name); } } } /*--------------------------- slave list handling ---------------------------*/ /* * This function attaches the slave to the end of the list. * * bond->lock held for writing by caller. */ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave) { if (bond->first_slave == NULL) { /* attaching the first slave */ new_slave->next = new_slave; new_slave->prev = new_slave; bond->first_slave = new_slave; } else { new_slave->next = bond->first_slave; new_slave->prev = bond->first_slave->prev; new_slave->next->prev = new_slave; new_slave->prev->next = new_slave; } bond->slave_cnt++; } /* * This function detaches the slave from the list. * WARNING: no check is made to verify if the slave effectively * belongs to <bond>. * Nothing is freed on return, structures are just unchained. * If any slave pointer in bond was pointing to <slave>, * it should be changed by the calling function. * * bond->lock held for writing by caller. */ static void bond_detach_slave(struct bonding *bond, struct slave *slave) { if (slave->next) slave->next->prev = slave->prev; if (slave->prev) slave->prev->next = slave->next; if (bond->first_slave == slave) { /* slave is the first slave */ if (bond->slave_cnt > 1) { /* there are more slaves */ bond->first_slave = slave->next; } else { bond->first_slave = NULL; /* slave was the last one */ } } slave->next = NULL; slave->prev = NULL; bond->slave_cnt--; } #ifdef CONFIG_NET_POLL_CONTROLLER static inline int slave_enable_netpoll(struct slave *slave) { struct netpoll *np; int err = 0; np = kzalloc(sizeof(*np), GFP_KERNEL); err = -ENOMEM; if (!np) goto out; np->dev = slave->dev; strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ); err = __netpoll_setup(np); if (err) { kfree(np); goto out; } slave->np = np; out: return err; } static inline void slave_disable_netpoll(struct slave *slave) { struct netpoll *np = slave->np; if (!np) return; slave->np = NULL; synchronize_rcu_bh(); __netpoll_cleanup(np); kfree(np); } static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) { if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL) return false; if (!slave_dev->netdev_ops->ndo_poll_controller) return false; return true; } static void bond_poll_controller(struct net_device *bond_dev) { } static void __bond_netpoll_cleanup(struct bonding *bond) { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) if (IS_UP(slave->dev)) slave_disable_netpoll(slave); } static void bond_netpoll_cleanup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); read_lock(&bond->lock); __bond_netpoll_cleanup(bond); read_unlock(&bond->lock); } static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) { struct bonding *bond = netdev_priv(dev); struct slave *slave; int i, err = 0; read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { err = slave_enable_netpoll(slave); if (err) { __bond_netpoll_cleanup(bond); break; } } read_unlock(&bond->lock); return err; } static struct netpoll_info *bond_netpoll_info(struct
bonding *bond) { return bond->dev->npinfo; } #else static inline int slave_enable_netpoll(struct slave *slave) { return 0; } static inline void slave_disable_netpoll(struct slave *slave) { } static void bond_netpoll_cleanup(struct net_device *bond_dev) { } #endif /*---------------------------------- IOCTL ----------------------------------*/ static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev) { pr_debug("bond_dev=%p\n", bond_dev); pr_debug("slave_dev=%p\n", slave_dev); pr_debug("slave_dev->addr_len=%d\n", slave_dev->addr_len); memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len); return 0; } static u32 bond_fix_features(struct net_device *dev, u32 features) { struct slave *slave; struct bonding *bond = netdev_priv(dev); u32 mask; int i; read_lock(&bond->lock); if (!bond->first_slave) { /* Disable adding VLANs to empty bond. But why? --mq */ features |= NETIF_F_VLAN_CHALLENGED; goto out; } mask = features; features &= ~NETIF_F_ONE_FOR_ALL; features |= NETIF_F_ALL_FOR_ALL; bond_for_each_slave(bond, slave, i) { features = netdev_increment_features(features, slave->dev->features, mask); } out: read_unlock(&bond->lock); return features; } #define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ NETIF_F_HIGHDMA | NETIF_F_LRO) static void bond_compute_features(struct bonding *bond) { struct slave *slave; struct net_device *bond_dev = bond->dev; u32 vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; int i; read_lock(&bond->lock); if (!bond->first_slave) goto done; bond_for_each_slave(bond, slave, i) { vlan_features = netdev_increment_features(vlan_features, slave->dev->vlan_features, BOND_VLAN_FEATURES); if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; } done: bond_dev->vlan_features = vlan_features; bond_dev->hard_header_len = max_hard_header_len; read_unlock(&bond->lock); netdev_change_features(bond_dev); } static void bond_setup_by_slave(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); bond_dev->header_ops = slave_dev->header_ops; bond_dev->type = slave_dev->type; bond_dev->hard_header_len = slave_dev->hard_header_len; bond_dev->addr_len = slave_dev->addr_len; memcpy(bond_dev->broadcast, slave_dev->broadcast, slave_dev->addr_len); bond->setup_by_slave = 1; } /* On bonding slaves other than the currently active slave, suppress * duplicates except for alb non-mcast/bcast. 
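* The ALB exception exists because receive load balancing deliberately steers unicast flows to individual, otherwise-inactive slaves, so exact-match unicast frames must still be delivered there; only multicast/broadcast frames would be duplicates.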
*/ static bool bond_should_deliver_exact_match(struct sk_buff *skb, struct slave *slave, struct bonding *bond) { if (bond_is_slave_inactive(slave)) { if (bond->params.mode == BOND_MODE_ALB && skb->pkt_type != PACKET_BROADCAST && skb->pkt_type != PACKET_MULTICAST) return false; return true; } return false; } static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct slave *slave; struct bonding *bond; skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return RX_HANDLER_CONSUMED; *pskb = skb; slave = bond_slave_get_rcu(skb->dev); bond = slave->bond; if (bond->params.arp_interval) slave->dev->last_rx = jiffies; if (bond->recv_probe) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (likely(nskb)) { bond->recv_probe(nskb, bond, slave); dev_kfree_skb(nskb); } } if (bond_should_deliver_exact_match(skb, slave, bond)) { return RX_HANDLER_EXACT; } skb->dev = bond->dev; if (bond->params.mode == BOND_MODE_ALB && bond->dev->priv_flags & IFF_BRIDGE_PORT && skb->pkt_type == PACKET_HOST) { if (unlikely(skb_cow_head(skb, skb->data - skb_mac_header(skb)))) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); } return RX_HANDLER_ANOTHER; } /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct slave *new_slave = NULL; struct netdev_hw_addr *ha; struct sockaddr addr; int link_reporting; int res = 0; if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && slave_ops->ndo_do_ioctl == NULL) { pr_warning("%s: Warning: no link monitoring support for %s\n", bond_dev->name, slave_dev->name); } /* already enslaved */ if (slave_dev->flags & IFF_SLAVE) { pr_debug("Error, Device was already enslaved\n"); return -EBUSY; } /* vlan challenged mutual exclusion */ /* no need to lock since we're protected by rtnl_lock */ if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); if (bond_vlan_used(bond)) { pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n", bond_dev->name, slave_dev->name, bond_dev->name); return -EPERM; } else { pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n", bond_dev->name, slave_dev->name, slave_dev->name, bond_dev->name); } } else { pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); } /* * Old ifenslave binaries are no longer supported. These can * be identified with moderate accuracy by the state of the slave: * the current ifenslave will set the interface down prior to * enslaving it; the old ifenslave will not. */ if ((slave_dev->flags & IFF_UP)) { pr_err("%s is up. This may be due to an out of date ifenslave.\n", slave_dev->name); res = -EPERM; goto err_undo_flags; } /* set bonding device ether type by slave - bonding netdevices are * created with ether_setup, so when the slave type is not ARPHRD_ETHER * there is a need to override some of the type dependent attribs/funcs. 
* * bond ether type mutual exclusion - don't allow slaves of dissimilar * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond */ if (bond->slave_cnt == 0) { if (bond_dev->type != slave_dev->type) { pr_debug("%s: change device type from %d to %d\n", bond_dev->name, bond_dev->type, slave_dev->type); res = netdev_bonding_change(bond_dev, NETDEV_PRE_TYPE_CHANGE); res = notifier_to_errno(res); if (res) { pr_err("%s: refused to change device type\n", bond_dev->name); res = -EBUSY; goto err_undo_flags; } /* Flush unicast and multicast addresses */ dev_uc_flush(bond_dev); dev_mc_flush(bond_dev); if (slave_dev->type != ARPHRD_ETHER) bond_setup_by_slave(bond_dev, slave_dev); else ether_setup(bond_dev); netdev_bonding_change(bond_dev, NETDEV_POST_TYPE_CHANGE); } } else if (bond_dev->type != slave_dev->type) { pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", slave_dev->name, slave_dev->type, bond_dev->type); res = -EINVAL; goto err_undo_flags; } if (slave_ops->ndo_set_mac_address == NULL) { if (bond->slave_cnt == 0) { pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.", bond_dev->name); bond->params.fail_over_mac = BOND_FOM_ACTIVE; } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) { pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n", bond_dev->name); res = -EOPNOTSUPP; goto err_undo_flags; } } call_netdevice_notifiers(NETDEV_JOIN, slave_dev); /* If this is the first slave, then we need to set the master's hardware * address to be the same as the slave's. */ if (is_zero_ether_addr(bond->dev->dev_addr)) memcpy(bond->dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len); new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); if (!new_slave) { res = -ENOMEM; goto err_undo_flags; } /* * Set the new_slave's queue_id to be zero. Queue ID mapping * is set via sysfs or module option if desired. */ new_slave->queue_id = 0; /* Save slave's original mtu and then set it to match the bond */ new_slave->original_mtu = slave_dev->mtu; res = dev_set_mtu(slave_dev, bond->dev->mtu); if (res) { pr_debug("Error %d calling dev_set_mtu\n", res); goto err_free; } /* * Save slave's original ("permanent") mac address for modes * that need it, and for restoring it upon release, and then * set it to the master's address */ memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); if (!bond->params.fail_over_mac) { /* * Set slave to master's mac address. 
The application already * set the master's mac address to that of the first slave */ memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); addr.sa_family = slave_dev->type; res = dev_set_mac_address(slave_dev, &addr); if (res) { pr_debug("Error %d calling set_mac_address\n", res); goto err_restore_mtu; } } res = netdev_set_bond_master(slave_dev, bond_dev); if (res) { pr_debug("Error %d calling netdev_set_bond_master\n", res); goto err_restore_mac; } /* open the slave since the application closed it */ res = dev_open(slave_dev); if (res) { pr_debug("Opening slave %s failed\n", slave_dev->name); goto err_unset_master; } new_slave->bond = bond; new_slave->dev = slave_dev; slave_dev->priv_flags |= IFF_BONDING; if (bond_is_lb(bond)) { /* bond_alb_init_slave() must be called before all other stages since * it might fail and we do not want to have to undo everything */ res = bond_alb_init_slave(bond, new_slave); if (res) goto err_close; } /* If the mode USES_PRIMARY, then the new slave gets the * master's promisc (and mc) settings only if it becomes the * curr_active_slave, and that is taken care of later when calling * bond_change_active() */ if (!USES_PRIMARY(bond->params.mode)) { /* set promiscuity level to new slave */ if (bond_dev->flags & IFF_PROMISC) { res = dev_set_promiscuity(slave_dev, 1); if (res) goto err_close; } /* set allmulti level to new slave */ if (bond_dev->flags & IFF_ALLMULTI) { res = dev_set_allmulti(slave_dev, 1); if (res) goto err_close; } netif_addr_lock_bh(bond_dev); /* upload master's mc_list to new slave */ netdev_for_each_mc_addr(ha, bond_dev) dev_mc_add(slave_dev, ha->addr); netif_addr_unlock_bh(bond_dev); } if (bond->params.mode == BOND_MODE_8023AD) { /* add lacpdu mc addr to mc list */ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; dev_mc_add(slave_dev, lacpdu_multicast); } bond_add_vlans_on_slave(bond, slave_dev); write_lock_bh(&bond->lock); bond_attach_slave(bond, new_slave); new_slave->delay = 0; new_slave->link_failure_count = 0; write_unlock_bh(&bond->lock); bond_compute_features(bond); read_lock(&bond->lock); new_slave->last_arp_rx = jiffies; if (bond->params.miimon && !bond->params.use_carrier) { link_reporting = bond_check_dev_link(bond, slave_dev, 1); if ((link_reporting == -1) && !bond->params.arp_interval) { /* * miimon is set but a bonded network driver * does not support ETHTOOL/MII and * arp_interval is not set. Note: if * use_carrier is enabled, we will never go * here (because netif_carrier is always * supported); thus, we don't need to change * the messages for netif_carrier. */ pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! 
see bonding.txt for details.\n", bond_dev->name, slave_dev->name); } else if (link_reporting == -1) { /* unable get link status using mii/ethtool */ pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n", bond_dev->name, slave_dev->name); } } /* check for initial state */ if (!bond->params.miimon || (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) { if (bond->params.updelay) { pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n"); new_slave->link = BOND_LINK_BACK; new_slave->delay = bond->params.updelay; } else { pr_debug("Initial state of slave_dev is BOND_LINK_UP\n"); new_slave->link = BOND_LINK_UP; } new_slave->jiffies = jiffies; } else { pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n"); new_slave->link = BOND_LINK_DOWN; } if (bond_update_speed_duplex(new_slave) && (new_slave->link != BOND_LINK_DOWN)) { pr_warning("%s: Warning: failed to get speed and duplex from %s, assumed to be 100Mb/sec and Full.\n", bond_dev->name, new_slave->dev->name); if (bond->params.mode == BOND_MODE_8023AD) { pr_warning("%s: Warning: Operation of 802.3ad mode requires ETHTOOL support in base driver for proper aggregator selection.\n", bond_dev->name); } } if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { /* if there is a primary slave, remember it */ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { bond->primary_slave = new_slave; bond->force_primary = true; } } write_lock_bh(&bond->curr_slave_lock); switch (bond->params.mode) { case BOND_MODE_ACTIVEBACKUP: bond_set_slave_inactive_flags(new_slave); bond_select_active_slave(bond); break; case BOND_MODE_8023AD: /* in 802.3ad mode, the internal mechanism * will activate the slaves in the selected * aggregator */ bond_set_slave_inactive_flags(new_slave); /* if this is the first slave */ if (bond->slave_cnt == 1) { SLAVE_AD_INFO(new_slave).id = 1; /* Initialize AD with the number of times that the AD timer is called in 1 second * can be called only after the mac address of the bond is set */ bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL); } else { SLAVE_AD_INFO(new_slave).id = SLAVE_AD_INFO(new_slave->prev).id + 1; } bond_3ad_bind_slave(new_slave); break; case BOND_MODE_TLB: case BOND_MODE_ALB: bond_set_active_slave(new_slave); bond_set_slave_inactive_flags(new_slave); bond_select_active_slave(bond); break; default: pr_debug("This slave is always active in trunk mode\n"); /* always active in trunk mode */ bond_set_active_slave(new_slave); /* In trunking mode there is little meaning to curr_active_slave * anyway (it holds no special properties of the bond device), * so we can change it without calling change_active_interface() */ if (!bond->curr_active_slave) bond->curr_active_slave = new_slave; break; } /* switch(bond_mode) */ write_unlock_bh(&bond->curr_slave_lock); bond_set_carrier(bond); #ifdef CONFIG_NET_POLL_CONTROLLER slave_dev->npinfo = bond_netpoll_info(bond); if (slave_dev->npinfo) { if (slave_enable_netpoll(new_slave)) { read_unlock(&bond->lock); pr_info("Error, %s: master_dev is using netpoll, " "but new slave device does not support netpoll.\n", bond_dev->name); res = -EBUSY; goto err_close; } } #endif read_unlock(&bond->lock); res = bond_create_slave_symlinks(bond_dev, slave_dev); if (res) goto err_close; res = netdev_rx_handler_register(slave_dev, bond_handle_frame, new_slave); if (res) { pr_debug("Error %d calling 
netdev_rx_handler_register\n", res); goto err_dest_symlinks; } pr_info("%s: enslaving %s as a%s interface with a%s link.\n", bond_dev->name, slave_dev->name, bond_is_active_slave(new_slave) ? "n active" : " backup", new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); /* enslave is successful */ return 0; /* Undo stages on error */ err_dest_symlinks: bond_destroy_slave_symlinks(bond_dev, slave_dev); err_close: dev_close(slave_dev); err_unset_master: netdev_set_bond_master(slave_dev, NULL); err_restore_mac: if (!bond->params.fail_over_mac) { /* XXX TODO - fom follow mode needs to change master's * MAC if this slave's MAC is in use by the bond, or at * least print a warning. */ memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); addr.sa_family = slave_dev->type; dev_set_mac_address(slave_dev, &addr); } err_restore_mtu: dev_set_mtu(slave_dev, new_slave->original_mtu); err_free: kfree(new_slave); err_undo_flags: bond_compute_features(bond); return res; } /* * Try to release the slave device <slave> from the bond device <master> * It is legal to access curr_active_slave without a lock because all the function * is write-locked. * * The rules for slave state should be: * for Active/Backup: * Active stays on all backups go down * for Bonded connections: * The first up interface should be left on and all others downed. */ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *oldcurrent; struct sockaddr addr; u32 old_features = bond_dev->features; /* slave is not a slave or master is not master of this slave */ if (!(slave_dev->flags & IFF_SLAVE) || (slave_dev->master != bond_dev)) { pr_err("%s: Error: cannot release %s.\n", bond_dev->name, slave_dev->name); return -EINVAL; } block_netpoll_tx(); netdev_bonding_change(bond_dev, NETDEV_RELEASE); write_lock_bh(&bond->lock); slave = bond_get_slave_by_dev(bond, slave_dev); if (!slave) { /* not a slave of this bond */ pr_info("%s: %s not enslaved\n", bond_dev->name, slave_dev->name); write_unlock_bh(&bond->lock); unblock_netpoll_tx(); return -EINVAL; } /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. */ netdev_rx_handler_unregister(slave_dev); write_unlock_bh(&bond->lock); synchronize_net(); write_lock_bh(&bond->lock); if (!bond->params.fail_over_mac) { if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && bond->slave_cnt > 1) pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n", bond_dev->name, slave_dev->name, slave->perm_hwaddr, bond_dev->name, slave_dev->name); } /* Inform AD package of unbinding of slave. */ if (bond->params.mode == BOND_MODE_8023AD) { /* must be called before the slave is * detached from the list */ bond_3ad_unbind_slave(slave); } pr_info("%s: releasing %s interface %s\n", bond_dev->name, bond_is_active_slave(slave) ? "active" : "backup", slave_dev->name); oldcurrent = bond->curr_active_slave; bond->current_arp_slave = NULL; /* release the slave from its bond */ bond_detach_slave(bond, slave); if (bond->primary_slave == slave) bond->primary_slave = NULL; if (oldcurrent == slave) bond_change_active_slave(bond, NULL); if (bond_is_lb(bond)) { /* Must be called only after the slave has been * detached from the list and the curr_active_slave * has been cleared (if our_slave == old_current), * but before a new active slave is selected. 
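 * (bond->lock is dropped around the call, presumably because
 * bond_alb_deinit_slave() takes its own locks; the pattern below is
 *	write_unlock_bh(&bond->lock);
 *	bond_alb_deinit_slave(bond, slave);
 *	write_lock_bh(&bond->lock);
 * )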
*/ write_unlock_bh(&bond->lock); bond_alb_deinit_slave(bond, slave); write_lock_bh(&bond->lock); } if (oldcurrent == slave) { /* * Note that we hold RTNL over this sequence, so there * is no concern that another slave add/remove event * will interfere. */ write_unlock_bh(&bond->lock); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); write_lock_bh(&bond->lock); } if (bond->slave_cnt == 0) { bond_set_carrier(bond); /* if the last slave was removed, zero the mac address * of the master so it will be set by the application * to the mac address of the first slave */ memset(bond_dev->dev_addr, 0, bond_dev->addr_len); if (bond_vlan_used(bond)) { pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", bond_dev->name, bond_dev->name); pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", bond_dev->name); } } write_unlock_bh(&bond->lock); unblock_netpoll_tx(); bond_compute_features(bond); if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && (old_features & NETIF_F_VLAN_CHALLENGED)) pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n", bond_dev->name, slave_dev->name, bond_dev->name); /* must do this from outside any spinlocks */ bond_destroy_slave_symlinks(bond_dev, slave_dev); bond_del_vlans_from_slave(bond, slave_dev); /* If the mode USES_PRIMARY, then we should only remove its * promisc and mc settings if it was the curr_active_slave, but that was * already taken care of above when we detached the slave */ if (!USES_PRIMARY(bond->params.mode)) { /* unset promiscuity level from slave */ if (bond_dev->flags & IFF_PROMISC) dev_set_promiscuity(slave_dev, -1); /* unset allmulti level from slave */ if (bond_dev->flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1); /* flush master's mc_list from slave */ netif_addr_lock_bh(bond_dev); bond_mc_list_flush(bond_dev, slave_dev); netif_addr_unlock_bh(bond_dev); } netdev_set_bond_master(slave_dev, NULL); slave_disable_netpoll(slave); /* close slave before restoring its mac address */ dev_close(slave_dev); if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) { /* restore original ("permanent") mac address */ memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); addr.sa_family = slave_dev->type; dev_set_mac_address(slave_dev, &addr); } dev_set_mtu(slave_dev, slave->original_mtu); slave_dev->priv_flags &= ~IFF_BONDING; kfree(slave); return 0; /* deletion OK */ } /* * First release a slave and then destroy the bond if no more slaves are left. * Must be under rtnl_lock when this function is called. */ static int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); int ret; ret = bond_release(bond_dev, slave_dev); if ((ret == 0) && (bond->slave_cnt == 0)) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; pr_info("%s: destroying bond %s.\n", bond_dev->name, bond_dev->name); unregister_netdevice(bond_dev); } return ret; } /* * This function releases all slaves. 
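 * (Called from bond_uninit() further down when the bonding device
 * itself is torn down; each slave gets the same undo steps as a
 * single bond_release().)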
*/ static int bond_release_all(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; struct net_device *slave_dev; struct sockaddr addr; write_lock_bh(&bond->lock); netif_carrier_off(bond_dev); if (bond->slave_cnt == 0) goto out; bond->current_arp_slave = NULL; bond->primary_slave = NULL; bond_change_active_slave(bond, NULL); while ((slave = bond->first_slave) != NULL) { /* Inform AD package of unbinding of slave * before slave is detached from the list. */ if (bond->params.mode == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave); slave_dev = slave->dev; bond_detach_slave(bond, slave); /* now that the slave is detached, unlock and perform * all the undo steps that should not be called from * within a lock. */ write_unlock_bh(&bond->lock); /* unregister rx_handler early so bond_handle_frame wouldn't * be called for this slave anymore. */ netdev_rx_handler_unregister(slave_dev); synchronize_net(); if (bond_is_lb(bond)) { /* must be called only after the slave * has been detached from the list */ bond_alb_deinit_slave(bond, slave); } bond_destroy_slave_symlinks(bond_dev, slave_dev); bond_del_vlans_from_slave(bond, slave_dev); /* If the mode USES_PRIMARY, then we should only remove its * promisc and mc settings if it was the curr_active_slave, but that was * already taken care of above when we detached the slave */ if (!USES_PRIMARY(bond->params.mode)) { /* unset promiscuity level from slave */ if (bond_dev->flags & IFF_PROMISC) dev_set_promiscuity(slave_dev, -1); /* unset allmulti level from slave */ if (bond_dev->flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1); /* flush master's mc_list from slave */ netif_addr_lock_bh(bond_dev); bond_mc_list_flush(bond_dev, slave_dev); netif_addr_unlock_bh(bond_dev); } netdev_set_bond_master(slave_dev, NULL); slave_disable_netpoll(slave); /* close slave before restoring its mac address */ dev_close(slave_dev); if (!bond->params.fail_over_mac) { /* restore original ("permanent") mac address*/ memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); addr.sa_family = slave_dev->type; dev_set_mac_address(slave_dev, &addr); } kfree(slave); /* re-acquire the lock before getting the next slave */ write_lock_bh(&bond->lock); } /* zero the mac address of the master so it will be * set by the application to the mac address of the * first slave */ memset(bond_dev->dev_addr, 0, bond_dev->addr_len); if (bond_vlan_used(bond)) { pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", bond_dev->name, bond_dev->name); pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", bond_dev->name); } pr_info("%s: released all slaves\n", bond_dev->name); out: write_unlock_bh(&bond->lock); bond_compute_features(bond); return 0; } /* * This function changes the active slave to slave <slave_dev>. * It returns -EINVAL in the following cases. * - <slave_dev> is not found in the list. * - There is not active slave now. * - <slave_dev> is already active. * - The link state of <slave_dev> is not BOND_LINK_UP. * - <slave_dev> is not running. * In these cases, this function does nothing. * In the other cases, current_slave pointer is changed and 0 is returned. 
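 * (Illustrative userspace sketch, with hypothetical names bond0/eth1:
 *	struct ifreq ifr;
 *	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ);
 *	strncpy(ifr.ifr_slave, "eth1", IFNAMSIZ);
 *	ioctl(sock_fd, SIOCBONDCHANGEACTIVE, &ifr);
 * which reaches this function via bond_do_ioctl() below.)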
*/ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *old_active = NULL; struct slave *new_active = NULL; int res = 0; if (!USES_PRIMARY(bond->params.mode)) return -EINVAL; /* Verify that master_dev is indeed the master of slave_dev */ if (!(slave_dev->flags & IFF_SLAVE) || (slave_dev->master != bond_dev)) return -EINVAL; read_lock(&bond->lock); read_lock(&bond->curr_slave_lock); old_active = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); new_active = bond_get_slave_by_dev(bond, slave_dev); /* * Changing to the current active: do nothing; return success. */ if (new_active && (new_active == old_active)) { read_unlock(&bond->lock); return 0; } if ((new_active) && (old_active) && (new_active->link == BOND_LINK_UP) && IS_UP(new_active->dev)) { block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_change_active_slave(bond, new_active); write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } else res = -EINVAL; read_unlock(&bond->lock); return res; } static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) { struct bonding *bond = netdev_priv(bond_dev); info->bond_mode = bond->params.mode; info->miimon = bond->params.miimon; read_lock(&bond->lock); info->num_slaves = bond->slave_cnt; read_unlock(&bond->lock); return 0; } static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; int i, res = -ENODEV; read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (i == (int)info->slave_id) { res = 0; strcpy(info->slave_name, slave->dev->name); info->link = slave->link; info->state = bond_slave_state(slave); info->link_failure_count = slave->link_failure_count; break; } } read_unlock(&bond->lock); return res; } /*-------------------------------- Monitoring -------------------------------*/ static int bond_miimon_inspect(struct bonding *bond) { struct slave *slave; int i, link_state, commit = 0; bool ignore_updelay; ignore_updelay = !bond->curr_active_slave ? true : false; bond_for_each_slave(bond, slave, i) { slave->new_link = BOND_LINK_NOCHANGE; link_state = bond_check_dev_link(bond, slave->dev, 0); switch (slave->link) { case BOND_LINK_UP: if (link_state) continue; slave->link = BOND_LINK_FAIL; slave->delay = bond->params.downdelay; if (slave->delay) { pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n", bond->dev->name, (bond->params.mode == BOND_MODE_ACTIVEBACKUP) ? (bond_is_active_slave(slave) ? "active " : "backup ") : "", slave->dev->name, bond->params.downdelay * bond->params.miimon); } /*FALLTHRU*/ case BOND_LINK_FAIL: if (link_state) { /* * recovered before downdelay expired */ slave->link = BOND_LINK_UP; slave->jiffies = jiffies; pr_info("%s: link status up again after %d ms for interface %s.\n", bond->dev->name, (bond->params.downdelay - slave->delay) * bond->params.miimon, slave->dev->name); continue; } if (slave->delay <= 0) { slave->new_link = BOND_LINK_DOWN; commit++; continue; } slave->delay--; break; case BOND_LINK_DOWN: if (!link_state) continue; slave->link = BOND_LINK_BACK; slave->delay = bond->params.updelay; if (slave->delay) { pr_info("%s: link status up for interface %s, enabling it in %d ms.\n", bond->dev->name, slave->dev->name, ignore_updelay ? 
0 : bond->params.updelay * bond->params.miimon); } /*FALLTHRU*/ case BOND_LINK_BACK: if (!link_state) { slave->link = BOND_LINK_DOWN; pr_info("%s: link status down again after %d ms for interface %s.\n", bond->dev->name, (bond->params.updelay - slave->delay) * bond->params.miimon, slave->dev->name); continue; } if (ignore_updelay) slave->delay = 0; if (slave->delay <= 0) { slave->new_link = BOND_LINK_UP; commit++; ignore_updelay = false; continue; } slave->delay--; break; } } return commit; } static void bond_miimon_commit(struct bonding *bond) { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) { switch (slave->new_link) { case BOND_LINK_NOCHANGE: continue; case BOND_LINK_UP: slave->link = BOND_LINK_UP; slave->jiffies = jiffies; if (bond->params.mode == BOND_MODE_8023AD) { /* prevent it from being the active one */ bond_set_backup_slave(slave); } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ bond_set_active_slave(slave); } else if (slave != bond->primary_slave) { /* prevent it from being the active one */ bond_set_backup_slave(slave); } bond_update_speed_duplex(slave); pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", bond->dev->name, slave->dev->name, slave->speed, slave->duplex ? "full" : "half"); /* notify ad that the link status has changed */ if (bond->params.mode == BOND_MODE_8023AD) bond_3ad_handle_link_change(slave, BOND_LINK_UP); if (bond_is_lb(bond)) bond_alb_handle_link_change(bond, slave, BOND_LINK_UP); if (!bond->curr_active_slave || (slave == bond->primary_slave)) goto do_failover; continue; case BOND_LINK_DOWN: if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; slave->link = BOND_LINK_DOWN; if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || bond->params.mode == BOND_MODE_8023AD) bond_set_slave_inactive_flags(slave); pr_info("%s: link status definitely down for interface %s, disabling it\n", bond->dev->name, slave->dev->name); if (bond->params.mode == BOND_MODE_8023AD) bond_3ad_handle_link_change(slave, BOND_LINK_DOWN); if (bond_is_lb(bond)) bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN); if (slave == bond->curr_active_slave) goto do_failover; continue; default: pr_err("%s: invalid new link %d on slave %s\n", bond->dev->name, slave->new_link, slave->dev->name); slave->new_link = BOND_LINK_NOCHANGE; continue; } do_failover: ASSERT_RTNL(); block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } bond_set_carrier(bond); } /* * bond_mii_monitor * * Really a wrapper that splits the mii monitor into two phases: an * inspection, then (if inspection indicates something needs to be done) * an acquisition of appropriate locks followed by a commit phase to * implement whatever link state changes are indicated. 
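 * (Sketch of the flow implemented below:
 *	if (bond_miimon_inspect(bond)) {
 *		read_unlock(&bond->lock);
 *		rtnl_lock();
 *		read_lock(&bond->lock);
 *		bond_miimon_commit(bond);
 *		...
 *	}
 * bond->lock is dropped first because rtnl_lock() may sleep.)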
*/ void bond_mii_monitor(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, mii_work.work); bool should_notify_peers = false; read_lock(&bond->lock); if (bond->kill_timers) goto out; if (bond->slave_cnt == 0) goto re_arm; should_notify_peers = bond_should_notify_peers(bond); if (bond_miimon_inspect(bond)) { read_unlock(&bond->lock); rtnl_lock(); read_lock(&bond->lock); bond_miimon_commit(bond); read_unlock(&bond->lock); rtnl_unlock(); /* might sleep, hold no other locks */ read_lock(&bond->lock); } re_arm: if (bond->params.miimon) queue_delayed_work(bond->wq, &bond->mii_work, msecs_to_jiffies(bond->params.miimon)); out: read_unlock(&bond->lock); if (should_notify_peers) { rtnl_lock(); netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); rtnl_unlock(); } } static __be32 bond_glean_dev_ip(struct net_device *dev) { struct in_device *idev; struct in_ifaddr *ifa; __be32 addr = 0; if (!dev) return 0; rcu_read_lock(); idev = __in_dev_get_rcu(dev); if (!idev) goto out; ifa = idev->ifa_list; if (!ifa) goto out; addr = ifa->ifa_local; out: rcu_read_unlock(); return addr; } static int bond_has_this_ip(struct bonding *bond, __be32 ip) { struct vlan_entry *vlan; if (ip == bond->master_ip) return 1; list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { if (ip == vlan->vlan_ip) return 1; } return 0; } /* * We go to the (large) trouble of VLAN tagging ARP frames because * switches in VLAN mode (especially if ports are configured as * "native" to a VLAN) might not pass non-tagged frames. */ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id) { struct sk_buff *skb; pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op, slave_dev->name, dest_ip, src_ip, vlan_id); skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, NULL, slave_dev->dev_addr, NULL); if (!skb) { pr_err("ARP packet allocation failed\n"); return; } if (vlan_id) { skb = vlan_put_tag(skb, vlan_id); if (!skb) { pr_err("failed to insert VLAN tag\n"); return; } } arp_xmit(skb); } static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { int i, vlan_id; __be32 *targets = bond->params.arp_targets; struct vlan_entry *vlan; struct net_device *vlan_dev; struct rtable *rt; for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { if (!targets[i]) break; pr_debug("basa: target %x\n", targets[i]); if (!bond_vlan_used(bond)) { pr_debug("basa: empty vlan: arp_send\n"); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], bond->master_ip, 0); continue; } /* * If VLANs are configured, we do a route lookup to * determine which VLAN interface would be used, so we * can tag the ARP with the proper VLAN tag. 
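 * (E.g., with an assumed arp_ip_target of 10.0.0.1 reachable through a
 * VLAN device stacked on this bond, the route lookup below yields a
 * dst whose dev is that VLAN net_device, and the probe goes out
 * tagged with the matching vlan_id.)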
*/ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, RTO_ONLINK, 0); if (IS_ERR(rt)) { if (net_ratelimit()) { pr_warning("%s: no route to arp_ip_target %pI4\n", bond->dev->name, &targets[i]); } continue; } /* * This target is not on a VLAN */ if (rt->dst.dev == bond->dev) { ip_rt_put(rt); pr_debug("basa: rtdev == bond->dev: arp_send\n"); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], bond->master_ip, 0); continue; } vlan_id = 0; list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { rcu_read_lock(); vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id); rcu_read_unlock(); if (vlan_dev == rt->dst.dev) { vlan_id = vlan->vlan_id; pr_debug("basa: vlan match on %s %d\n", vlan_dev->name, vlan_id); break; } } if (vlan_id) { ip_rt_put(rt); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], vlan->vlan_ip, vlan_id); continue; } if (net_ratelimit()) { pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", bond->dev->name, &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL"); } ip_rt_put(rt); } } static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) { int i; __be32 *targets = bond->params.arp_targets; for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); if (sip == targets[i]) { if (bond_has_this_ip(bond, tip)) slave->last_arp_rx = jiffies; return; } } } static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, struct slave *slave) { struct arphdr *arp; unsigned char *arp_ptr; __be32 sip, tip; if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) return; read_lock(&bond->lock); pr_debug("bond_arp_rcv: bond %s skb->dev %s\n", bond->dev->name, skb->dev->name); if (!pskb_may_pull(skb, arp_hdr_len(bond->dev))) goto out_unlock; arp = arp_hdr(skb); if (arp->ar_hln != bond->dev->addr_len || skb->pkt_type == PACKET_OTHERHOST || skb->pkt_type == PACKET_LOOPBACK || arp->ar_hrd != htons(ARPHRD_ETHER) || arp->ar_pro != htons(ETH_P_IP) || arp->ar_pln != 4) goto out_unlock; arp_ptr = (unsigned char *)(arp + 1); arp_ptr += bond->dev->addr_len; memcpy(&sip, arp_ptr, 4); arp_ptr += 4 + bond->dev->addr_len; memcpy(&tip, arp_ptr, 4); pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", bond->dev->name, slave->dev->name, bond_slave_state(slave), bond->params.arp_validate, slave_do_arp_validate(bond, slave), &sip, &tip); /* * Backup slaves won't see the ARP reply, but do come through * here for each ARP probe (so we swap the sip/tip to validate * the probe). In a "redundant switch, common router" type of * configuration, the ARP probe will (hopefully) travel from * the active, through one switch, the router, then the other * switch before reaching the backup. */ if (bond_is_active_slave(slave)) bond_validate_arp(bond, slave, sip, tip); else bond_validate_arp(bond, slave, tip, sip); out_unlock: read_unlock(&bond->lock); } /* * this function is called regularly to monitor each slave's link * ensuring that traffic is being sent and received when arp monitoring * is used in load-balancing mode. if the adapter has been dormant, then an * arp is transmitted to generate traffic. see activebackup_arp_monitor for * arp monitoring in active backup mode. 
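 * (Roughly: a slave counts as up only while both its last transmit
 * and last receive fall within about one arp_interval of now — see
 * the time_in_range() checks below.)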
*/ void bond_loadbalance_arp_mon(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, arp_work.work); struct slave *slave, *oldcurrent; int do_failover = 0; int delta_in_ticks; int i; read_lock(&bond->lock); delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); if (bond->kill_timers) goto out; if (bond->slave_cnt == 0) goto re_arm; read_lock(&bond->curr_slave_lock); oldcurrent = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); /* see if any of the previous devices are up now (i.e. they have * xmt and rcv traffic). the curr_active_slave does not come into * the picture unless it is null. also, slave->jiffies is not needed * here because we send an arp on each slave and give a slave as * long as it needs to get the tx/rx within the delta. * TODO: what about up/down delay in arp mode? it wasn't here before * so it can wait */ bond_for_each_slave(bond, slave, i) { unsigned long trans_start = dev_trans_start(slave->dev); if (slave->link != BOND_LINK_UP) { if (time_in_range(jiffies, trans_start - delta_in_ticks, trans_start + delta_in_ticks) && time_in_range(jiffies, slave->dev->last_rx - delta_in_ticks, slave->dev->last_rx + delta_in_ticks)) { slave->link = BOND_LINK_UP; bond_set_active_slave(slave); /* primary_slave has no meaning in round-robin * mode. the window of a slave being up and * curr_active_slave being null after enslaving * is closed. */ if (!oldcurrent) { pr_info("%s: link status definitely up for interface %s, ", bond->dev->name, slave->dev->name); do_failover = 1; } else { pr_info("%s: interface %s is now up\n", bond->dev->name, slave->dev->name); } } } else { /* slave->link == BOND_LINK_UP */ /* not all switches will respond to an arp request * when the source ip is 0, so don't take the link down * if we don't know our ip yet */ if (!time_in_range(jiffies, trans_start - delta_in_ticks, trans_start + 2 * delta_in_ticks) || !time_in_range(jiffies, slave->dev->last_rx - delta_in_ticks, slave->dev->last_rx + 2 * delta_in_ticks)) { slave->link = BOND_LINK_DOWN; bond_set_backup_slave(slave); if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; pr_info("%s: interface %s is now down.\n", bond->dev->name, slave->dev->name); if (slave == oldcurrent) do_failover = 1; } } /* note: if switch is in round-robin mode, all links * must tx arp to ensure all links rx an arp - otherwise * links may oscillate or not come up at all; if switch is * in something like xor mode, there is nothing we can * do - all replies will be rx'ed on same link causing slaves * to be unstable during low/no traffic periods */ if (IS_UP(slave->dev)) bond_arp_send_all(bond, slave); } if (do_failover) { block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } re_arm: if (bond->params.arp_interval) queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); out: read_unlock(&bond->lock); } /* * Called to inspect slaves for active-backup mode ARP monitor link state * changes. Sets new_link in slaves to specify what action should take * place for the slave. Returns 0 if no changes are found, >0 if changes * to link states must be committed. * * Called with bond->lock held for read. 
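 * (Inspection only records a proposed state in slave->new_link;
 * nothing is acted on until bond_ab_arp_commit() runs under RTNL.)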
*/ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) { struct slave *slave; int i, commit = 0; unsigned long trans_start; bond_for_each_slave(bond, slave, i) { slave->new_link = BOND_LINK_NOCHANGE; if (slave->link != BOND_LINK_UP) { if (time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, slave_last_rx(bond, slave) + delta_in_ticks)) { slave->new_link = BOND_LINK_UP; commit++; } continue; } /* * Give slaves 2*delta after being enslaved or made * active. This avoids bouncing, as the last receive * times need a full ARP monitor cycle to be updated. */ if (time_in_range(jiffies, slave->jiffies - delta_in_ticks, slave->jiffies + 2 * delta_in_ticks)) continue; /* * Backup slave is down if: * - No current_arp_slave AND * - more than 3*delta since last receive AND * - the bond has an IP address * * Note: a non-null current_arp_slave indicates * the curr_active_slave went down and we are * searching for a new one; under this condition * we only take the curr_active_slave down - this * gives each slave a chance to tx/rx traffic * before being taken out */ if (!bond_is_active_slave(slave) && !bond->current_arp_slave && !time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { slave->new_link = BOND_LINK_DOWN; commit++; } /* * Active slave is down if: * - more than 2*delta since transmitting OR * - (more than 2*delta since receive AND * the bond has an IP address) */ trans_start = dev_trans_start(slave->dev); if (bond_is_active_slave(slave) && (!time_in_range(jiffies, trans_start - delta_in_ticks, trans_start + 2 * delta_in_ticks) || !time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { slave->new_link = BOND_LINK_DOWN; commit++; } } return commit; } /* * Called to commit link state changes noted by inspection step of * active-backup mode ARP monitor. * * Called with RTNL and bond->lock for read. */ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) { struct slave *slave; int i; unsigned long trans_start; bond_for_each_slave(bond, slave, i) { switch (slave->new_link) { case BOND_LINK_NOCHANGE: continue; case BOND_LINK_UP: trans_start = dev_trans_start(slave->dev); if ((!bond->curr_active_slave && time_in_range(jiffies, trans_start - delta_in_ticks, trans_start + delta_in_ticks)) || bond->curr_active_slave != slave) { slave->link = BOND_LINK_UP; bond->current_arp_slave = NULL; pr_info("%s: link status definitely up for interface %s.\n", bond->dev->name, slave->dev->name); if (!bond->curr_active_slave || (slave == bond->primary_slave)) goto do_failover; } continue; case BOND_LINK_DOWN: if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; slave->link = BOND_LINK_DOWN; bond_set_slave_inactive_flags(slave); pr_info("%s: link status definitely down for interface %s, disabling it\n", bond->dev->name, slave->dev->name); if (slave == bond->curr_active_slave) { bond->current_arp_slave = NULL; goto do_failover; } continue; default: pr_err("%s: impossible: new_link %d on slave %s\n", bond->dev->name, slave->new_link, slave->dev->name); continue; } do_failover: ASSERT_RTNL(); block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } bond_set_carrier(bond); } /* * Send ARP probes for active-backup mode ARP monitor. * * Called with bond->lock held for read. 
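 * (The probe logic rotates current_arp_slave through the slave list,
 * transmitting from one candidate at a time so each gets a chance to
 * see traffic before the next is tried.)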
*/ static void bond_ab_arp_probe(struct bonding *bond) { struct slave *slave; int i; read_lock(&bond->curr_slave_lock); if (bond->current_arp_slave && bond->curr_active_slave) pr_info("PROBE: c_arp %s && cas %s BAD\n", bond->current_arp_slave->dev->name, bond->curr_active_slave->dev->name); if (bond->curr_active_slave) { bond_arp_send_all(bond, bond->curr_active_slave); read_unlock(&bond->curr_slave_lock); return; } read_unlock(&bond->curr_slave_lock); /* if we don't have a curr_active_slave, search for the next available * backup slave from the current_arp_slave and make it the candidate * for becoming the curr_active_slave */ if (!bond->current_arp_slave) { bond->current_arp_slave = bond->first_slave; if (!bond->current_arp_slave) return; } bond_set_slave_inactive_flags(bond->current_arp_slave); /* search for next candidate */ bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) { if (IS_UP(slave->dev)) { slave->link = BOND_LINK_BACK; bond_set_slave_active_flags(slave); bond_arp_send_all(bond, slave); slave->jiffies = jiffies; bond->current_arp_slave = slave; break; } /* if the link state is up at this point, we * mark it down - this can happen if we have * simultaneous link failures and * reselect_active_interface doesn't make this * one the current slave so it is still marked * up when it is actually down */ if (slave->link == BOND_LINK_UP) { slave->link = BOND_LINK_DOWN; if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; bond_set_slave_inactive_flags(slave); pr_info("%s: backup interface %s is now down.\n", bond->dev->name, slave->dev->name); } } } void bond_activebackup_arp_mon(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, arp_work.work); bool should_notify_peers = false; int delta_in_ticks; read_lock(&bond->lock); if (bond->kill_timers) goto out; delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); if (bond->slave_cnt == 0) goto re_arm; should_notify_peers = bond_should_notify_peers(bond); if (bond_ab_arp_inspect(bond, delta_in_ticks)) { read_unlock(&bond->lock); rtnl_lock(); read_lock(&bond->lock); bond_ab_arp_commit(bond, delta_in_ticks); read_unlock(&bond->lock); rtnl_unlock(); read_lock(&bond->lock); } bond_ab_arp_probe(bond); re_arm: if (bond->params.arp_interval) queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); out: read_unlock(&bond->lock); if (should_notify_peers) { rtnl_lock(); netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); rtnl_unlock(); } } /*-------------------------- netdev event handling --------------------------*/ /* * Change device name */ static int bond_event_changename(struct bonding *bond) { bond_remove_proc_entry(bond); bond_create_proc_entry(bond); bond_debug_reregister(bond); return NOTIFY_DONE; } static int bond_master_netdev_event(unsigned long event, struct net_device *bond_dev) { struct bonding *event_bond = netdev_priv(bond_dev); switch (event) { case NETDEV_CHANGENAME: return bond_event_changename(event_bond); default: break; } return NOTIFY_DONE; } static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev) { struct net_device *bond_dev = slave_dev->master; struct bonding *bond = netdev_priv(bond_dev); switch (event) { case NETDEV_UNREGISTER: if (bond_dev) { if (bond->setup_by_slave) bond_release_and_destroy(bond_dev, slave_dev); else bond_release(bond_dev, slave_dev); } break; case NETDEV_CHANGE: if (bond->params.mode == BOND_MODE_8023AD || bond_is_lb(bond)) { struct slave *slave; slave = bond_get_slave_by_dev(bond, 
slave_dev); if (slave) { u32 old_speed = slave->speed; u8 old_duplex = slave->duplex; bond_update_speed_duplex(slave); if (bond_is_lb(bond)) break; if (old_speed != slave->speed) bond_3ad_adapter_speed_changed(slave); if (old_duplex != slave->duplex) bond_3ad_adapter_duplex_changed(slave); } } break; case NETDEV_DOWN: /* * ... Or is it this? */ break; case NETDEV_CHANGEMTU: /* * TODO: Should slaves be allowed to * independently alter their MTU? For * an active-backup bond, slaves need * not be the same type of device, so * MTUs may vary. For other modes, * slaves arguably should have the * same MTUs. To do this, we'd need to * take over the slave's change_mtu * function for the duration of their * servitude. */ break; case NETDEV_CHANGENAME: /* * TODO: handle changing the primary's name */ break; case NETDEV_FEAT_CHANGE: bond_compute_features(bond); break; default: break; } return NOTIFY_DONE; } /* * bond_netdev_event: handle netdev notifier chain events. * * This function receives events for the netdev chain. The caller (an * ioctl handler calling blocking_notifier_call_chain) holds the necessary * locks for us to safely manipulate the slave devices (RTNL lock, * dev_probe_lock). */ static int bond_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = (struct net_device *)ptr; pr_debug("event_dev: %s, event: %lx\n", event_dev ? event_dev->name : "None", event); if (!(event_dev->priv_flags & IFF_BONDING)) return NOTIFY_DONE; if (event_dev->flags & IFF_MASTER) { pr_debug("IFF_MASTER\n"); return bond_master_netdev_event(event, event_dev); } if (event_dev->flags & IFF_SLAVE) { pr_debug("IFF_SLAVE\n"); return bond_slave_netdev_event(event, event_dev); } return NOTIFY_DONE; } /* * bond_inetaddr_event: handle inetaddr notifier chain events. * * We keep track of device IPs primarily to use as source addresses in * ARP monitor probes (rather than spewing out broadcasts all the time). * * We track one IP for the main device (if it has one), plus one per VLAN. */ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev; struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id); struct bonding *bond; struct vlan_entry *vlan; list_for_each_entry(bond, &bn->dev_list, bond_list) { if (bond->dev == event_dev) { switch (event) { case NETDEV_UP: bond->master_ip = ifa->ifa_local; return NOTIFY_OK; case NETDEV_DOWN: bond->master_ip = bond_glean_dev_ip(bond->dev); return NOTIFY_OK; default: return NOTIFY_DONE; } } list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id); if (vlan_dev == event_dev) { switch (event) { case NETDEV_UP: vlan->vlan_ip = ifa->ifa_local; return NOTIFY_OK; case NETDEV_DOWN: vlan->vlan_ip = bond_glean_dev_ip(vlan_dev); return NOTIFY_OK; default: return NOTIFY_DONE; } } } } return NOTIFY_DONE; } static struct notifier_block bond_netdev_notifier = { .notifier_call = bond_netdev_event, }; static struct notifier_block bond_inetaddr_notifier = { .notifier_call = bond_inetaddr_event, }; /*---------------------------- Hashing Policies -----------------------------*/ /* * Hash for the output device based upon layer 2 and layer 3 data. 
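 * (Worked example with assumed values — saddr 10.0.0.1, daddr
 * 10.0.0.2, h_dest[5] 0x05, h_source[5] 0x0a, count 2:
 *	ntohl(saddr ^ daddr) & 0xffff = 3, 0x05 ^ 0x0a = 0x0f,
 *	(3 ^ 0x0f) % 2 = 12 % 2 = 0, so slave 0 is chosen.)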
If * the packet is not IP mimic bond_xmit_hash_policy_l2() */ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) { struct ethhdr *data = (struct ethhdr *)skb->data; struct iphdr *iph = ip_hdr(skb); if (skb->protocol == htons(ETH_P_IP)) { return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ (data->h_dest[5] ^ data->h_source[5])) % count; } return (data->h_dest[5] ^ data->h_source[5]) % count; } /* * Hash for the output device based upon layer 3 and layer 4 data. If * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is * altogether not IP, mimic bond_xmit_hash_policy_l2() */ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) { struct ethhdr *data = (struct ethhdr *)skb->data; struct iphdr *iph = ip_hdr(skb); __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); int layer4_xor = 0; if (skb->protocol == htons(ETH_P_IP)) { if (!ip_is_fragment(iph) && (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)) { layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; } return (data->h_dest[5] ^ data->h_source[5]) % count; } /* * Hash for the output device based upon layer 2 data */ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) { struct ethhdr *data = (struct ethhdr *)skb->data; return (data->h_dest[5] ^ data->h_source[5]) % count; } /*-------------------------- Device entry points ----------------------------*/ static int bond_open(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); bond->kill_timers = 0; INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); if (bond_is_lb(bond)) { /* bond_alb_initialize must be called before the timer * is started. */ if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) { /* something went wrong - fail the open operation */ return -ENOMEM; } INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); queue_delayed_work(bond->wq, &bond->alb_work, 0); } if (bond->params.miimon) { /* link check interval, in milliseconds. */ INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); queue_delayed_work(bond->wq, &bond->mii_work, 0); } if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon); else INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); queue_delayed_work(bond->wq, &bond->arp_work, 0); if (bond->params.arp_validate) bond->recv_probe = bond_arp_rcv; } if (bond->params.mode == BOND_MODE_8023AD) { INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); queue_delayed_work(bond->wq, &bond->ad_work, 0); /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; bond_3ad_initiate_agg_selection(bond, 1); } return 0; } static int bond_close(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); write_lock_bh(&bond->lock); bond->send_peer_notif = 0; /* signal timers not to re-arm */ bond->kill_timers = 1; write_unlock_bh(&bond->lock); if (bond->params.miimon) { /* link check interval, in milliseconds. */ cancel_delayed_work(&bond->mii_work); } if (bond->params.arp_interval) { /* arp interval, in milliseconds. 
*/ cancel_delayed_work(&bond->arp_work); } switch (bond->params.mode) { case BOND_MODE_8023AD: cancel_delayed_work(&bond->ad_work); break; case BOND_MODE_TLB: case BOND_MODE_ALB: cancel_delayed_work(&bond->alb_work); break; default: break; } if (delayed_work_pending(&bond->mcast_work)) cancel_delayed_work(&bond->mcast_work); if (bond_is_lb(bond)) { /* Must be called only after all * slaves have been released */ bond_alb_deinitialize(bond); } bond->recv_probe = NULL; return 0; } static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { struct bonding *bond = netdev_priv(bond_dev); struct rtnl_link_stats64 temp; struct slave *slave; int i; memset(stats, 0, sizeof(*stats)); read_lock_bh(&bond->lock); bond_for_each_slave(bond, slave, i) { const struct rtnl_link_stats64 *sstats = dev_get_stats(slave->dev, &temp); stats->rx_packets += sstats->rx_packets; stats->rx_bytes += sstats->rx_bytes; stats->rx_errors += sstats->rx_errors; stats->rx_dropped += sstats->rx_dropped; stats->tx_packets += sstats->tx_packets; stats->tx_bytes += sstats->tx_bytes; stats->tx_errors += sstats->tx_errors; stats->tx_dropped += sstats->tx_dropped; stats->multicast += sstats->multicast; stats->collisions += sstats->collisions; stats->rx_length_errors += sstats->rx_length_errors; stats->rx_over_errors += sstats->rx_over_errors; stats->rx_crc_errors += sstats->rx_crc_errors; stats->rx_frame_errors += sstats->rx_frame_errors; stats->rx_fifo_errors += sstats->rx_fifo_errors; stats->rx_missed_errors += sstats->rx_missed_errors; stats->tx_aborted_errors += sstats->tx_aborted_errors; stats->tx_carrier_errors += sstats->tx_carrier_errors; stats->tx_fifo_errors += sstats->tx_fifo_errors; stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; stats->tx_window_errors += sstats->tx_window_errors; } read_unlock_bh(&bond->lock); return stats; } static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) { struct net_device *slave_dev = NULL; struct ifbond k_binfo; struct ifbond __user *u_binfo = NULL; struct ifslave k_sinfo; struct ifslave __user *u_sinfo = NULL; struct mii_ioctl_data *mii = NULL; int res = 0; pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd); switch (cmd) { case SIOCGMIIPHY: mii = if_mii(ifr); if (!mii) return -EINVAL; mii->phy_id = 0; /* Fall Through */ case SIOCGMIIREG: /* * We do this again just in case we were called by SIOCGMIIREG * instead of SIOCGMIIPHY. 
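 * (A userspace `mii-tool bond0`, for example, ends up here; register
 * 1 is the MII BMSR, so BMSR_LSTATUS is reported iff the bond
 * currently has carrier.)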
*/ mii = if_mii(ifr); if (!mii) return -EINVAL; if (mii->reg_num == 1) { struct bonding *bond = netdev_priv(bond_dev); mii->val_out = 0; read_lock(&bond->lock); read_lock(&bond->curr_slave_lock); if (netif_carrier_ok(bond->dev)) mii->val_out = BMSR_LSTATUS; read_unlock(&bond->curr_slave_lock); read_unlock(&bond->lock); } return 0; case BOND_INFO_QUERY_OLD: case SIOCBONDINFOQUERY: u_binfo = (struct ifbond __user *)ifr->ifr_data; if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond))) return -EFAULT; res = bond_info_query(bond_dev, &k_binfo); if (res == 0 && copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) return -EFAULT; return res; case BOND_SLAVE_INFO_QUERY_OLD: case SIOCBONDSLAVEINFOQUERY: u_sinfo = (struct ifslave __user *)ifr->ifr_data; if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave))) return -EFAULT; res = bond_slave_info_query(bond_dev, &k_sinfo); if (res == 0 && copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave))) return -EFAULT; return res; default: /* Go on */ break; } if (!capable(CAP_NET_ADMIN)) return -EPERM; slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave); pr_debug("slave_dev=%p:\n", slave_dev); if (!slave_dev) res = -ENODEV; else { pr_debug("slave_dev->name=%s:\n", slave_dev->name); switch (cmd) { case BOND_ENSLAVE_OLD: case SIOCBONDENSLAVE: res = bond_enslave(bond_dev, slave_dev); break; case BOND_RELEASE_OLD: case SIOCBONDRELEASE: res = bond_release(bond_dev, slave_dev); break; case BOND_SETHWADDR_OLD: case SIOCBONDSETHWADDR: res = bond_sethwaddr(bond_dev, slave_dev); break; case BOND_CHANGE_ACTIVE_OLD: case SIOCBONDCHANGEACTIVE: res = bond_ioctl_change_active(bond_dev, slave_dev); break; default: res = -EOPNOTSUPP; } dev_put(slave_dev); } return res; } static bool bond_addr_in_mc_list(unsigned char *addr, struct netdev_hw_addr_list *list, int addrlen) { struct netdev_hw_addr *ha; netdev_hw_addr_list_for_each(ha, list) if (!memcmp(ha->addr, addr, addrlen)) return true; return false; } static void bond_set_multicast_list(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct netdev_hw_addr *ha; bool found; /* * Do promisc before checking multicast_mode */ if ((bond_dev->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC)) /* * FIXME: Need to handle the error when one of the multi-slaves * encounters error. */ bond_set_promiscuity(bond, 1); if (!(bond_dev->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC)) bond_set_promiscuity(bond, -1); /* set allmulti flag to slaves */ if ((bond_dev->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI)) /* * FIXME: Need to handle the error when one of the multi-slaves * encounters error. 
*/ bond_set_allmulti(bond, 1); if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI)) bond_set_allmulti(bond, -1); read_lock(&bond->lock); bond->flags = bond_dev->flags; /* looking for addresses to add to slaves' mc list */ netdev_for_each_mc_addr(ha, bond_dev) { found = bond_addr_in_mc_list(ha->addr, &bond->mc_list, bond_dev->addr_len); if (!found) bond_mc_add(bond, ha->addr); } /* looking for addresses to delete from slaves' list */ netdev_hw_addr_list_for_each(ha, &bond->mc_list) { found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc, bond_dev->addr_len); if (!found) bond_mc_del(bond, ha->addr); } /* save master's multicast list */ __hw_addr_flush(&bond->mc_list); __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc, bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST); read_unlock(&bond->lock); } static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms) { struct bonding *bond = netdev_priv(dev); struct slave *slave = bond->first_slave; if (slave) { const struct net_device_ops *slave_ops = slave->dev->netdev_ops; if (slave_ops->ndo_neigh_setup) return slave_ops->ndo_neigh_setup(slave->dev, parms); } return 0; } /* * Change the MTU of all of a master's slaves to match the master */ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *stop_at; int res = 0; int i; pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond, (bond_dev ? bond_dev->name : "None"), new_mtu); /* Can't hold bond->lock with bh disabled here since * some base drivers panic. On the other hand we can't * hold bond->lock without bh disabled because we'll * deadlock. The only solution is to rely on the fact * that we're under rtnl_lock here, and the slaves * list won't change. This doesn't solve the problem * of setting the slave's MTU while it is * transmitting, but the assumption is that the base * driver can handle that. * * TODO: figure out a way to safely iterate the slaves * list, but without holding a lock around the actual * call to the base driver. */ bond_for_each_slave(bond, slave, i) { pr_debug("s %p s->p %p c_m %p\n", slave, slave->prev, slave->dev->netdev_ops->ndo_change_mtu); res = dev_set_mtu(slave->dev, new_mtu); if (res) { /* If we failed to set the slave's mtu to the new value * we must abort the operation even in ACTIVE_BACKUP * mode, because if we allow the backup slaves to have * different mtu values than the active slave we'll * need to change their mtu when doing a failover. That * means changing their mtu from timer context, which * is probably not a good idea. */ pr_debug("err %d %s\n", res, slave->dev->name); goto unwind; } } bond_dev->mtu = new_mtu; return 0; unwind: /* unwind from head to the slave that failed */ stop_at = slave; bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) { int tmp_res; tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu); if (tmp_res) { pr_debug("unwind err %d dev %s\n", tmp_res, slave->dev->name); } } return res; } /* * Change HW address * * Note that many devices must be down to change the HW address, and * downing the master releases all slaves. We can make bonds full of * bonding devices to test this, however. 
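 * (E.g. `ip link set bond0 address 02:00:00:aa:bb:cc` arrives here via
 * ndo_set_mac_address; the new address is pushed to every slave and
 * unwound again if any slave rejects it.)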
*/ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) { struct bonding *bond = netdev_priv(bond_dev); struct sockaddr *sa = addr, tmp_sa; struct slave *slave, *stop_at; int res = 0; int i; if (bond->params.mode == BOND_MODE_ALB) return bond_alb_set_mac_address(bond_dev, addr); pr_debug("bond=%p, name=%s\n", bond, bond_dev ? bond_dev->name : "None"); /* * If fail_over_mac is set to active, do nothing and return * success. Returning an error causes ifenslave to fail. */ if (bond->params.fail_over_mac == BOND_FOM_ACTIVE) return 0; if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; /* Can't hold bond->lock with bh disabled here since * some base drivers panic. On the other hand we can't * hold bond->lock without bh disabled because we'll * deadlock. The only solution is to rely on the fact * that we're under rtnl_lock here, and the slaves * list won't change. This doesn't solve the problem * of setting the slave's hw address while it is * transmitting, but the assumption is that the base * driver can handle that. * * TODO: figure out a way to safely iterate the slaves * list, but without holding a lock around the actual * call to the base driver. */ bond_for_each_slave(bond, slave, i) { const struct net_device_ops *slave_ops = slave->dev->netdev_ops; pr_debug("slave %p %s\n", slave, slave->dev->name); if (slave_ops->ndo_set_mac_address == NULL) { res = -EOPNOTSUPP; pr_debug("EOPNOTSUPP %s\n", slave->dev->name); goto unwind; } res = dev_set_mac_address(slave->dev, addr); if (res) { /* TODO: consider downing the slave * and retry ? * User should expect communications * breakage anyway until ARP finish * updating, so... */ pr_debug("err %d %s\n", res, slave->dev->name); goto unwind; } } /* success */ memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len); return 0; unwind: memcpy(tmp_sa.sa_data, bond_dev->dev_addr, bond_dev->addr_len); tmp_sa.sa_family = bond_dev->type; /* unwind from head to the slave that failed */ stop_at = slave; bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) { int tmp_res; tmp_res = dev_set_mac_address(slave->dev, &tmp_sa); if (tmp_res) { pr_debug("unwind err %d dev %s\n", tmp_res, slave->dev->name); } } return res; } static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *start_at; int i, slave_no, res = 1; struct iphdr *iph = ip_hdr(skb); /* * Start with the curr_active_slave that joined the bond as the * default for sending IGMP traffic. For failover purposes one * needs to maintain some consistency for the interface that will * send the join/membership reports. The curr_active_slave found * will send all of this type of traffic. */ if ((iph->protocol == IPPROTO_IGMP) && (skb->protocol == htons(ETH_P_IP))) { read_lock(&bond->curr_slave_lock); slave = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); if (!slave) goto out; } else { /* * Concurrent TX may collide on rr_tx_counter; we accept * that as being rare enough not to justify using an * atomic op here. 
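 * (With 3 slaves, successive counter values 0,1,2,3,4 select slaves
 * 0,1,2,0,1; a lost increment under concurrent TX merely repeats one
 * slave, which is harmless for load distribution.)
 */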
*/ slave_no = bond->rr_tx_counter++ % bond->slave_cnt; bond_for_each_slave(bond, slave, i) { slave_no--; if (slave_no < 0) break; } } start_at = slave; bond_for_each_slave_from(bond, slave, i, start_at) { if (IS_UP(slave->dev) && (slave->link == BOND_LINK_UP) && bond_is_active_slave(slave)) { res = bond_dev_queue_xmit(bond, skb, slave->dev); break; } } out: if (res) { /* no suitable interface, frame not sent */ dev_kfree_skb(skb); } return NETDEV_TX_OK; } /* * in active-backup mode, we know that bond->curr_active_slave is always valid if * the bond has a usable interface. */ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); int res = 1; read_lock(&bond->curr_slave_lock); if (bond->curr_active_slave) res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); if (res) /* no suitable interface, frame not sent */ dev_kfree_skb(skb); read_unlock(&bond->curr_slave_lock); return NETDEV_TX_OK; } /* * In bond_xmit_xor() , we determine the output device by using a pre- * determined xmit_hash_policy(), If the selected device is not enabled, * find the next active slave. */ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *start_at; int slave_no; int i; int res = 1; slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); bond_for_each_slave(bond, slave, i) { slave_no--; if (slave_no < 0) break; } start_at = slave; bond_for_each_slave_from(bond, slave, i, start_at) { if (IS_UP(slave->dev) && (slave->link == BOND_LINK_UP) && bond_is_active_slave(slave)) { res = bond_dev_queue_xmit(bond, skb, slave->dev); break; } } if (res) { /* no suitable interface, frame not sent */ dev_kfree_skb(skb); } return NETDEV_TX_OK; } /* * in broadcast mode, we send everything to all usable interfaces. 
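 * (All suitable slaves but the last transmit an skb_clone(); the
 * original skb goes out on the last suitable device, as the loop
 * below shows.)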
*/ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *start_at; struct net_device *tx_dev = NULL; int i; int res = 1; read_lock(&bond->curr_slave_lock); start_at = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); if (!start_at) goto out; bond_for_each_slave_from(bond, slave, i, start_at) { if (IS_UP(slave->dev) && (slave->link == BOND_LINK_UP) && bond_is_active_slave(slave)) { if (tx_dev) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) { pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n", bond_dev->name); continue; } res = bond_dev_queue_xmit(bond, skb2, tx_dev); if (res) { dev_kfree_skb(skb2); continue; } } tx_dev = slave->dev; } } if (tx_dev) res = bond_dev_queue_xmit(bond, skb, tx_dev); out: if (res) /* no suitable interface, frame not sent */ dev_kfree_skb(skb); /* frame sent to all suitable interfaces */ return NETDEV_TX_OK; } /*------------------------- Device initialization ---------------------------*/ static void bond_set_xmit_hash_policy(struct bonding *bond) { switch (bond->params.xmit_policy) { case BOND_XMIT_POLICY_LAYER23: bond->xmit_hash_policy = bond_xmit_hash_policy_l23; break; case BOND_XMIT_POLICY_LAYER34: bond->xmit_hash_policy = bond_xmit_hash_policy_l34; break; case BOND_XMIT_POLICY_LAYER2: default: bond->xmit_hash_policy = bond_xmit_hash_policy_l2; break; } } /* * Lookup the slave that corresponds to a qid */ static inline int bond_slave_override(struct bonding *bond, struct sk_buff *skb) { int i, res = 1; struct slave *slave = NULL; struct slave *check_slave; if (!skb->queue_mapping) return 1; /* Find out if any slaves have the same mapping as this skb. */ bond_for_each_slave(bond, check_slave, i) { if (check_slave->queue_id == skb->queue_mapping) { slave = check_slave; break; } } /* If the slave isn't UP, use default transmit policy. */ if (slave && slave->queue_id && IS_UP(slave->dev) && (slave->link == BOND_LINK_UP)) { res = bond_dev_queue_xmit(bond, skb, slave->dev); } return res; } static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) { /* * This helper function exists to help dev_pick_tx get the correct * destination queue. Using a helper function skips a call to * skb_tx_hash and will put the skbs in the queue we expect on their * way down to the bonding driver. */ u16 txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0; /* * Save the original txq to restore before passing to the driver */ bond_queue_mapping(skb) = skb->queue_mapping; if (unlikely(txq >= dev->real_num_tx_queues)) { do { txq -= dev->real_num_tx_queues; } while (txq >= dev->real_num_tx_queues); } return txq; } static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bonding *bond = netdev_priv(dev); if (TX_QUEUE_OVERRIDE(bond->params.mode)) { if (!bond_slave_override(bond, skb)) return NETDEV_TX_OK; } switch (bond->params.mode) { case BOND_MODE_ROUNDROBIN: return bond_xmit_roundrobin(skb, dev); case BOND_MODE_ACTIVEBACKUP: return bond_xmit_activebackup(skb, dev); case BOND_MODE_XOR: return bond_xmit_xor(skb, dev); case BOND_MODE_BROADCAST: return bond_xmit_broadcast(skb, dev); case BOND_MODE_8023AD: return bond_3ad_xmit_xor(skb, dev); case BOND_MODE_ALB: case BOND_MODE_TLB: return bond_alb_xmit(skb, dev); default: /* Should never happen, mode already checked */ pr_err("%s: Error: Unknown bonding mode %d\n", dev->name, bond->params.mode); WARN_ON_ONCE(1); dev_kfree_skb(skb); return NETDEV_TX_OK; } } static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bonding *bond = netdev_priv(dev); netdev_tx_t ret = NETDEV_TX_OK; /* * If we risk deadlock from transmitting this in the * netpoll path, tell netpoll to queue the frame for later tx */ if (is_netpoll_tx_blocked(dev)) return NETDEV_TX_BUSY; read_lock(&bond->lock); if (bond->slave_cnt) ret = __bond_start_xmit(skb, dev); else dev_kfree_skb(skb); read_unlock(&bond->lock); return ret; } /* * set bond mode specific net device operations */ void bond_set_mode_ops(struct bonding *bond, int mode) { struct net_device *bond_dev = bond->dev; switch (mode) { case BOND_MODE_ROUNDROBIN: break; case BOND_MODE_ACTIVEBACKUP: break; case BOND_MODE_XOR: bond_set_xmit_hash_policy(bond); break; case BOND_MODE_BROADCAST: break; case BOND_MODE_8023AD: bond_set_xmit_hash_policy(bond); break; case BOND_MODE_ALB: /* FALLTHRU */ case BOND_MODE_TLB: break; default: /* Should never happen, mode already checked */ pr_err("%s: Error: Unknown bonding mode %d\n", bond_dev->name, mode); break; } } static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, struct ethtool_drvinfo *drvinfo) { strncpy(drvinfo->driver, DRV_NAME, 32); strncpy(drvinfo->version, DRV_VERSION, 32); snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION); } static const struct ethtool_ops bond_ethtool_ops = { .get_drvinfo = bond_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, }; static const struct net_device_ops bond_netdev_ops = { .ndo_init = bond_init, .ndo_uninit = bond_uninit, .ndo_open = bond_open, .ndo_stop = bond_close, .ndo_start_xmit = bond_start_xmit, .ndo_select_queue = bond_select_queue, .ndo_get_stats64 = bond_get_stats, .ndo_do_ioctl = bond_do_ioctl, .ndo_set_multicast_list = bond_set_multicast_list, .ndo_change_mtu = bond_change_mtu, .ndo_set_mac_address = bond_set_mac_address, .ndo_neigh_setup = bond_neigh_setup, .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_netpoll_setup = bond_netpoll_setup, .ndo_netpoll_cleanup = bond_netpoll_cleanup, .ndo_poll_controller = bond_poll_controller, #endif .ndo_add_slave = bond_enslave, .ndo_del_slave = bond_release, .ndo_fix_features = bond_fix_features, }; static void bond_destructor(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); if (bond->wq) destroy_workqueue(bond->wq); 
free_netdev(bond_dev); } static void bond_setup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); /* initialize rwlocks */ rwlock_init(&bond->lock); rwlock_init(&bond->curr_slave_lock); bond->params = bonding_defaults; /* Initialize pointers */ bond->dev = bond_dev; INIT_LIST_HEAD(&bond->vlan_list); /* Initialize the device entry points */ ether_setup(bond_dev); bond_dev->netdev_ops = &bond_netdev_ops; bond_dev->ethtool_ops = &bond_ethtool_ops; bond_set_mode_ops(bond, bond->params.mode); bond_dev->destructor = bond_destructor; /* Initialize the device options */ bond_dev->tx_queue_len = 0; bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; bond_dev->priv_flags |= IFF_BONDING; bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; /* At first, we block adding VLANs. That's the only way to * prevent problems that occur when adding VLANs over an * empty bond. The block will be removed once non-challenged * slaves are enslaved. */ bond_dev->features |= NETIF_F_VLAN_CHALLENGED; /* don't acquire bond device's netif_tx_lock when * transmitting */ bond_dev->features |= NETIF_F_LLTX; /* By default, we declare the bond to be fully * VLAN hardware accelerated capable. Special * care is taken in the various xmit functions * when there are slaves that are not hw accel * capable */ bond_dev->hw_features = BOND_VLAN_FEATURES | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM); bond_dev->features |= bond_dev->hw_features; } static void bond_work_cancel_all(struct bonding *bond) { write_lock_bh(&bond->lock); bond->kill_timers = 1; write_unlock_bh(&bond->lock); if (bond->params.miimon && delayed_work_pending(&bond->mii_work)) cancel_delayed_work(&bond->mii_work); if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work)) cancel_delayed_work(&bond->arp_work); if (bond->params.mode == BOND_MODE_ALB && delayed_work_pending(&bond->alb_work)) cancel_delayed_work(&bond->alb_work); if (bond->params.mode == BOND_MODE_8023AD && delayed_work_pending(&bond->ad_work)) cancel_delayed_work(&bond->ad_work); if (delayed_work_pending(&bond->mcast_work)) cancel_delayed_work(&bond->mcast_work); } /* * Destroy a bonding device. * Must be under rtnl_lock when this function is called. */ static void bond_uninit(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct vlan_entry *vlan, *tmp; bond_netpoll_cleanup(bond_dev); /* Release the bonded slaves */ bond_release_all(bond_dev); list_del(&bond->bond_list); bond_work_cancel_all(bond); bond_remove_proc_entry(bond); bond_debug_unregister(bond); __hw_addr_flush(&bond->mc_list); list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { list_del(&vlan->vlan_list); kfree(vlan); } } /*------------------------- Module initialization ---------------------------*/ /* * Convert string input module parms. Accept either the * number of the mode or its string name. A bit complicated because * some mode names are substrings of other names, and calls from sysfs * may have whitespace in the name (trailing newlines, for example). 
*/ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl) { int modeint = -1, i, rv; char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, }; for (p = (char *)buf; *p; p++) if (!(isdigit(*p) || isspace(*p))) break; if (*p) rv = sscanf(buf, "%20s", modestr); else rv = sscanf(buf, "%d", &modeint); if (!rv) return -1; for (i = 0; tbl[i].modename; i++) { if (modeint == tbl[i].mode) return tbl[i].mode; if (strcmp(modestr, tbl[i].modename) == 0) return tbl[i].mode; } return -1; } static int bond_check_params(struct bond_params *params) { int arp_validate_value, fail_over_mac_value, primary_reselect_value; /* * Convert string parameters. */ if (mode) { bond_mode = bond_parse_parm(mode, bond_mode_tbl); if (bond_mode == -1) { pr_err("Error: Invalid bonding mode \"%s\"\n", mode == NULL ? "NULL" : mode); return -EINVAL; } } if (xmit_hash_policy) { if ((bond_mode != BOND_MODE_XOR) && (bond_mode != BOND_MODE_8023AD)) { pr_info("xmit_hash_policy param is irrelevant in mode %s\n", bond_mode_name(bond_mode)); } else { xmit_hashtype = bond_parse_parm(xmit_hash_policy, xmit_hashtype_tbl); if (xmit_hashtype == -1) { pr_err("Error: Invalid xmit_hash_policy \"%s\"\n", xmit_hash_policy == NULL ? "NULL" : xmit_hash_policy); return -EINVAL; } } } if (lacp_rate) { if (bond_mode != BOND_MODE_8023AD) { pr_info("lacp_rate param is irrelevant in mode %s\n", bond_mode_name(bond_mode)); } else { lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl); if (lacp_fast == -1) { pr_err("Error: Invalid lacp rate \"%s\"\n", lacp_rate == NULL ? "NULL" : lacp_rate); return -EINVAL; } } } if (ad_select) { params->ad_select = bond_parse_parm(ad_select, ad_select_tbl); if (params->ad_select == -1) { pr_err("Error: Invalid ad_select \"%s\"\n", ad_select == NULL ? "NULL" : ad_select); return -EINVAL; } if (bond_mode != BOND_MODE_8023AD) { pr_warning("ad_select param only affects 802.3ad mode\n"); } } else { params->ad_select = BOND_AD_STABLE; } if (max_bonds < 0) { pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n", max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS); max_bonds = BOND_DEFAULT_MAX_BONDS; } if (miimon < 0) { pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to %d\n", miimon, INT_MAX, BOND_LINK_MON_INTERV); miimon = BOND_LINK_MON_INTERV; } if (updelay < 0) { pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n", updelay, INT_MAX); updelay = 0; } if (downdelay < 0) { pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n", downdelay, INT_MAX); downdelay = 0; } if ((use_carrier != 0) && (use_carrier != 1)) { pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n", use_carrier); use_carrier = 1; } if (num_peer_notif < 0 || num_peer_notif > 255) { pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", num_peer_notif); num_peer_notif = 1; } /* reset values for 802.3ad */ if (bond_mode == BOND_MODE_8023AD) { if (!miimon) { pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); pr_warning("Forcing miimon to 100msec\n"); miimon = 100; } } if (tx_queues < 1 || tx_queues > 255) { pr_warning("Warning: tx_queues (%d) should be between " "1 and 255, resetting to %d\n", tx_queues, BOND_DEFAULT_TX_QUEUES); tx_queues = BOND_DEFAULT_TX_QUEUES; } if ((all_slaves_active != 
0) && (all_slaves_active != 1)) { pr_warning("Warning: all_slaves_active module parameter (%d), " "not of valid value (0/1), so it was set to " "0\n", all_slaves_active); all_slaves_active = 0; } if (resend_igmp < 0 || resend_igmp > 255) { pr_warning("Warning: resend_igmp (%d) should be between " "0 and 255, resetting to %d\n", resend_igmp, BOND_DEFAULT_RESEND_IGMP); resend_igmp = BOND_DEFAULT_RESEND_IGMP; } /* reset values for TLB/ALB */ if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) { if (!miimon) { pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n"); pr_warning("Forcing miimon to 100msec\n"); miimon = 100; } } if (bond_mode == BOND_MODE_ALB) { pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n", updelay); } if (!miimon) { if (updelay || downdelay) { /* just warn the user the up/down delay will have * no effect since miimon is zero... */ pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n", updelay, downdelay); } } else { /* don't allow arp monitoring */ if (arp_interval) { pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n", miimon, arp_interval); arp_interval = 0; } if ((updelay % miimon) != 0) { pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n", updelay, miimon, (updelay / miimon) * miimon); } updelay /= miimon; if ((downdelay % miimon) != 0) { pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n", downdelay, miimon, (downdelay / miimon) * miimon); } downdelay /= miimon; } if (arp_interval < 0) { pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to %d\n", arp_interval, INT_MAX, BOND_LINK_ARP_INTERV); arp_interval = BOND_LINK_ARP_INTERV; } for (arp_ip_count = 0; (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count]; arp_ip_count++) { /* not complete check, but should be good enough to catch mistakes */ if (!isdigit(arp_ip_target[arp_ip_count][0])) { pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", arp_ip_target[arp_ip_count]); arp_interval = 0; } else { __be32 ip = in_aton(arp_ip_target[arp_ip_count]); arp_target[arp_ip_count] = ip; } } if (arp_interval && !arp_ip_count) { /* don't allow arping if no arp_ip_target given... */ pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n", arp_interval); arp_interval = 0; } if (arp_validate) { if (bond_mode != BOND_MODE_ACTIVEBACKUP) { pr_err("arp_validate only supported in active-backup mode\n"); return -EINVAL; } if (!arp_interval) { pr_err("arp_validate requires arp_interval\n"); return -EINVAL; } arp_validate_value = bond_parse_parm(arp_validate, arp_validate_tbl); if (arp_validate_value == -1) { pr_err("Error: invalid arp_validate \"%s\"\n", arp_validate == NULL ? 
"NULL" : arp_validate); return -EINVAL; } } else arp_validate_value = 0; if (miimon) { pr_info("MII link monitoring set to %d ms\n", miimon); } else if (arp_interval) { int i; pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):", arp_interval, arp_validate_tbl[arp_validate_value].modename, arp_ip_count); for (i = 0; i < arp_ip_count; i++) pr_info(" %s", arp_ip_target[i]); pr_info("\n"); } else if (max_bonds) { /* miimon and arp_interval not set, we need one so things * work as expected, see bonding.txt for details */ pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n"); } if (primary && !USES_PRIMARY(bond_mode)) { /* currently, using a primary only makes sense * in active backup, TLB or ALB modes */ pr_warning("Warning: %s primary device specified but has no effect in %s mode\n", primary, bond_mode_name(bond_mode)); primary = NULL; } if (primary && primary_reselect) { primary_reselect_value = bond_parse_parm(primary_reselect, pri_reselect_tbl); if (primary_reselect_value == -1) { pr_err("Error: Invalid primary_reselect \"%s\"\n", primary_reselect == NULL ? "NULL" : primary_reselect); return -EINVAL; } } else { primary_reselect_value = BOND_PRI_RESELECT_ALWAYS; } if (fail_over_mac) { fail_over_mac_value = bond_parse_parm(fail_over_mac, fail_over_mac_tbl); if (fail_over_mac_value == -1) { pr_err("Error: invalid fail_over_mac \"%s\"\n", arp_validate == NULL ? "NULL" : arp_validate); return -EINVAL; } if (bond_mode != BOND_MODE_ACTIVEBACKUP) pr_warning("Warning: fail_over_mac only affects active-backup mode.\n"); } else { fail_over_mac_value = BOND_FOM_NONE; } /* fill params struct with the proper values */ params->mode = bond_mode; params->xmit_policy = xmit_hashtype; params->miimon = miimon; params->num_peer_notif = num_peer_notif; params->arp_interval = arp_interval; params->arp_validate = arp_validate_value; params->updelay = updelay; params->downdelay = downdelay; params->use_carrier = use_carrier; params->lacp_fast = lacp_fast; params->primary[0] = 0; params->primary_reselect = primary_reselect_value; params->fail_over_mac = fail_over_mac_value; params->tx_queues = tx_queues; params->all_slaves_active = all_slaves_active; params->resend_igmp = resend_igmp; params->min_links = min_links; if (primary) { strncpy(params->primary, primary, IFNAMSIZ); params->primary[IFNAMSIZ - 1] = 0; } memcpy(params->arp_targets, arp_target, sizeof(arp_target)); return 0; } static struct lock_class_key bonding_netdev_xmit_lock_key; static struct lock_class_key bonding_netdev_addr_lock_key; static void bond_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &bonding_netdev_xmit_lock_key); } static void bond_set_lockdep_class(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &bonding_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL); } /* * Called from registration process */ static int bond_init(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); pr_debug("Begin bond_init for %s\n", bond_dev->name); /* * Initialize locks that may be required during * en/deslave operations. 
All of the bond_open work * (of which this is part) should really be moved to * a phase prior to dev_open */ spin_lock_init(&(bond_info->tx_hashtbl_lock)); spin_lock_init(&(bond_info->rx_hashtbl_lock)); bond->wq = create_singlethread_workqueue(bond_dev->name); if (!bond->wq) return -ENOMEM; bond_set_lockdep_class(bond_dev); bond_create_proc_entry(bond); list_add_tail(&bond->bond_list, &bn->dev_list); bond_prepare_sysfs_group(bond); bond_debug_register(bond); __hw_addr_init(&bond->mc_list); return 0; } static int bond_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } return 0; } static struct rtnl_link_ops bond_link_ops __read_mostly = { .kind = "bond", .priv_size = sizeof(struct bonding), .setup = bond_setup, .validate = bond_validate, }; /* Create a new bond based on the specified name and bonding parameters. * If name is NULL, obtain a suitable "bond%d" name for us. * Caller must NOT hold rtnl_lock; we need to release it here before we * set up our sysfs entries. */ int bond_create(struct net *net, const char *name) { struct net_device *bond_dev; int res; rtnl_lock(); bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "bond%d", bond_setup, tx_queues); if (!bond_dev) { pr_err("%s: eek! can't alloc netdev!\n", name); rtnl_unlock(); return -ENOMEM; } dev_net_set(bond_dev, net); bond_dev->rtnl_link_ops = &bond_link_ops; res = register_netdevice(bond_dev); netif_carrier_off(bond_dev); rtnl_unlock(); if (res < 0) bond_destructor(bond_dev); return res; } static int __net_init bond_net_init(struct net *net) { struct bond_net *bn = net_generic(net, bond_net_id); bn->net = net; INIT_LIST_HEAD(&bn->dev_list); bond_create_proc_dir(bn); return 0; } static void __net_exit bond_net_exit(struct net *net) { struct bond_net *bn = net_generic(net, bond_net_id); bond_destroy_proc_dir(bn); } static struct pernet_operations bond_net_ops = { .init = bond_net_init, .exit = bond_net_exit, .id = &bond_net_id, .size = sizeof(struct bond_net), }; static int __init bonding_init(void) { int i; int res; pr_info("%s", bond_version); res = bond_check_params(&bonding_defaults); if (res) goto out; res = register_pernet_subsys(&bond_net_ops); if (res) goto out; res = rtnl_link_register(&bond_link_ops); if (res) goto err_link; bond_create_debugfs(); for (i = 0; i < max_bonds; i++) { res = bond_create(&init_net, NULL); if (res) goto err; } res = bond_create_sysfs(); if (res) goto err; register_netdevice_notifier(&bond_netdev_notifier); register_inetaddr_notifier(&bond_inetaddr_notifier); out: return res; err: rtnl_link_unregister(&bond_link_ops); err_link: unregister_pernet_subsys(&bond_net_ops); goto out; } static void __exit bonding_exit(void) { unregister_netdevice_notifier(&bond_netdev_notifier); unregister_inetaddr_notifier(&bond_inetaddr_notifier); bond_destroy_sysfs(); bond_destroy_debugfs(); rtnl_link_unregister(&bond_link_ops); unregister_pernet_subsys(&bond_net_ops); #ifdef CONFIG_NET_POLL_CONTROLLER /* * Make sure we don't have an imbalance on our netpoll blocking */ WARN_ON(atomic_read(&netpoll_block_tx)); #endif } module_init(bonding_init); module_exit(bonding_exit); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION); MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others"); MODULE_ALIAS_RTNL_LINK("bond");
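/*
 * Editorial sketch appended by the editor -- not part of the original
 * bonding driver file above. It shows, in plain userspace C, the two
 * selection ideas used there: the free-running round-robin transmit
 * counter (bond_xmit_roundrobin) and the queue-index wrap loop
 * (bond_select_queue). All names below are hypothetical and exist only
 * for illustration.
 */
#include <stdio.h>

/* Pick the next slave index from a free-running counter, as round-robin
 * mode does with bond->rr_tx_counter modulo bond->slave_cnt. */
static unsigned int rr_pick(unsigned int *counter, unsigned int slave_cnt)
{
	return (*counter)++ % slave_cnt;
}

/* Fold an out-of-range queue id back into range by repeated subtraction,
 * mirroring the do/while loop in bond_select_queue(); for txq values
 * this is equivalent to txq % real_num_tx_queues. */
static unsigned int fold_txq(unsigned int txq, unsigned int real_num_tx_queues)
{
	while (txq >= real_num_tx_queues)
		txq -= real_num_tx_queues;
	return txq;
}

int main(void)
{
	unsigned int counter = 0;

	for (int i = 0; i < 6; i++)
		printf("slave %u\n", rr_pick(&counter, 3)); /* 0 1 2 0 1 2 */
	printf("txq %u\n", fold_txq(7, 3));                 /* 1 */
	return 0;
}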
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3524_1
crossvul-cpp_data_bad_5861_23
/* * Glue Code for assembler optimized version of Blowfish * * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * CTR part based on code (crypto/ctr.c) by: * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <asm/processor.h> #include <crypto/blowfish.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <crypto/algapi.h> /* regular block cipher functions */ asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src, bool xor); asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src); /* 4-way parallel cipher functions */ asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst, const u8 *src, bool xor); asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst, const u8 *src); static inline void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src) { __blowfish_enc_blk(ctx, dst, src, false); } static inline void blowfish_enc_blk_xor(struct bf_ctx *ctx, u8 *dst, const u8 *src) { __blowfish_enc_blk(ctx, dst, src, true); } static inline void blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst, const u8 *src) { __blowfish_enc_blk_4way(ctx, dst, src, false); } static inline void blowfish_enc_blk_xor_4way(struct bf_ctx *ctx, u8 *dst, const u8 *src) { __blowfish_enc_blk_4way(ctx, dst, src, true); } static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { blowfish_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src); } static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, void (*fn)(struct bf_ctx *, u8 *, const u8 *), void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *)) { struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = BF_BLOCK_SIZE; unsigned int nbytes; int err; err = blkcipher_walk_virt(desc, walk); while ((nbytes = walk->nbytes)) { u8 *wsrc = walk->src.virt.addr; u8 *wdst = walk->dst.virt.addr; /* Process four block batch */ if (nbytes >= bsize * 4) { do { fn_4way(ctx, wdst, wsrc); wsrc += bsize * 4; wdst += bsize * 4; nbytes -= bsize * 4; } while (nbytes >= bsize * 4); if (nbytes < bsize) goto done; } /* Handle leftovers */ do { fn(ctx, wdst, wsrc); wsrc += bsize; wdst += bsize; nbytes -= bsize; } while (nbytes >= bsize); done: err = blkcipher_walk_done(desc, walk, nbytes); } return err; } static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_crypt(desc, &walk, blowfish_enc_blk, 
blowfish_enc_blk_4way); } static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way); } static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = BF_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 *iv = (u64 *)walk->iv; do { *dst = *src ^ *iv; blowfish_enc_blk(ctx, (u8 *)dst, (u8 *)dst); iv = dst; src += 1; dst += 1; nbytes -= bsize; } while (nbytes >= bsize); *(u64 *)walk->iv = *iv; return nbytes; } static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { nbytes = __cbc_encrypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = BF_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 ivs[4 - 1]; u64 last_iv; /* Start of the last block. */ src += nbytes / bsize - 1; dst += nbytes / bsize - 1; last_iv = *src; /* Process four block batch */ if (nbytes >= bsize * 4) { do { nbytes -= bsize * 4 - bsize; src -= 4 - 1; dst -= 4 - 1; ivs[0] = src[0]; ivs[1] = src[1]; ivs[2] = src[2]; blowfish_dec_blk_4way(ctx, (u8 *)dst, (u8 *)src); dst[1] ^= ivs[0]; dst[2] ^= ivs[1]; dst[3] ^= ivs[2]; nbytes -= bsize; if (nbytes < bsize) goto done; *dst ^= *(src - 1); src -= 1; dst -= 1; } while (nbytes >= bsize * 4); } /* Handle leftovers */ for (;;) { blowfish_dec_blk(ctx, (u8 *)dst, (u8 *)src); nbytes -= bsize; if (nbytes < bsize) break; *dst ^= *(src - 1); src -= 1; dst -= 1; } done: *dst ^= *(u64 *)walk->iv; *(u64 *)walk->iv = last_iv; return nbytes; } static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { nbytes = __cbc_decrypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk) { u8 *ctrblk = walk->iv; u8 keystream[BF_BLOCK_SIZE]; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; blowfish_enc_blk(ctx, keystream, ctrblk); crypto_xor(keystream, src, nbytes); memcpy(dst, keystream, nbytes); crypto_inc(ctrblk, BF_BLOCK_SIZE); } static unsigned int __ctr_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = BF_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); __be64 ctrblocks[4]; /* Process four block batch */ if (nbytes >= bsize * 4) { do { if (dst != src) { dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } /* create ctrblks for 
parallel encrypt */ ctrblocks[0] = cpu_to_be64(ctrblk++); ctrblocks[1] = cpu_to_be64(ctrblk++); ctrblocks[2] = cpu_to_be64(ctrblk++); ctrblocks[3] = cpu_to_be64(ctrblk++); blowfish_enc_blk_xor_4way(ctx, (u8 *)dst, (u8 *)ctrblocks); src += 4; dst += 4; } while ((nbytes -= bsize * 4) >= bsize * 4); if (nbytes < bsize) goto done; } /* Handle leftovers */ do { if (dst != src) *dst = *src; ctrblocks[0] = cpu_to_be64(ctrblk++); blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks); src += 1; dst += 1; } while ((nbytes -= bsize) >= bsize); done: *(__be64 *)walk->iv = cpu_to_be64(ctrblk); return nbytes; } static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE); while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) { nbytes = __ctr_crypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); err = blkcipher_walk_done(desc, &walk, 0); } return err; } static struct crypto_alg bf_algs[4] = { { .cra_name = "blowfish", .cra_driver_name = "blowfish-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = BF_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bf_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = BF_MIN_KEY_SIZE, .cia_max_keysize = BF_MAX_KEY_SIZE, .cia_setkey = blowfish_setkey, .cia_encrypt = blowfish_encrypt, .cia_decrypt = blowfish_decrypt, } } }, { .cra_name = "ecb(blowfish)", .cra_driver_name = "ecb-blowfish-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = BF_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bf_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = BF_MIN_KEY_SIZE, .max_keysize = BF_MAX_KEY_SIZE, .setkey = blowfish_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, }, }, { .cra_name = "cbc(blowfish)", .cra_driver_name = "cbc-blowfish-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = BF_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bf_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = BF_MIN_KEY_SIZE, .max_keysize = BF_MAX_KEY_SIZE, .ivsize = BF_BLOCK_SIZE, .setkey = blowfish_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }, }, { .cra_name = "ctr(blowfish)", .cra_driver_name = "ctr-blowfish-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct bf_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = BF_MIN_KEY_SIZE, .max_keysize = BF_MAX_KEY_SIZE, .ivsize = BF_BLOCK_SIZE, .setkey = blowfish_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, }, }, } }; static bool is_blacklisted_cpu(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; if (boot_cpu_data.x86 == 0x0f) { /* * On Pentium 4, blowfish-x86_64 is slower than generic C * implementation because use of 64bit rotates (which are really * slow on P4). Therefore blacklist P4s. 
*/ return true; } return false; } static int force; module_param(force, int, 0); MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); static int __init init(void) { if (!force && is_blacklisted_cpu()) { printk(KERN_INFO "blowfish-x86_64: performance on this CPU " "would be suboptimal: disabling " "blowfish-x86_64.\n"); return -ENODEV; } return crypto_register_algs(bf_algs, ARRAY_SIZE(bf_algs)); } static void __exit fini(void) { crypto_unregister_algs(bf_algs, ARRAY_SIZE(bf_algs)); } module_init(init); module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized"); MODULE_ALIAS("blowfish"); MODULE_ALIAS("blowfish-asm");
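/*
 * Editorial sketch appended by the editor -- not part of the original
 * blowfish glue file above. It shows how CTR mode turns a block cipher
 * into a stream cipher, the pattern __ctr_crypt() and ctr_crypt_final()
 * implement: encrypt an incrementing counter, XOR the result into the
 * data, and XOR only the surviving bytes of a partial final block.
 * toy_encrypt() is a placeholder mixing step standing in for the real
 * Blowfish block function; it is NOT cryptography.
 */
#include <stdint.h>
#include <stdio.h>

#define BLK 8	/* Blowfish block size in bytes */

/* Placeholder "block cipher": a fixed, keyless permutation. */
static uint64_t toy_encrypt(uint64_t block)
{
	block ^= 0x9e3779b97f4a7c15ULL;
	return (block << 13) | (block >> 51);
}

/* Encrypt (or decrypt -- CTR is its own inverse) len bytes. */
static void ctr_crypt(uint8_t *dst, const uint8_t *src, size_t len,
		      uint64_t ctr)
{
	while (len) {
		uint64_t ks = toy_encrypt(ctr++);	/* keystream block */
		size_t n = len < BLK ? len : BLK;	/* partial tail ok */

		for (size_t i = 0; i < n; i++)
			dst[i] = src[i] ^ ((uint8_t *)&ks)[i];
		src += n; dst += n; len -= n;
	}
}

int main(void)
{
	uint8_t msg[] = "attack at dawn", out[sizeof(msg)], back[sizeof(msg)];

	ctr_crypt(out, msg, sizeof(msg), 42);
	ctr_crypt(back, out, sizeof(out), 42);	/* same op decrypts */
	printf("%s\n", back);
	return 0;
}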
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_23
crossvul-cpp_data_bad_5861_31
/* * Cryptographic API. * * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions * * Copyright (C) 2013 Intel Corporation * Author: Tim Chen <tim.c.chen@linux.intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/types.h> #include <linux/module.h> #include <linux/crc-t10dif.h> #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/string.h> #include <linux/kernel.h> #include <asm/i387.h> #include <asm/cpufeature.h> #include <asm/cpu_device_id.h> asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf, size_t len); struct chksum_desc_ctx { __u16 crc; }; /* * Steps through buffer one byte at at time, calculates reflected * crc using table. */ static int chksum_init(struct shash_desc *desc) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); ctx->crc = 0; return 0; } static int chksum_update(struct shash_desc *desc, const u8 *data, unsigned int length) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); if (irq_fpu_usable()) { kernel_fpu_begin(); ctx->crc = crc_t10dif_pcl(ctx->crc, data, length); kernel_fpu_end(); } else ctx->crc = crc_t10dif_generic(ctx->crc, data, length); return 0; } static int chksum_final(struct shash_desc *desc, u8 *out) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); *(__u16 *)out = ctx->crc; return 0; } static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, u8 *out) { if (irq_fpu_usable()) { kernel_fpu_begin(); *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len); kernel_fpu_end(); } else *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); return 0; } static int chksum_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); return __chksum_finup(&ctx->crc, data, len, out); } static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); return __chksum_finup(&ctx->crc, data, length, out); } static struct shash_alg alg = { .digestsize = CRC_T10DIF_DIGEST_SIZE, .init = chksum_init, .update = chksum_update, .final = chksum_final, .finup = chksum_finup, .digest = chksum_digest, .descsize = sizeof(struct chksum_desc_ctx), .base = { .cra_name = "crct10dif", .cra_driver_name = "crct10dif-pclmul", .cra_priority = 200, .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static const struct x86_cpu_id crct10dif_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), {} }; MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id); static int __init crct10dif_intel_mod_init(void) { if (!x86_match_cpu(crct10dif_cpu_id)) return -ENODEV; return crypto_register_shash(&alg); } static void __exit crct10dif_intel_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(crct10dif_intel_mod_init); module_exit(crct10dif_intel_mod_fini); 
MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ."); MODULE_LICENSE("GPL"); MODULE_ALIAS("crct10dif"); MODULE_ALIAS("crct10dif-pclmul");
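/*
 * Editorial sketch appended by the editor -- not part of the original
 * crct10dif glue file above. It shows the init/update/final calling
 * shape that the shash glue implements: state lives in a small context,
 * update() may be called repeatedly as data arrives in pieces, and
 * final() emits the digest. The checksum here is a trivial byte sum
 * standing in for crc_t10dif; only the interface pattern is the point.
 */
#include <stdint.h>
#include <stdio.h>

struct chksum_ctx { uint16_t crc; };

static void chk_init(struct chksum_ctx *c)
{
	c->crc = 0;
}

static void chk_update(struct chksum_ctx *c, const uint8_t *p, size_t n)
{
	while (n--)		/* incremental: callable any number of times */
		c->crc += *p++;
}

static uint16_t chk_final(const struct chksum_ctx *c)
{
	return c->crc;
}

int main(void)
{
	struct chksum_ctx c;

	chk_init(&c);
	chk_update(&c, (const uint8_t *)"hello ", 6);	/* data arrives */
	chk_update(&c, (const uint8_t *)"world", 5);	/* in pieces    */
	printf("%04x\n", chk_final(&c));
	return 0;
}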
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_31
crossvul-cpp_data_good_2399_6
/* * chainiv: Chain IV Generator * * Generate IVs simply be using the last block of the previous encryption. * This is mainly useful for CBC with a synchronous algorithm. * * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/skcipher.h> #include <crypto/rng.h> #include <crypto/crypto_wq.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/workqueue.h> enum { CHAINIV_STATE_INUSE = 0, }; struct chainiv_ctx { spinlock_t lock; char iv[]; }; struct async_chainiv_ctx { unsigned long state; spinlock_t lock; int err; struct crypto_queue queue; struct work_struct postponed; char iv[]; }; static int chainiv_givencrypt(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); unsigned int ivsize; int err; ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); ablkcipher_request_set_callback(subreq, req->creq.base.flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, req->creq.base.complete, req->creq.base.data); ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, req->creq.nbytes, req->creq.info); spin_lock_bh(&ctx->lock); ivsize = crypto_ablkcipher_ivsize(geniv); memcpy(req->giv, ctx->iv, ivsize); memcpy(subreq->info, ctx->iv, ivsize); err = crypto_ablkcipher_encrypt(subreq); if (err) goto unlock; memcpy(ctx->iv, subreq->info, ivsize); unlock: spin_unlock_bh(&ctx->lock); return err; } static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); int err = 0; spin_lock_bh(&ctx->lock); if (crypto_ablkcipher_crt(geniv)->givencrypt != chainiv_givencrypt_first) goto unlock; crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt; err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, crypto_ablkcipher_ivsize(geniv)); unlock: spin_unlock_bh(&ctx->lock); if (err) return err; return chainiv_givencrypt(req); } static int chainiv_init_common(struct crypto_tfm *tfm) { tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); return skcipher_geniv_init(tfm); } static int chainiv_init(struct crypto_tfm *tfm) { struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); spin_lock_init(&ctx->lock); return chainiv_init_common(tfm); } static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) { int queued; int err = ctx->err; if (!ctx->queue.qlen) { smp_mb__before_atomic(); clear_bit(CHAINIV_STATE_INUSE, &ctx->state); if (!ctx->queue.qlen || test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) goto out; } queued = queue_work(kcrypto_wq, &ctx->postponed); BUG_ON(!queued); out: return err; } static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); int err; spin_lock_bh(&ctx->lock); err = skcipher_enqueue_givcrypt(&ctx->queue, req); spin_unlock_bh(&ctx->lock); if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) return err; ctx->err = err; 
return async_chainiv_schedule_work(ctx); } static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); unsigned int ivsize = crypto_ablkcipher_ivsize(geniv); memcpy(req->giv, ctx->iv, ivsize); memcpy(subreq->info, ctx->iv, ivsize); ctx->err = crypto_ablkcipher_encrypt(subreq); if (ctx->err) goto out; memcpy(ctx->iv, subreq->info, ivsize); out: return async_chainiv_schedule_work(ctx); } static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); ablkcipher_request_set_callback(subreq, req->creq.base.flags, req->creq.base.complete, req->creq.base.data); ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, req->creq.nbytes, req->creq.info); if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) goto postpone; if (ctx->queue.qlen) { clear_bit(CHAINIV_STATE_INUSE, &ctx->state); goto postpone; } return async_chainiv_givencrypt_tail(req); postpone: return async_chainiv_postpone_request(req); } static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); int err = 0; if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) goto out; if (crypto_ablkcipher_crt(geniv)->givencrypt != async_chainiv_givencrypt_first) goto unlock; crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt; err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, crypto_ablkcipher_ivsize(geniv)); unlock: clear_bit(CHAINIV_STATE_INUSE, &ctx->state); if (err) return err; out: return async_chainiv_givencrypt(req); } static void async_chainiv_do_postponed(struct work_struct *work) { struct async_chainiv_ctx *ctx = container_of(work, struct async_chainiv_ctx, postponed); struct skcipher_givcrypt_request *req; struct ablkcipher_request *subreq; int err; /* Only handle one request at a time to avoid hogging keventd. 
*/ spin_lock_bh(&ctx->lock); req = skcipher_dequeue_givcrypt(&ctx->queue); spin_unlock_bh(&ctx->lock); if (!req) { async_chainiv_schedule_work(ctx); return; } subreq = skcipher_givcrypt_reqctx(req); subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP; err = async_chainiv_givencrypt_tail(req); local_bh_disable(); skcipher_givcrypt_complete(req, err); local_bh_enable(); } static int async_chainiv_init(struct crypto_tfm *tfm) { struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); spin_lock_init(&ctx->lock); crypto_init_queue(&ctx->queue, 100); INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); return chainiv_init_common(tfm); } static void async_chainiv_exit(struct crypto_tfm *tfm) { struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen); skcipher_geniv_exit(tfm); } static struct crypto_template chainiv_tmpl; static struct crypto_instance *chainiv_alloc(struct rtattr **tb) { struct crypto_attr_type *algt; struct crypto_instance *inst; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return ERR_CAST(algt); err = crypto_get_default_rng(); if (err) return ERR_PTR(err); inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); if (IS_ERR(inst)) goto put_rng; inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first; inst->alg.cra_init = chainiv_init; inst->alg.cra_exit = skcipher_geniv_exit; inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx); if (!crypto_requires_sync(algt->type, algt->mask)) { inst->alg.cra_flags |= CRYPTO_ALG_ASYNC; inst->alg.cra_ablkcipher.givencrypt = async_chainiv_givencrypt_first; inst->alg.cra_init = async_chainiv_init; inst->alg.cra_exit = async_chainiv_exit; inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx); } inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; out: return inst; put_rng: crypto_put_default_rng(); goto out; } static void chainiv_free(struct crypto_instance *inst) { skcipher_geniv_free(inst); crypto_put_default_rng(); } static struct crypto_template chainiv_tmpl = { .name = "chainiv", .alloc = chainiv_alloc, .free = chainiv_free, .module = THIS_MODULE, }; static int __init chainiv_module_init(void) { return crypto_register_template(&chainiv_tmpl); } static void chainiv_module_exit(void) { crypto_unregister_template(&chainiv_tmpl); } module_init(chainiv_module_init); module_exit(chainiv_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Chain IV Generator"); MODULE_ALIAS_CRYPTO("chainiv");
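/*
 * Editorial sketch appended by the editor -- not part of the original
 * chainiv file above. It shows only the IV flow that chainiv_givencrypt()
 * implements: the IV handed to each request is the stored chain value,
 * and after encrypting, the last "ciphertext block" becomes the stored
 * value for the next request. The cipher below is a placeholder mixing
 * function, and all locking/async machinery is omitted.
 */
#include <stdint.h>
#include <stdio.h>

struct chainiv { uint64_t iv; };

/* Placeholder CBC-style pass: returns the final "ciphertext block".
 * NOT cryptography -- just enough to make the chaining visible. */
static uint64_t fake_cbc_encrypt(uint64_t iv, const uint64_t *blocks,
				 size_t n)
{
	uint64_t prev = iv;

	for (size_t i = 0; i < n; i++)
		prev = (blocks[i] ^ prev) * 0x100000001b3ULL;
	return prev;
}

/* Mirrors chainiv_givencrypt(): hand out ctx->iv, encrypt, then store
 * the last ciphertext block as the IV for the next request. */
static uint64_t givencrypt(struct chainiv *ctx, const uint64_t *blocks,
			   size_t n)
{
	uint64_t giv = ctx->iv;				/* generated IV  */
	ctx->iv = fake_cbc_encrypt(giv, blocks, n);	/* chain forward */
	return giv;
}

int main(void)
{
	struct chainiv ctx = { .iv = 0x1234 };	/* in-kernel: RNG-seeded */
	uint64_t msg[2] = { 1, 2 };

	printf("iv1=%llx\n", (unsigned long long)givencrypt(&ctx, msg, 2));
	printf("iv2=%llx\n", (unsigned long long)givencrypt(&ctx, msg, 2));
	return 0;
}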
./CrossVul/dataset_final_sorted/CWE-264/c/good_2399_6
crossvul-cpp_data_good_4858_0
/* * Encryption policy functions for per-file encryption support. * * Copyright (C) 2015, Google, Inc. * Copyright (C) 2015, Motorola Mobility. * * Written by Michael Halcrow, 2015. * Modified by Jaegeuk Kim, 2015. */ #include <linux/random.h> #include <linux/string.h> #include <linux/fscrypto.h> static int inode_has_encryption_context(struct inode *inode) { if (!inode->i_sb->s_cop->get_context) return 0; return (inode->i_sb->s_cop->get_context(inode, NULL, 0L) > 0); } /* * check whether the policy is consistent with the encryption context * for the inode */ static int is_encryption_context_consistent_with_policy(struct inode *inode, const struct fscrypt_policy *policy) { struct fscrypt_context ctx; int res; if (!inode->i_sb->s_cop->get_context) return 0; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (res != sizeof(ctx)) return 0; return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, FS_KEY_DESCRIPTOR_SIZE) == 0 && (ctx.flags == policy->flags) && (ctx.contents_encryption_mode == policy->contents_encryption_mode) && (ctx.filenames_encryption_mode == policy->filenames_encryption_mode)); } static int create_encryption_context_from_policy(struct inode *inode, const struct fscrypt_policy *policy) { struct fscrypt_context ctx; int res; if (!inode->i_sb->s_cop->set_context) return -EOPNOTSUPP; if (inode->i_sb->s_cop->prepare_context) { res = inode->i_sb->s_cop->prepare_context(inode); if (res) return res; } ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, FS_KEY_DESCRIPTOR_SIZE); if (!fscrypt_valid_contents_enc_mode( policy->contents_encryption_mode)) { printk(KERN_WARNING "%s: Invalid contents encryption mode %d\n", __func__, policy->contents_encryption_mode); return -EINVAL; } if (!fscrypt_valid_filenames_enc_mode( policy->filenames_encryption_mode)) { printk(KERN_WARNING "%s: Invalid filenames encryption mode %d\n", __func__, policy->filenames_encryption_mode); return -EINVAL; } if (policy->flags & ~FS_POLICY_FLAGS_VALID) return -EINVAL; ctx.contents_encryption_mode = policy->contents_encryption_mode; ctx.filenames_encryption_mode = policy->filenames_encryption_mode; ctx.flags = policy->flags; BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE); get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL); } int fscrypt_process_policy(struct inode *inode, const struct fscrypt_policy *policy) { if (!inode_owner_or_capable(inode)) return -EACCES; if (policy->version != 0) return -EINVAL; if (!inode_has_encryption_context(inode)) { if (!inode->i_sb->s_cop->empty_dir) return -EOPNOTSUPP; if (!inode->i_sb->s_cop->empty_dir(inode)) return -ENOTEMPTY; return create_encryption_context_from_policy(inode, policy); } if (is_encryption_context_consistent_with_policy(inode, policy)) return 0; printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n", __func__); return -EINVAL; } EXPORT_SYMBOL(fscrypt_process_policy); int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy) { struct fscrypt_context ctx; int res; if (!inode->i_sb->s_cop->get_context || !inode->i_sb->s_cop->is_encrypted(inode)) return -ENODATA; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (res != sizeof(ctx)) return -ENODATA; if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1) return -EINVAL; policy->version = 0; policy->contents_encryption_mode = ctx.contents_encryption_mode; 
policy->filenames_encryption_mode = ctx.filenames_encryption_mode; policy->flags = ctx.flags; memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, FS_KEY_DESCRIPTOR_SIZE); return 0; } EXPORT_SYMBOL(fscrypt_get_policy); int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { struct fscrypt_info *parent_ci, *child_ci; int res; if ((parent == NULL) || (child == NULL)) { printk(KERN_ERR "parent %p child %p\n", parent, child); BUG_ON(1); } /* no restrictions if the parent directory is not encrypted */ if (!parent->i_sb->s_cop->is_encrypted(parent)) return 1; /* if the child directory is not encrypted, this is always a problem */ if (!parent->i_sb->s_cop->is_encrypted(child)) return 0; res = fscrypt_get_encryption_info(parent); if (res) return 0; res = fscrypt_get_encryption_info(child); if (res) return 0; parent_ci = parent->i_crypt_info; child_ci = child->i_crypt_info; if (!parent_ci && !child_ci) return 1; if (!parent_ci || !child_ci) return 0; return (memcmp(parent_ci->ci_master_key, child_ci->ci_master_key, FS_KEY_DESCRIPTOR_SIZE) == 0 && (parent_ci->ci_data_mode == child_ci->ci_data_mode) && (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) && (parent_ci->ci_flags == child_ci->ci_flags)); } EXPORT_SYMBOL(fscrypt_has_permitted_context); /** * fscrypt_inherit_context() - Sets a child context from its parent * @parent: Parent inode from which the context is inherited. * @child: Child inode that inherits the context from @parent. * @fs_data: private data given by FS. * @preload: preload child i_crypt_info * * Return: Zero on success, non-zero otherwise */ int fscrypt_inherit_context(struct inode *parent, struct inode *child, void *fs_data, bool preload) { struct fscrypt_context ctx; struct fscrypt_info *ci; int res; if (!parent->i_sb->s_cop->set_context) return -EOPNOTSUPP; res = fscrypt_get_encryption_info(parent); if (res < 0) return res; ci = parent->i_crypt_info; if (ci == NULL) return -ENOKEY; ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; if (fscrypt_dummy_context_enabled(parent)) { ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; ctx.flags = 0; memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE); res = 0; } else { ctx.contents_encryption_mode = ci->ci_data_mode; ctx.filenames_encryption_mode = ci->ci_filename_mode; ctx.flags = ci->ci_flags; memcpy(ctx.master_key_descriptor, ci->ci_master_key, FS_KEY_DESCRIPTOR_SIZE); } get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); res = parent->i_sb->s_cop->set_context(child, &ctx, sizeof(ctx), fs_data); if (res) return res; return preload ? fscrypt_get_encryption_info(child): 0; } EXPORT_SYMBOL(fscrypt_inherit_context);
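/*
 * Editorial sketch appended by the editor -- not part of the original
 * fscrypt policy file above. It shows the consistency test performed by
 * is_encryption_context_consistent_with_policy(): a stored context
 * matches a requested policy only if the master key descriptor, both
 * encryption modes and the flags all agree. The struct layout here is
 * simplified and hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DESC_SIZE 8	/* stand-in for FS_KEY_DESCRIPTOR_SIZE */

struct ctx_like {
	unsigned char master_key_descriptor[DESC_SIZE];
	unsigned char contents_mode;
	unsigned char filenames_mode;
	unsigned char flags;
};

static bool consistent(const struct ctx_like *ctx, const struct ctx_like *pol)
{
	return memcmp(ctx->master_key_descriptor, pol->master_key_descriptor,
		      DESC_SIZE) == 0 &&
	       ctx->contents_mode  == pol->contents_mode &&
	       ctx->filenames_mode == pol->filenames_mode &&
	       ctx->flags          == pol->flags;
}

int main(void)
{
	struct ctx_like a = { "keydesc", 1, 4, 0 }, b = a;

	printf("%d\n", consistent(&a, &b));	/* 1: identical      */
	b.flags = 1;				/* any field differs */
	printf("%d\n", consistent(&a, &b));	/* 0: rejected       */
	return 0;
}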
./CrossVul/dataset_final_sorted/CWE-264/c/good_4858_0
crossvul-cpp_data_good_1719_0
/* $OpenBSD: monitor.c,v 1.150 2015/06/22 23:42:16 djm Exp $ */ /* * Copyright 2002 Niels Provos <provos@citi.umich.edu> * Copyright 2002 Markus Friedl <markus@openbsd.org> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "includes.h" #include <sys/types.h> #include <sys/socket.h> #include "openbsd-compat/sys-tree.h" #include <sys/wait.h> #include <errno.h> #include <fcntl.h> #ifdef HAVE_PATHS_H #include <paths.h> #endif #include <pwd.h> #include <signal.h> #ifdef HAVE_STDINT_H #include <stdint.h> #endif #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdio.h> #include <unistd.h> #ifdef HAVE_POLL_H #include <poll.h> #else # ifdef HAVE_SYS_POLL_H # include <sys/poll.h> # endif #endif #ifdef SKEY #include <skey.h> #endif #ifdef WITH_OPENSSL #include <openssl/dh.h> #endif #include "openbsd-compat/sys-queue.h" #include "atomicio.h" #include "xmalloc.h" #include "ssh.h" #include "key.h" #include "buffer.h" #include "hostfile.h" #include "auth.h" #include "cipher.h" #include "kex.h" #include "dh.h" #ifdef TARGET_OS_MAC /* XXX Broken krb5 headers on Mac */ #undef TARGET_OS_MAC #include "zlib.h" #define TARGET_OS_MAC 1 #else #include "zlib.h" #endif #include "packet.h" #include "auth-options.h" #include "sshpty.h" #include "channels.h" #include "session.h" #include "sshlogin.h" #include "canohost.h" #include "log.h" #include "misc.h" #include "servconf.h" #include "monitor.h" #include "monitor_mm.h" #ifdef GSSAPI #include "ssh-gss.h" #endif #include "monitor_wrap.h" #include "monitor_fdpass.h" #include "compat.h" #include "ssh2.h" #include "roaming.h" #include "authfd.h" #include "match.h" #include "ssherr.h" #ifdef GSSAPI static Gssctxt *gsscontext = NULL; #endif /* Imports */ extern ServerOptions options; extern u_int utmp_len; extern u_char session_id[]; extern Buffer auth_debug; extern int auth_debug_init; extern Buffer loginmsg; /* State exported from the child */ static struct sshbuf *child_state; /* Functions on the monitor that answer unprivileged requests */ int mm_answer_moduli(int, Buffer *); int mm_answer_sign(int, Buffer *); int mm_answer_pwnamallow(int, Buffer *); int mm_answer_auth2_read_banner(int, Buffer *); int mm_answer_authserv(int, Buffer *); int mm_answer_authpassword(int, Buffer *); int mm_answer_bsdauthquery(int, Buffer *); int mm_answer_bsdauthrespond(int, Buffer *); 
int mm_answer_skeyquery(int, Buffer *); int mm_answer_skeyrespond(int, Buffer *); int mm_answer_keyallowed(int, Buffer *); int mm_answer_keyverify(int, Buffer *); int mm_answer_pty(int, Buffer *); int mm_answer_pty_cleanup(int, Buffer *); int mm_answer_term(int, Buffer *); int mm_answer_rsa_keyallowed(int, Buffer *); int mm_answer_rsa_challenge(int, Buffer *); int mm_answer_rsa_response(int, Buffer *); int mm_answer_sesskey(int, Buffer *); int mm_answer_sessid(int, Buffer *); #ifdef USE_PAM int mm_answer_pam_start(int, Buffer *); int mm_answer_pam_account(int, Buffer *); int mm_answer_pam_init_ctx(int, Buffer *); int mm_answer_pam_query(int, Buffer *); int mm_answer_pam_respond(int, Buffer *); int mm_answer_pam_free_ctx(int, Buffer *); #endif #ifdef GSSAPI int mm_answer_gss_setup_ctx(int, Buffer *); int mm_answer_gss_accept_ctx(int, Buffer *); int mm_answer_gss_userok(int, Buffer *); int mm_answer_gss_checkmic(int, Buffer *); #endif #ifdef SSH_AUDIT_EVENTS int mm_answer_audit_event(int, Buffer *); int mm_answer_audit_command(int, Buffer *); #endif static int monitor_read_log(struct monitor *); static Authctxt *authctxt; #ifdef WITH_SSH1 static BIGNUM *ssh1_challenge = NULL; /* used for ssh1 rsa auth */ #endif /* local state for key verify */ static u_char *key_blob = NULL; static u_int key_bloblen = 0; static int key_blobtype = MM_NOKEY; static char *hostbased_cuser = NULL; static char *hostbased_chost = NULL; static char *auth_method = "unknown"; static char *auth_submethod = NULL; static u_int session_id2_len = 0; static u_char *session_id2 = NULL; static pid_t monitor_child_pid; struct mon_table { enum monitor_reqtype type; int flags; int (*f)(int, Buffer *); }; #define MON_ISAUTH 0x0004 /* Required for Authentication */ #define MON_AUTHDECIDE 0x0008 /* Decides Authentication */ #define MON_ONCE 0x0010 /* Disable after calling */ #define MON_ALOG 0x0020 /* Log auth attempt without authenticating */ #define MON_AUTH (MON_ISAUTH|MON_AUTHDECIDE) #define MON_PERMIT 0x1000 /* Request is permitted */ struct mon_table mon_dispatch_proto20[] = { #ifdef WITH_OPENSSL {MONITOR_REQ_MODULI, MON_ONCE, mm_answer_moduli}, #endif {MONITOR_REQ_SIGN, MON_ONCE, mm_answer_sign}, {MONITOR_REQ_PWNAM, MON_ONCE, mm_answer_pwnamallow}, {MONITOR_REQ_AUTHSERV, MON_ONCE, mm_answer_authserv}, {MONITOR_REQ_AUTH2_READ_BANNER, MON_ONCE, mm_answer_auth2_read_banner}, {MONITOR_REQ_AUTHPASSWORD, MON_AUTH, mm_answer_authpassword}, #ifdef USE_PAM {MONITOR_REQ_PAM_START, MON_ONCE, mm_answer_pam_start}, {MONITOR_REQ_PAM_ACCOUNT, 0, mm_answer_pam_account}, {MONITOR_REQ_PAM_INIT_CTX, MON_ISAUTH, mm_answer_pam_init_ctx}, {MONITOR_REQ_PAM_QUERY, MON_ISAUTH, mm_answer_pam_query}, {MONITOR_REQ_PAM_RESPOND, MON_ISAUTH, mm_answer_pam_respond}, {MONITOR_REQ_PAM_FREE_CTX, MON_ONCE|MON_AUTHDECIDE, mm_answer_pam_free_ctx}, #endif #ifdef SSH_AUDIT_EVENTS {MONITOR_REQ_AUDIT_EVENT, MON_PERMIT, mm_answer_audit_event}, #endif #ifdef BSD_AUTH {MONITOR_REQ_BSDAUTHQUERY, MON_ISAUTH, mm_answer_bsdauthquery}, {MONITOR_REQ_BSDAUTHRESPOND, MON_AUTH, mm_answer_bsdauthrespond}, #endif #ifdef SKEY {MONITOR_REQ_SKEYQUERY, MON_ISAUTH, mm_answer_skeyquery}, {MONITOR_REQ_SKEYRESPOND, MON_AUTH, mm_answer_skeyrespond}, #endif {MONITOR_REQ_KEYALLOWED, MON_ISAUTH, mm_answer_keyallowed}, {MONITOR_REQ_KEYVERIFY, MON_AUTH, mm_answer_keyverify}, #ifdef GSSAPI {MONITOR_REQ_GSSSETUP, MON_ISAUTH, mm_answer_gss_setup_ctx}, {MONITOR_REQ_GSSSTEP, MON_ISAUTH, mm_answer_gss_accept_ctx}, {MONITOR_REQ_GSSUSEROK, MON_AUTH, mm_answer_gss_userok}, {MONITOR_REQ_GSSCHECKMIC, 
MON_ISAUTH, mm_answer_gss_checkmic}, #endif {0, 0, NULL} }; struct mon_table mon_dispatch_postauth20[] = { #ifdef WITH_OPENSSL {MONITOR_REQ_MODULI, 0, mm_answer_moduli}, #endif {MONITOR_REQ_SIGN, 0, mm_answer_sign}, {MONITOR_REQ_PTY, 0, mm_answer_pty}, {MONITOR_REQ_PTYCLEANUP, 0, mm_answer_pty_cleanup}, {MONITOR_REQ_TERM, 0, mm_answer_term}, #ifdef SSH_AUDIT_EVENTS {MONITOR_REQ_AUDIT_EVENT, MON_PERMIT, mm_answer_audit_event}, {MONITOR_REQ_AUDIT_COMMAND, MON_PERMIT, mm_answer_audit_command}, #endif {0, 0, NULL} }; struct mon_table mon_dispatch_proto15[] = { #ifdef WITH_SSH1 {MONITOR_REQ_PWNAM, MON_ONCE, mm_answer_pwnamallow}, {MONITOR_REQ_SESSKEY, MON_ONCE, mm_answer_sesskey}, {MONITOR_REQ_SESSID, MON_ONCE, mm_answer_sessid}, {MONITOR_REQ_AUTHPASSWORD, MON_AUTH, mm_answer_authpassword}, {MONITOR_REQ_RSAKEYALLOWED, MON_ISAUTH|MON_ALOG, mm_answer_rsa_keyallowed}, {MONITOR_REQ_KEYALLOWED, MON_ISAUTH|MON_ALOG, mm_answer_keyallowed}, {MONITOR_REQ_RSACHALLENGE, MON_ONCE, mm_answer_rsa_challenge}, {MONITOR_REQ_RSARESPONSE, MON_ONCE|MON_AUTHDECIDE, mm_answer_rsa_response}, #ifdef BSD_AUTH {MONITOR_REQ_BSDAUTHQUERY, MON_ISAUTH, mm_answer_bsdauthquery}, {MONITOR_REQ_BSDAUTHRESPOND, MON_AUTH, mm_answer_bsdauthrespond}, #endif #ifdef SKEY {MONITOR_REQ_SKEYQUERY, MON_ISAUTH, mm_answer_skeyquery}, {MONITOR_REQ_SKEYRESPOND, MON_AUTH, mm_answer_skeyrespond}, #endif #ifdef USE_PAM {MONITOR_REQ_PAM_START, MON_ONCE, mm_answer_pam_start}, {MONITOR_REQ_PAM_ACCOUNT, 0, mm_answer_pam_account}, {MONITOR_REQ_PAM_INIT_CTX, MON_ISAUTH, mm_answer_pam_init_ctx}, {MONITOR_REQ_PAM_QUERY, MON_ISAUTH, mm_answer_pam_query}, {MONITOR_REQ_PAM_RESPOND, MON_ISAUTH, mm_answer_pam_respond}, {MONITOR_REQ_PAM_FREE_CTX, MON_ONCE|MON_AUTHDECIDE, mm_answer_pam_free_ctx}, #endif #ifdef SSH_AUDIT_EVENTS {MONITOR_REQ_AUDIT_EVENT, MON_PERMIT, mm_answer_audit_event}, #endif #endif /* WITH_SSH1 */ {0, 0, NULL} }; struct mon_table mon_dispatch_postauth15[] = { #ifdef WITH_SSH1 {MONITOR_REQ_PTY, MON_ONCE, mm_answer_pty}, {MONITOR_REQ_PTYCLEANUP, MON_ONCE, mm_answer_pty_cleanup}, {MONITOR_REQ_TERM, 0, mm_answer_term}, #ifdef SSH_AUDIT_EVENTS {MONITOR_REQ_AUDIT_EVENT, MON_PERMIT, mm_answer_audit_event}, {MONITOR_REQ_AUDIT_COMMAND, MON_PERMIT|MON_ONCE, mm_answer_audit_command}, #endif #endif /* WITH_SSH1 */ {0, 0, NULL} }; struct mon_table *mon_dispatch; /* Specifies if a certain message is allowed at the moment */ static void monitor_permit(struct mon_table *ent, enum monitor_reqtype type, int permit) { while (ent->f != NULL) { if (ent->type == type) { ent->flags &= ~MON_PERMIT; ent->flags |= permit ? MON_PERMIT : 0; return; } ent++; } } static void monitor_permit_authentications(int permit) { struct mon_table *ent = mon_dispatch; while (ent->f != NULL) { if (ent->flags & MON_AUTH) { ent->flags &= ~MON_PERMIT; ent->flags |= permit ? 
MON_PERMIT : 0; } ent++; } } void monitor_child_preauth(Authctxt *_authctxt, struct monitor *pmonitor) { struct mon_table *ent; int authenticated = 0, partial = 0; debug3("preauth child monitor started"); close(pmonitor->m_recvfd); close(pmonitor->m_log_sendfd); pmonitor->m_log_sendfd = pmonitor->m_recvfd = -1; authctxt = _authctxt; memset(authctxt, 0, sizeof(*authctxt)); authctxt->loginmsg = &loginmsg; if (compat20) { mon_dispatch = mon_dispatch_proto20; /* Permit requests for moduli and signatures */ monitor_permit(mon_dispatch, MONITOR_REQ_MODULI, 1); monitor_permit(mon_dispatch, MONITOR_REQ_SIGN, 1); } else { mon_dispatch = mon_dispatch_proto15; monitor_permit(mon_dispatch, MONITOR_REQ_SESSKEY, 1); } /* The first few requests do not require asynchronous access */ while (!authenticated) { partial = 0; auth_method = "unknown"; auth_submethod = NULL; authenticated = (monitor_read(pmonitor, mon_dispatch, &ent) == 1); /* Special handling for multiple required authentications */ if (options.num_auth_methods != 0) { if (!compat20) fatal("AuthenticationMethods is not supported " "with SSH protocol 1"); if (authenticated && !auth2_update_methods_lists(authctxt, auth_method, auth_submethod)) { debug3("%s: method %s: partial", __func__, auth_method); authenticated = 0; partial = 1; } } if (authenticated) { if (!(ent->flags & MON_AUTHDECIDE)) fatal("%s: unexpected authentication from %d", __func__, ent->type); if (authctxt->pw->pw_uid == 0 && !auth_root_allowed(auth_method)) authenticated = 0; #ifdef USE_PAM /* PAM needs to perform account checks after auth */ if (options.use_pam && authenticated) { Buffer m; buffer_init(&m); mm_request_receive_expect(pmonitor->m_sendfd, MONITOR_REQ_PAM_ACCOUNT, &m); authenticated = mm_answer_pam_account(pmonitor->m_sendfd, &m); buffer_free(&m); } #endif } if (ent->flags & (MON_AUTHDECIDE|MON_ALOG)) { auth_log(authctxt, authenticated, partial, auth_method, auth_submethod); if (!partial && !authenticated) authctxt->failures++; } } if (!authctxt->valid) fatal("%s: authenticated invalid user", __func__); if (strcmp(auth_method, "unknown") == 0) fatal("%s: authentication method name unknown", __func__); debug("%s: %s has been authenticated by privileged process", __func__, authctxt->user); mm_get_keystate(pmonitor); /* Drain any buffered messages from the child */ while (pmonitor->m_log_recvfd != -1 && monitor_read_log(pmonitor) == 0) ; close(pmonitor->m_sendfd); close(pmonitor->m_log_recvfd); pmonitor->m_sendfd = pmonitor->m_log_recvfd = -1; } static void monitor_set_child_handler(pid_t pid) { monitor_child_pid = pid; } static void monitor_child_handler(int sig) { kill(monitor_child_pid, sig); } void monitor_child_postauth(struct monitor *pmonitor) { close(pmonitor->m_recvfd); pmonitor->m_recvfd = -1; monitor_set_child_handler(pmonitor->m_pid); signal(SIGHUP, &monitor_child_handler); signal(SIGTERM, &monitor_child_handler); signal(SIGINT, &monitor_child_handler); #ifdef SIGXFSZ signal(SIGXFSZ, SIG_IGN); #endif if (compat20) { mon_dispatch = mon_dispatch_postauth20; /* Permit requests for moduli and signatures */ monitor_permit(mon_dispatch, MONITOR_REQ_MODULI, 1); monitor_permit(mon_dispatch, MONITOR_REQ_SIGN, 1); monitor_permit(mon_dispatch, MONITOR_REQ_TERM, 1); } else { mon_dispatch = mon_dispatch_postauth15; monitor_permit(mon_dispatch, MONITOR_REQ_TERM, 1); } if (!no_pty_flag) { monitor_permit(mon_dispatch, MONITOR_REQ_PTY, 1); monitor_permit(mon_dispatch, MONITOR_REQ_PTYCLEANUP, 1); } for (;;) monitor_read(pmonitor, mon_dispatch, NULL); } void
monitor_sync(struct monitor *pmonitor) { if (options.compression) { /* The member allocation is not visible, so sync it */ mm_share_sync(&pmonitor->m_zlib, &pmonitor->m_zback); } } /* Allocation functions for zlib */ static void * mm_zalloc(struct mm_master *mm, u_int ncount, u_int size) { size_t len = (size_t) size * ncount; void *address; if (len == 0 || ncount > SIZE_MAX / size) fatal("%s: mm_zalloc(%u, %u)", __func__, ncount, size); address = mm_malloc(mm, len); return (address); } static void mm_zfree(struct mm_master *mm, void *address) { mm_free(mm, address); } static int monitor_read_log(struct monitor *pmonitor) { Buffer logmsg; u_int len, level; char *msg; buffer_init(&logmsg); /* Read length */ buffer_append_space(&logmsg, 4); if (atomicio(read, pmonitor->m_log_recvfd, buffer_ptr(&logmsg), buffer_len(&logmsg)) != buffer_len(&logmsg)) { if (errno == EPIPE) { buffer_free(&logmsg); debug("%s: child log fd closed", __func__); close(pmonitor->m_log_recvfd); pmonitor->m_log_recvfd = -1; return -1; } fatal("%s: log fd read: %s", __func__, strerror(errno)); } len = buffer_get_int(&logmsg); if (len <= 4 || len > 8192) fatal("%s: invalid log message length %u", __func__, len); /* Read severity, message */ buffer_clear(&logmsg); buffer_append_space(&logmsg, len); if (atomicio(read, pmonitor->m_log_recvfd, buffer_ptr(&logmsg), buffer_len(&logmsg)) != buffer_len(&logmsg)) fatal("%s: log fd read: %s", __func__, strerror(errno)); /* Log it */ level = buffer_get_int(&logmsg); msg = buffer_get_string(&logmsg, NULL); if (log_level_name(level) == NULL) fatal("%s: invalid log level %u (corrupted message?)", __func__, level); do_log2(level, "%s [preauth]", msg); buffer_free(&logmsg); free(msg); return 0; } int monitor_read(struct monitor *pmonitor, struct mon_table *ent, struct mon_table **pent) { Buffer m; int ret; u_char type; struct pollfd pfd[2]; for (;;) { memset(&pfd, 0, sizeof(pfd)); pfd[0].fd = pmonitor->m_sendfd; pfd[0].events = POLLIN; pfd[1].fd = pmonitor->m_log_recvfd; pfd[1].events = pfd[1].fd == -1 ? 0 : POLLIN; if (poll(pfd, pfd[1].fd == -1 ? 1 : 2, -1) == -1) { if (errno == EINTR || errno == EAGAIN) continue; fatal("%s: poll: %s", __func__, strerror(errno)); } if (pfd[1].revents) { /* * Drain all log messages before processing next * monitor request. 
*/ monitor_read_log(pmonitor); continue; } if (pfd[0].revents) break; /* Continues below */ } buffer_init(&m); mm_request_receive(pmonitor->m_sendfd, &m); type = buffer_get_char(&m); debug3("%s: checking request %d", __func__, type); while (ent->f != NULL) { if (ent->type == type) break; ent++; } if (ent->f != NULL) { if (!(ent->flags & MON_PERMIT)) fatal("%s: unpermitted request %d", __func__, type); ret = (*ent->f)(pmonitor->m_sendfd, &m); buffer_free(&m); /* The child may use this request only once, disable it */ if (ent->flags & MON_ONCE) { debug2("%s: %d used once, disabling now", __func__, type); ent->flags &= ~MON_PERMIT; } if (pent != NULL) *pent = ent; return ret; } fatal("%s: unsupported request: %d", __func__, type); /* NOTREACHED */ return (-1); } /* allowed key state */ static int monitor_allowed_key(u_char *blob, u_int bloblen) { /* make sure key is allowed */ if (key_blob == NULL || key_bloblen != bloblen || timingsafe_bcmp(key_blob, blob, key_bloblen)) return (0); return (1); } static void monitor_reset_key_state(void) { /* reset state */ free(key_blob); free(hostbased_cuser); free(hostbased_chost); key_blob = NULL; key_bloblen = 0; key_blobtype = MM_NOKEY; hostbased_cuser = NULL; hostbased_chost = NULL; } #ifdef WITH_OPENSSL int mm_answer_moduli(int sock, Buffer *m) { DH *dh; int min, want, max; min = buffer_get_int(m); want = buffer_get_int(m); max = buffer_get_int(m); debug3("%s: got parameters: %d %d %d", __func__, min, want, max); /* We need to check here, too, in case the child got corrupted */ if (max < min || want < min || max < want) fatal("%s: bad parameters: %d %d %d", __func__, min, want, max); buffer_clear(m); dh = choose_dh(min, want, max); if (dh == NULL) { buffer_put_char(m, 0); return (0); } else { /* Send first bignum */ buffer_put_char(m, 1); buffer_put_bignum2(m, dh->p); buffer_put_bignum2(m, dh->g); DH_free(dh); } mm_request_send(sock, MONITOR_ANS_MODULI, m); return (0); } #endif int mm_answer_sign(int sock, Buffer *m) { struct ssh *ssh = active_state; /* XXX */ extern int auth_sock; /* XXX move to state struct? */ struct sshkey *key; struct sshbuf *sigbuf; u_char *p; u_char *signature; size_t datlen, siglen; int r, keyid, is_proof = 0; const char proof_req[] = "hostkeys-prove-00@openssh.com"; debug3("%s", __func__); if ((r = sshbuf_get_u32(m, &keyid)) != 0 || (r = sshbuf_get_string(m, &p, &datlen)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); /* * Supported KEX types use SHA1 (20 bytes), SHA256 (32 bytes), * SHA384 (48 bytes) and SHA512 (64 bytes). * * Otherwise, verify the signature request is for a hostkey * proof. * * XXX perform similar check for KEX signature requests too? * it's not trivial, since what is signed is the hash, rather * than the full kex structure... */ if (datlen != 20 && datlen != 32 && datlen != 48 && datlen != 64) { /* * Construct expected hostkey proof and compare it to what * the client sent us. 
*/ if (session_id2_len == 0) /* hostkeys is never first */ fatal("%s: bad data length: %zu", __func__, datlen); if ((key = get_hostkey_public_by_index(keyid, ssh)) == NULL) fatal("%s: no hostkey for index %d", __func__, keyid); if ((sigbuf = sshbuf_new()) == NULL) fatal("%s: sshbuf_new", __func__); if ((r = sshbuf_put_cstring(sigbuf, proof_req)) != 0 || (r = sshbuf_put_string(sigbuf, session_id2, session_id2_len)) != 0 || (r = sshkey_puts(key, sigbuf)) != 0) fatal("%s: couldn't prepare private key " "proof buffer: %s", __func__, ssh_err(r)); if (datlen != sshbuf_len(sigbuf) || memcmp(p, sshbuf_ptr(sigbuf), sshbuf_len(sigbuf)) != 0) fatal("%s: bad data length: %zu, hostkey proof len %zu", __func__, datlen, sshbuf_len(sigbuf)); sshbuf_free(sigbuf); is_proof = 1; } /* save session id, it will be passed on the first call */ if (session_id2_len == 0) { session_id2_len = datlen; session_id2 = xmalloc(session_id2_len); memcpy(session_id2, p, session_id2_len); } if ((key = get_hostkey_by_index(keyid)) != NULL) { if ((r = sshkey_sign(key, &signature, &siglen, p, datlen, datafellows)) != 0) fatal("%s: sshkey_sign failed: %s", __func__, ssh_err(r)); } else if ((key = get_hostkey_public_by_index(keyid, ssh)) != NULL && auth_sock > 0) { if ((r = ssh_agent_sign(auth_sock, key, &signature, &siglen, p, datlen, datafellows)) != 0) { fatal("%s: ssh_agent_sign failed: %s", __func__, ssh_err(r)); } } else fatal("%s: no hostkey from index %d", __func__, keyid); debug3("%s: %s signature %p(%zu)", __func__, is_proof ? "hostkey proof" : "KEX", signature, siglen); sshbuf_reset(m); if ((r = sshbuf_put_string(m, signature, siglen)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); free(p); free(signature); mm_request_send(sock, MONITOR_ANS_SIGN, m); /* Turn on permissions for getpwnam */ monitor_permit(mon_dispatch, MONITOR_REQ_PWNAM, 1); return (0); } /* Retrieves the password entry and also checks if the user is permitted */ int mm_answer_pwnamallow(int sock, Buffer *m) { char *username; struct passwd *pwent; int allowed = 0; u_int i; debug3("%s", __func__); if (authctxt->attempt++ != 0) fatal("%s: multiple attempts for getpwnam", __func__); username = buffer_get_string(m, NULL); pwent = getpwnamallow(username); authctxt->user = xstrdup(username); setproctitle("%s [priv]", pwent ? username : "unknown"); free(username); buffer_clear(m); if (pwent == NULL) { buffer_put_char(m, 0); authctxt->pw = fakepw(); goto out; } allowed = 1; authctxt->pw = pwent; authctxt->valid = 1; buffer_put_char(m, 1); buffer_put_string(m, pwent, sizeof(struct passwd)); buffer_put_cstring(m, pwent->pw_name); buffer_put_cstring(m, "*"); #ifdef HAVE_STRUCT_PASSWD_PW_GECOS buffer_put_cstring(m, pwent->pw_gecos); #endif #ifdef HAVE_STRUCT_PASSWD_PW_CLASS buffer_put_cstring(m, pwent->pw_class); #endif buffer_put_cstring(m, pwent->pw_dir); buffer_put_cstring(m, pwent->pw_shell); out: buffer_put_string(m, &options, sizeof(options)); #define M_CP_STROPT(x) do { \ if (options.x != NULL) \ buffer_put_cstring(m, options.x); \ } while (0) #define M_CP_STRARRAYOPT(x, nx) do { \ for (i = 0; i < options.nx; i++) \ buffer_put_cstring(m, options.x[i]); \ } while (0) /* See comment in servconf.h */ COPY_MATCH_STRING_OPTS(); #undef M_CP_STROPT #undef M_CP_STRARRAYOPT /* Create valid auth method lists */ if (compat20 && auth2_setup_methods_lists(authctxt) != 0) { /* * The monitor will continue long enough to let the child * run to its packet_disconnect(), but it must not allow any * authentication to succeed.
*/ debug("%s: no valid authentication method lists", __func__); } debug3("%s: sending MONITOR_ANS_PWNAM: %d", __func__, allowed); mm_request_send(sock, MONITOR_ANS_PWNAM, m); /* For SSHv1 allow authentication now */ if (!compat20) monitor_permit_authentications(1); else { /* Allow service/style information on the auth context */ monitor_permit(mon_dispatch, MONITOR_REQ_AUTHSERV, 1); monitor_permit(mon_dispatch, MONITOR_REQ_AUTH2_READ_BANNER, 1); } #ifdef USE_PAM if (options.use_pam) monitor_permit(mon_dispatch, MONITOR_REQ_PAM_START, 1); #endif return (0); } int mm_answer_auth2_read_banner(int sock, Buffer *m) { char *banner; buffer_clear(m); banner = auth2_read_banner(); buffer_put_cstring(m, banner != NULL ? banner : ""); mm_request_send(sock, MONITOR_ANS_AUTH2_READ_BANNER, m); free(banner); return (0); } int mm_answer_authserv(int sock, Buffer *m) { monitor_permit_authentications(1); authctxt->service = buffer_get_string(m, NULL); authctxt->style = buffer_get_string(m, NULL); debug3("%s: service=%s, style=%s", __func__, authctxt->service, authctxt->style); if (strlen(authctxt->style) == 0) { free(authctxt->style); authctxt->style = NULL; } return (0); } int mm_answer_authpassword(int sock, Buffer *m) { static int call_count; char *passwd; int authenticated; u_int plen; passwd = buffer_get_string(m, &plen); /* Only authenticate if the context is valid */ authenticated = options.password_authentication && auth_password(authctxt, passwd); explicit_bzero(passwd, strlen(passwd)); free(passwd); buffer_clear(m); buffer_put_int(m, authenticated); debug3("%s: sending result %d", __func__, authenticated); mm_request_send(sock, MONITOR_ANS_AUTHPASSWORD, m); call_count++; if (plen == 0 && call_count == 1) auth_method = "none"; else auth_method = "password"; /* Causes monitor loop to terminate if authenticated */ return (authenticated); } #ifdef BSD_AUTH int mm_answer_bsdauthquery(int sock, Buffer *m) { char *name, *infotxt; u_int numprompts; u_int *echo_on; char **prompts; u_int success; success = bsdauth_query(authctxt, &name, &infotxt, &numprompts, &prompts, &echo_on) < 0 ? 0 : 1; buffer_clear(m); buffer_put_int(m, success); if (success) buffer_put_cstring(m, prompts[0]); debug3("%s: sending challenge success: %u", __func__, success); mm_request_send(sock, MONITOR_ANS_BSDAUTHQUERY, m); if (success) { free(name); free(infotxt); free(prompts); free(echo_on); } return (0); } int mm_answer_bsdauthrespond(int sock, Buffer *m) { char *response; int authok; if (authctxt->as == 0) fatal("%s: no bsd auth session", __func__); response = buffer_get_string(m, NULL); authok = options.challenge_response_authentication && auth_userresponse(authctxt->as, response, 0); authctxt->as = NULL; debug3("%s: <%s> = <%d>", __func__, response, authok); free(response); buffer_clear(m); buffer_put_int(m, authok); debug3("%s: sending authenticated: %d", __func__, authok); mm_request_send(sock, MONITOR_ANS_BSDAUTHRESPOND, m); if (compat20) { auth_method = "keyboard-interactive"; auth_submethod = "bsdauth"; } else auth_method = "bsdauth"; return (authok != 0); } #endif #ifdef SKEY int mm_answer_skeyquery(int sock, Buffer *m) { struct skey skey; char challenge[1024]; u_int success; success = _compat_skeychallenge(&skey, authctxt->user, challenge, sizeof(challenge)) < 0 ? 
0 : 1; buffer_clear(m); buffer_put_int(m, success); if (success) buffer_put_cstring(m, challenge); debug3("%s: sending challenge success: %u", __func__, success); mm_request_send(sock, MONITOR_ANS_SKEYQUERY, m); return (0); } int mm_answer_skeyrespond(int sock, Buffer *m) { char *response; int authok; response = buffer_get_string(m, NULL); authok = (options.challenge_response_authentication && authctxt->valid && skey_haskey(authctxt->pw->pw_name) == 0 && skey_passcheck(authctxt->pw->pw_name, response) != -1); free(response); buffer_clear(m); buffer_put_int(m, authok); debug3("%s: sending authenticated: %d", __func__, authok); mm_request_send(sock, MONITOR_ANS_SKEYRESPOND, m); auth_method = "skey"; return (authok != 0); } #endif #ifdef USE_PAM int mm_answer_pam_start(int sock, Buffer *m) { if (!options.use_pam) fatal("UsePAM not set, but ended up in %s anyway", __func__); start_pam(authctxt); monitor_permit(mon_dispatch, MONITOR_REQ_PAM_ACCOUNT, 1); return (0); } int mm_answer_pam_account(int sock, Buffer *m) { u_int ret; if (!options.use_pam) fatal("UsePAM not set, but ended up in %s anyway", __func__); ret = do_pam_account(); buffer_put_int(m, ret); buffer_put_string(m, buffer_ptr(&loginmsg), buffer_len(&loginmsg)); mm_request_send(sock, MONITOR_ANS_PAM_ACCOUNT, m); return (ret); } static void *sshpam_ctxt, *sshpam_authok; extern KbdintDevice sshpam_device; int mm_answer_pam_init_ctx(int sock, Buffer *m) { debug3("%s", __func__); sshpam_ctxt = (sshpam_device.init_ctx)(authctxt); sshpam_authok = NULL; buffer_clear(m); if (sshpam_ctxt != NULL) { monitor_permit(mon_dispatch, MONITOR_REQ_PAM_FREE_CTX, 1); buffer_put_int(m, 1); } else { buffer_put_int(m, 0); } mm_request_send(sock, MONITOR_ANS_PAM_INIT_CTX, m); return (0); } int mm_answer_pam_query(int sock, Buffer *m) { char *name = NULL, *info = NULL, **prompts = NULL; u_int i, num = 0, *echo_on = 0; int ret; debug3("%s", __func__); sshpam_authok = NULL; ret = (sshpam_device.query)(sshpam_ctxt, &name, &info, &num, &prompts, &echo_on); if (ret == 0 && num == 0) sshpam_authok = sshpam_ctxt; if (num > 1 || name == NULL || info == NULL) ret = -1; buffer_clear(m); buffer_put_int(m, ret); buffer_put_cstring(m, name); free(name); buffer_put_cstring(m, info); free(info); buffer_put_int(m, num); for (i = 0; i < num; ++i) { buffer_put_cstring(m, prompts[i]); free(prompts[i]); buffer_put_int(m, echo_on[i]); } free(prompts); free(echo_on); auth_method = "keyboard-interactive"; auth_submethod = "pam"; mm_request_send(sock, MONITOR_ANS_PAM_QUERY, m); return (0); } int mm_answer_pam_respond(int sock, Buffer *m) { char **resp; u_int i, num; int ret; debug3("%s", __func__); sshpam_authok = NULL; num = buffer_get_int(m); if (num > 0) { resp = xcalloc(num, sizeof(char *)); for (i = 0; i < num; ++i) resp[i] = buffer_get_string(m, NULL); ret = (sshpam_device.respond)(sshpam_ctxt, num, resp); for (i = 0; i < num; ++i) free(resp[i]); free(resp); } else { ret = (sshpam_device.respond)(sshpam_ctxt, num, NULL); } buffer_clear(m); buffer_put_int(m, ret); mm_request_send(sock, MONITOR_ANS_PAM_RESPOND, m); auth_method = "keyboard-interactive"; auth_submethod = "pam"; if (ret == 0) sshpam_authok = sshpam_ctxt; return (0); } int mm_answer_pam_free_ctx(int sock, Buffer *m) { int r = sshpam_authok != NULL && sshpam_authok == sshpam_ctxt; debug3("%s", __func__); (sshpam_device.free_ctx)(sshpam_ctxt); sshpam_ctxt = sshpam_authok = NULL; buffer_clear(m); mm_request_send(sock, MONITOR_ANS_PAM_FREE_CTX, m); auth_method = "keyboard-interactive"; auth_submethod = "pam"; return r; 
} #endif int mm_answer_keyallowed(int sock, Buffer *m) { Key *key; char *cuser, *chost; u_char *blob; u_int bloblen, pubkey_auth_attempt; enum mm_keytype type = 0; int allowed = 0; debug3("%s entering", __func__); type = buffer_get_int(m); cuser = buffer_get_string(m, NULL); chost = buffer_get_string(m, NULL); blob = buffer_get_string(m, &bloblen); pubkey_auth_attempt = buffer_get_int(m); key = key_from_blob(blob, bloblen); if ((compat20 && type == MM_RSAHOSTKEY) || (!compat20 && type != MM_RSAHOSTKEY)) fatal("%s: key type and protocol mismatch", __func__); debug3("%s: key_from_blob: %p", __func__, key); if (key != NULL && authctxt->valid) { /* These should not make it past the privsep child */ if (key_type_plain(key->type) == KEY_RSA && (datafellows & SSH_BUG_RSASIGMD5) != 0) fatal("%s: passed a SSH_BUG_RSASIGMD5 key", __func__); switch (type) { case MM_USERKEY: allowed = options.pubkey_authentication && !auth2_userkey_already_used(authctxt, key) && match_pattern_list(sshkey_ssh_name(key), options.pubkey_key_types, 0) == 1 && user_key_allowed(authctxt->pw, key, pubkey_auth_attempt); pubkey_auth_info(authctxt, key, NULL); auth_method = "publickey"; if (options.pubkey_authentication && (!pubkey_auth_attempt || allowed != 1)) auth_clear_options(); break; case MM_HOSTKEY: allowed = options.hostbased_authentication && match_pattern_list(sshkey_ssh_name(key), options.hostbased_key_types, 0) == 1 && hostbased_key_allowed(authctxt->pw, cuser, chost, key); pubkey_auth_info(authctxt, key, "client user \"%.100s\", client host \"%.100s\"", cuser, chost); auth_method = "hostbased"; break; #ifdef WITH_SSH1 case MM_RSAHOSTKEY: key->type = KEY_RSA1; /* XXX */ allowed = options.rhosts_rsa_authentication && auth_rhosts_rsa_key_allowed(authctxt->pw, cuser, chost, key); if (options.rhosts_rsa_authentication && allowed != 1) auth_clear_options(); auth_method = "rsa"; break; #endif default: fatal("%s: unknown key type %d", __func__, type); break; } } if (key != NULL) key_free(key); /* clear temporary storage (used by verify) */ monitor_reset_key_state(); if (allowed) { /* Save temporarily for comparison in verify */ key_blob = blob; key_bloblen = bloblen; key_blobtype = type; hostbased_cuser = cuser; hostbased_chost = chost; } else { /* Log failed attempt */ auth_log(authctxt, 0, 0, auth_method, NULL); free(blob); free(cuser); free(chost); } debug3("%s: key %p is %s", __func__, key, allowed ? "allowed" : "not allowed"); buffer_clear(m); buffer_put_int(m, allowed); buffer_put_int(m, forced_command != NULL); mm_request_send(sock, MONITOR_ANS_KEYALLOWED, m); if (type == MM_RSAHOSTKEY) monitor_permit(mon_dispatch, MONITOR_REQ_RSACHALLENGE, allowed); return (0); } static int monitor_valid_userblob(u_char *data, u_int datalen) { Buffer b; char *p, *userstyle; u_int len; int fail = 0; buffer_init(&b); buffer_append(&b, data, datalen); if (datafellows & SSH_OLD_SESSIONID) { p = buffer_ptr(&b); len = buffer_len(&b); if ((session_id2 == NULL) || (len < session_id2_len) || (timingsafe_bcmp(p, session_id2, session_id2_len) != 0)) fail++; buffer_consume(&b, session_id2_len); } else { p = buffer_get_string(&b, &len); if ((session_id2 == NULL) || (len != session_id2_len) || (timingsafe_bcmp(p, session_id2, session_id2_len) != 0)) fail++; free(p); } if (buffer_get_char(&b) != SSH2_MSG_USERAUTH_REQUEST) fail++; p = buffer_get_cstring(&b, NULL); xasprintf(&userstyle, "%s%s%s", authctxt->user, authctxt->style ? ":" : "", authctxt->style ?
authctxt->style : ""); if (strcmp(userstyle, p) != 0) { logit("wrong user name passed to monitor: expected %s != %.100s", userstyle, p); fail++; } free(userstyle); free(p); buffer_skip_string(&b); if (datafellows & SSH_BUG_PKAUTH) { if (!buffer_get_char(&b)) fail++; } else { p = buffer_get_cstring(&b, NULL); if (strcmp("publickey", p) != 0) fail++; free(p); if (!buffer_get_char(&b)) fail++; buffer_skip_string(&b); } buffer_skip_string(&b); if (buffer_len(&b) != 0) fail++; buffer_free(&b); return (fail == 0); } static int monitor_valid_hostbasedblob(u_char *data, u_int datalen, char *cuser, char *chost) { Buffer b; char *p, *userstyle; u_int len; int fail = 0; buffer_init(&b); buffer_append(&b, data, datalen); p = buffer_get_string(&b, &len); if ((session_id2 == NULL) || (len != session_id2_len) || (timingsafe_bcmp(p, session_id2, session_id2_len) != 0)) fail++; free(p); if (buffer_get_char(&b) != SSH2_MSG_USERAUTH_REQUEST) fail++; p = buffer_get_cstring(&b, NULL); xasprintf(&userstyle, "%s%s%s", authctxt->user, authctxt->style ? ":" : "", authctxt->style ? authctxt->style : ""); if (strcmp(userstyle, p) != 0) { logit("wrong user name passed to monitor: expected %s != %.100s", userstyle, p); fail++; } free(userstyle); free(p); buffer_skip_string(&b); /* service */ p = buffer_get_cstring(&b, NULL); if (strcmp(p, "hostbased") != 0) fail++; free(p); buffer_skip_string(&b); /* pkalg */ buffer_skip_string(&b); /* pkblob */ /* verify client host, strip trailing dot if necessary */ p = buffer_get_string(&b, NULL); if (((len = strlen(p)) > 0) && p[len - 1] == '.') p[len - 1] = '\0'; if (strcmp(p, chost) != 0) fail++; free(p); /* verify client user */ p = buffer_get_string(&b, NULL); if (strcmp(p, cuser) != 0) fail++; free(p); if (buffer_len(&b) != 0) fail++; buffer_free(&b); return (fail == 0); } int mm_answer_keyverify(int sock, Buffer *m) { Key *key; u_char *signature, *data, *blob; u_int signaturelen, datalen, bloblen; int verified = 0; int valid_data = 0; blob = buffer_get_string(m, &bloblen); signature = buffer_get_string(m, &signaturelen); data = buffer_get_string(m, &datalen); if (hostbased_cuser == NULL || hostbased_chost == NULL || !monitor_allowed_key(blob, bloblen)) fatal("%s: bad key, not previously allowed", __func__); key = key_from_blob(blob, bloblen); if (key == NULL) fatal("%s: bad public key blob", __func__); switch (key_blobtype) { case MM_USERKEY: valid_data = monitor_valid_userblob(data, datalen); break; case MM_HOSTKEY: valid_data = monitor_valid_hostbasedblob(data, datalen, hostbased_cuser, hostbased_chost); break; default: valid_data = 0; break; } if (!valid_data) fatal("%s: bad signature data blob", __func__); verified = key_verify(key, signature, signaturelen, data, datalen); debug3("%s: key %p signature %s", __func__, key, (verified == 1) ? "verified" : "unverified"); /* If auth was successful then record key to ensure it isn't reused */ if (verified == 1) auth2_record_userkey(authctxt, key); else key_free(key); free(blob); free(signature); free(data); auth_method = key_blobtype == MM_USERKEY ? "publickey" : "hostbased"; monitor_reset_key_state(); buffer_clear(m); buffer_put_int(m, verified); mm_request_send(sock, MONITOR_ANS_KEYVERIFY, m); return (verified == 1); } static void mm_record_login(Session *s, struct passwd *pw) { socklen_t fromlen; struct sockaddr_storage from; if (options.use_login) return; /* * Get IP address of client. If the connection is not a socket, let * the address be 0.0.0.0. 
*/ memset(&from, 0, sizeof(from)); fromlen = sizeof(from); if (packet_connection_is_on_socket()) { if (getpeername(packet_get_connection_in(), (struct sockaddr *)&from, &fromlen) < 0) { debug("getpeername: %.100s", strerror(errno)); cleanup_exit(255); } } /* Record that there was a login on that tty from the remote host. */ record_login(s->pid, s->tty, pw->pw_name, pw->pw_uid, get_remote_name_or_ip(utmp_len, options.use_dns), (struct sockaddr *)&from, fromlen); } static void mm_session_close(Session *s) { debug3("%s: session %d pid %ld", __func__, s->self, (long)s->pid); if (s->ttyfd != -1) { debug3("%s: tty %s ptyfd %d", __func__, s->tty, s->ptyfd); session_pty_cleanup2(s); } session_unused(s->self); } int mm_answer_pty(int sock, Buffer *m) { extern struct monitor *pmonitor; Session *s; int res, fd0; debug3("%s entering", __func__); buffer_clear(m); s = session_new(); if (s == NULL) goto error; s->authctxt = authctxt; s->pw = authctxt->pw; s->pid = pmonitor->m_pid; res = pty_allocate(&s->ptyfd, &s->ttyfd, s->tty, sizeof(s->tty)); if (res == 0) goto error; pty_setowner(authctxt->pw, s->tty); buffer_put_int(m, 1); buffer_put_cstring(m, s->tty); /* We need to trick ttyslot */ if (dup2(s->ttyfd, 0) == -1) fatal("%s: dup2", __func__); mm_record_login(s, authctxt->pw); /* Now we can close the file descriptor again */ close(0); /* send messages generated by record_login */ buffer_put_string(m, buffer_ptr(&loginmsg), buffer_len(&loginmsg)); buffer_clear(&loginmsg); mm_request_send(sock, MONITOR_ANS_PTY, m); if (mm_send_fd(sock, s->ptyfd) == -1 || mm_send_fd(sock, s->ttyfd) == -1) fatal("%s: send fds failed", __func__); /* make sure nothing uses fd 0 */ if ((fd0 = open(_PATH_DEVNULL, O_RDONLY)) < 0) fatal("%s: open(/dev/null): %s", __func__, strerror(errno)); if (fd0 != 0) error("%s: fd0 %d != 0", __func__, fd0); /* slave is not needed */ close(s->ttyfd); s->ttyfd = s->ptyfd; /* no need to dup() because nobody closes ptyfd */ s->ptymaster = s->ptyfd; debug3("%s: tty %s ptyfd %d", __func__, s->tty, s->ttyfd); return (0); error: if (s != NULL) mm_session_close(s); buffer_put_int(m, 0); mm_request_send(sock, MONITOR_ANS_PTY, m); return (0); } int mm_answer_pty_cleanup(int sock, Buffer *m) { Session *s; char *tty; debug3("%s entering", __func__); tty = buffer_get_string(m, NULL); if ((s = session_by_tty(tty)) != NULL) mm_session_close(s); buffer_clear(m); free(tty); return (0); } #ifdef WITH_SSH1 int mm_answer_sesskey(int sock, Buffer *m) { BIGNUM *p; int rsafail; /* Turn off permissions */ monitor_permit(mon_dispatch, MONITOR_REQ_SESSKEY, 0); if ((p = BN_new()) == NULL) fatal("%s: BN_new", __func__); buffer_get_bignum2(m, p); rsafail = ssh1_session_key(p); buffer_clear(m); buffer_put_int(m, rsafail); buffer_put_bignum2(m, p); BN_clear_free(p); mm_request_send(sock, MONITOR_ANS_SESSKEY, m); /* Turn on permissions for sessid passing */ monitor_permit(mon_dispatch, MONITOR_REQ_SESSID, 1); return (0); } int mm_answer_sessid(int sock, Buffer *m) { int i; debug3("%s entering", __func__); if (buffer_len(m) != 16) fatal("%s: bad ssh1 session id", __func__); for (i = 0; i < 16; i++) session_id[i] = buffer_get_char(m); /* Turn on permissions for getpwnam */ monitor_permit(mon_dispatch, MONITOR_REQ_PWNAM, 1); return (0); } int mm_answer_rsa_keyallowed(int sock, Buffer *m) { BIGNUM *client_n; Key *key = NULL; u_char *blob = NULL; u_int blen = 0; int allowed = 0; debug3("%s entering", __func__); auth_method = "rsa"; if (options.rsa_authentication && authctxt->valid) { if ((client_n = BN_new()) == NULL) fatal("%s: 
BN_new", __func__); buffer_get_bignum2(m, client_n); allowed = auth_rsa_key_allowed(authctxt->pw, client_n, &key); BN_clear_free(client_n); } buffer_clear(m); buffer_put_int(m, allowed); buffer_put_int(m, forced_command != NULL); /* clear temporary storage (used by challenge generation) */ monitor_reset_key_state(); if (allowed && key != NULL) { key->type = KEY_RSA; /* cheat for key_to_blob */ if (key_to_blob(key, &blob, &blen) == 0) fatal("%s: key_to_blob failed", __func__); buffer_put_string(m, blob, blen); /* Save temporarily for comparison in verify */ key_blob = blob; key_bloblen = blen; key_blobtype = MM_RSAUSERKEY; } if (key != NULL) key_free(key); mm_request_send(sock, MONITOR_ANS_RSAKEYALLOWED, m); monitor_permit(mon_dispatch, MONITOR_REQ_RSACHALLENGE, allowed); monitor_permit(mon_dispatch, MONITOR_REQ_RSARESPONSE, 0); return (0); } int mm_answer_rsa_challenge(int sock, Buffer *m) { Key *key = NULL; u_char *blob; u_int blen; debug3("%s entering", __func__); if (!authctxt->valid) fatal("%s: authctxt not valid", __func__); blob = buffer_get_string(m, &blen); if (!monitor_allowed_key(blob, blen)) fatal("%s: bad key, not previously allowed", __func__); if (key_blobtype != MM_RSAUSERKEY && key_blobtype != MM_RSAHOSTKEY) fatal("%s: key type mismatch", __func__); if ((key = key_from_blob(blob, blen)) == NULL) fatal("%s: received bad key", __func__); if (key->type != KEY_RSA) fatal("%s: received bad key type %d", __func__, key->type); key->type = KEY_RSA1; if (ssh1_challenge) BN_clear_free(ssh1_challenge); ssh1_challenge = auth_rsa_generate_challenge(key); buffer_clear(m); buffer_put_bignum2(m, ssh1_challenge); debug3("%s sending reply", __func__); mm_request_send(sock, MONITOR_ANS_RSACHALLENGE, m); monitor_permit(mon_dispatch, MONITOR_REQ_RSARESPONSE, 1); free(blob); key_free(key); return (0); } int mm_answer_rsa_response(int sock, Buffer *m) { Key *key = NULL; u_char *blob, *response; u_int blen, len; int success; debug3("%s entering", __func__); if (!authctxt->valid) fatal("%s: authctxt not valid", __func__); if (ssh1_challenge == NULL) fatal("%s: no ssh1_challenge", __func__); blob = buffer_get_string(m, &blen); if (!monitor_allowed_key(blob, blen)) fatal("%s: bad key, not previously allowed", __func__); if (key_blobtype != MM_RSAUSERKEY && key_blobtype != MM_RSAHOSTKEY) fatal("%s: key type mismatch: %d", __func__, key_blobtype); if ((key = key_from_blob(blob, blen)) == NULL) fatal("%s: received bad key", __func__); response = buffer_get_string(m, &len); if (len != 16) fatal("%s: received bad response to challenge", __func__); success = auth_rsa_verify_response(key, ssh1_challenge, response); free(blob); key_free(key); free(response); auth_method = key_blobtype == MM_RSAUSERKEY ? "rsa" : "rhosts-rsa"; /* reset state */ BN_clear_free(ssh1_challenge); ssh1_challenge = NULL; monitor_reset_key_state(); buffer_clear(m); buffer_put_int(m, success); mm_request_send(sock, MONITOR_ANS_RSARESPONSE, m); return (success); } #endif int mm_answer_term(int sock, Buffer *req) { extern struct monitor *pmonitor; int res, status; debug3("%s: tearing down sessions", __func__); /* The child is terminating */ session_destroy_all(&mm_session_close); #ifdef USE_PAM if (options.use_pam) sshpam_cleanup(); #endif while (waitpid(pmonitor->m_pid, &status, 0) == -1) if (errno != EINTR) exit(1); res = WIFEXITED(status) ?
WEXITSTATUS(status) : 1; /* Terminate process */ exit(res); } #ifdef SSH_AUDIT_EVENTS /* Report that an audit event occurred */ int mm_answer_audit_event(int socket, Buffer *m) { ssh_audit_event_t event; debug3("%s entering", __func__); event = buffer_get_int(m); switch (event) { case SSH_AUTH_FAIL_PUBKEY: case SSH_AUTH_FAIL_HOSTBASED: case SSH_AUTH_FAIL_GSSAPI: case SSH_LOGIN_EXCEED_MAXTRIES: case SSH_LOGIN_ROOT_DENIED: case SSH_CONNECTION_CLOSE: case SSH_INVALID_USER: audit_event(event); break; default: fatal("Audit event type %d not permitted", event); } return (0); } int mm_answer_audit_command(int socket, Buffer *m) { u_int len; char *cmd; debug3("%s entering", __func__); cmd = buffer_get_string(m, &len); /* sanity check command, if so how? */ audit_run_command(cmd); free(cmd); return (0); } #endif /* SSH_AUDIT_EVENTS */ void monitor_apply_keystate(struct monitor *pmonitor) { struct ssh *ssh = active_state; /* XXX */ struct kex *kex; int r; debug3("%s: packet_set_state", __func__); if ((r = ssh_packet_set_state(ssh, child_state)) != 0) fatal("%s: packet_set_state: %s", __func__, ssh_err(r)); sshbuf_free(child_state); child_state = NULL; if ((kex = ssh->kex) != NULL) { /* XXX set callbacks */ #ifdef WITH_OPENSSL kex->kex[KEX_DH_GRP1_SHA1] = kexdh_server; kex->kex[KEX_DH_GRP14_SHA1] = kexdh_server; kex->kex[KEX_DH_GEX_SHA1] = kexgex_server; kex->kex[KEX_DH_GEX_SHA256] = kexgex_server; # ifdef OPENSSL_HAS_ECC kex->kex[KEX_ECDH_SHA2] = kexecdh_server; # endif #endif /* WITH_OPENSSL */ kex->kex[KEX_C25519_SHA256] = kexc25519_server; kex->load_host_public_key = &get_hostkey_public_by_type; kex->load_host_private_key = &get_hostkey_private_by_type; kex->host_key_index = &get_hostkey_index; kex->sign = sshd_hostkey_sign; } /* Update with new address */ if (options.compression) { ssh_packet_set_compress_hooks(ssh, pmonitor->m_zlib, (ssh_packet_comp_alloc_func *)mm_zalloc, (ssh_packet_comp_free_func *)mm_zfree); } } /* This function requires careful sanity checking */ void mm_get_keystate(struct monitor *pmonitor) { debug3("%s: Waiting for new keys", __func__); if ((child_state = sshbuf_new()) == NULL) fatal("%s: sshbuf_new failed", __func__); mm_request_receive_expect(pmonitor->m_sendfd, MONITOR_REQ_KEYEXPORT, child_state); debug3("%s: GOT new keys", __func__); } /* XXX */ #define FD_CLOSEONEXEC(x) do { \ if (fcntl(x, F_SETFD, FD_CLOEXEC) == -1) \ fatal("fcntl(%d, F_SETFD)", x); \ } while (0) static void monitor_openfds(struct monitor *mon, int do_logfds) { int pair[2]; if (socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) fatal("%s: socketpair: %s", __func__, strerror(errno)); FD_CLOSEONEXEC(pair[0]); FD_CLOSEONEXEC(pair[1]); mon->m_recvfd = pair[0]; mon->m_sendfd = pair[1]; if (do_logfds) { if (pipe(pair) == -1) fatal("%s: pipe: %s", __func__, strerror(errno)); FD_CLOSEONEXEC(pair[0]); FD_CLOSEONEXEC(pair[1]); mon->m_log_recvfd = pair[0]; mon->m_log_sendfd = pair[1]; } else mon->m_log_recvfd = mon->m_log_sendfd = -1; } #define MM_MEMSIZE 65536 struct monitor * monitor_init(void) { struct ssh *ssh = active_state; /* XXX */ struct monitor *mon; mon = xcalloc(1, sizeof(*mon)); monitor_openfds(mon, 1); /* Used to share zlib space across processes */ if (options.compression) { mon->m_zback = mm_create(NULL, MM_MEMSIZE); mon->m_zlib = mm_create(mon->m_zback, 20 * MM_MEMSIZE); /* Compression needs to share state across borders */ ssh_packet_set_compress_hooks(ssh, mon->m_zlib, (ssh_packet_comp_alloc_func *)mm_zalloc, (ssh_packet_comp_free_func *)mm_zfree); } return mon; } void monitor_reinit(struct monitor
*mon) { monitor_openfds(mon, 0); } #ifdef GSSAPI int mm_answer_gss_setup_ctx(int sock, Buffer *m) { gss_OID_desc goid; OM_uint32 major; u_int len; goid.elements = buffer_get_string(m, &len); goid.length = len; major = ssh_gssapi_server_ctx(&gsscontext, &goid); free(goid.elements); buffer_clear(m); buffer_put_int(m, major); mm_request_send(sock, MONITOR_ANS_GSSSETUP, m); /* Now we have a context, enable the step */ monitor_permit(mon_dispatch, MONITOR_REQ_GSSSTEP, 1); return (0); } int mm_answer_gss_accept_ctx(int sock, Buffer *m) { gss_buffer_desc in; gss_buffer_desc out = GSS_C_EMPTY_BUFFER; OM_uint32 major, minor; OM_uint32 flags = 0; /* GSI needs this */ u_int len; in.value = buffer_get_string(m, &len); in.length = len; major = ssh_gssapi_accept_ctx(gsscontext, &in, &out, &flags); free(in.value); buffer_clear(m); buffer_put_int(m, major); buffer_put_string(m, out.value, out.length); buffer_put_int(m, flags); mm_request_send(sock, MONITOR_ANS_GSSSTEP, m); gss_release_buffer(&minor, &out); if (major == GSS_S_COMPLETE) { monitor_permit(mon_dispatch, MONITOR_REQ_GSSSTEP, 0); monitor_permit(mon_dispatch, MONITOR_REQ_GSSUSEROK, 1); monitor_permit(mon_dispatch, MONITOR_REQ_GSSCHECKMIC, 1); } return (0); } int mm_answer_gss_checkmic(int sock, Buffer *m) { gss_buffer_desc gssbuf, mic; OM_uint32 ret; u_int len; gssbuf.value = buffer_get_string(m, &len); gssbuf.length = len; mic.value = buffer_get_string(m, &len); mic.length = len; ret = ssh_gssapi_checkmic(gsscontext, &gssbuf, &mic); free(gssbuf.value); free(mic.value); buffer_clear(m); buffer_put_int(m, ret); mm_request_send(sock, MONITOR_ANS_GSSCHECKMIC, m); if (!GSS_ERROR(ret)) monitor_permit(mon_dispatch, MONITOR_REQ_GSSUSEROK, 1); return (0); } int mm_answer_gss_userok(int sock, Buffer *m) { int authenticated; authenticated = authctxt->valid && ssh_gssapi_userok(authctxt->user); buffer_clear(m); buffer_put_int(m, authenticated); debug3("%s: sending result %d", __func__, authenticated); mm_request_send(sock, MONITOR_ANS_GSSUSEROK, m); auth_method = "gssapi-with-mic"; /* Monitor loop will terminate if authenticated */ return (authenticated); } #endif /* GSSAPI */
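/*
 * The table-driven gating above is the core of the privsep design: each
 * request type carries a MON_PERMIT bit, monitor_permit() toggles it, and
 * monitor_read() refuses any request whose bit is clear. The following is
 * a minimal, self-contained sketch of that pattern, for illustration only;
 * it is not part of the original monitor.c, and every demo_* name is
 * invented for this example.
 */
#if 0
#include <stdio.h>

enum demo_reqtype { DEMO_REQ_SIGN = 1, DEMO_REQ_PWNAM = 2 };
#define DEMO_PERMIT 0x1000

struct demo_table {
	enum demo_reqtype type;
	int flags;
	int (*f)(void);
};

static int demo_sign(void) { printf("sign request handled\n"); return 0; }

static struct demo_table demo_dispatch[] = {
	{ DEMO_REQ_SIGN, 0, demo_sign },
	{ 0, 0, NULL }
};

/* Equivalent of monitor_permit(): flip the permit bit for one type. */
static void
demo_permit(struct demo_table *ent, enum demo_reqtype type, int permit)
{
	for (; ent->f != NULL; ent++) {
		if (ent->type != type)
			continue;
		ent->flags &= ~DEMO_PERMIT;
		ent->flags |= permit ? DEMO_PERMIT : 0;
		return;
	}
}

/* Equivalent of monitor_read()'s dispatch step: deny unpermitted types. */
static int
demo_read(struct demo_table *ent, enum demo_reqtype type)
{
	for (; ent->f != NULL; ent++)
		if (ent->type == type)
			return (ent->flags & DEMO_PERMIT) ? (*ent->f)() : -1;
	return -1;
}

int
main(void)
{
	if (demo_read(demo_dispatch, DEMO_REQ_SIGN) == -1)
		printf("denied before demo_permit()\n");
	demo_permit(demo_dispatch, DEMO_REQ_SIGN, 1);
	return demo_read(demo_dispatch, DEMO_REQ_SIGN);
}
#endif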
./CrossVul/dataset_final_sorted/CWE-264/c/good_1719_0
crossvul-cpp_data_good_5861_32
/* * Glue Code for assembler optimized version of 3DES * * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * CTR part based on code (crypto/ctr.c) by: * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <asm/processor.h> #include <crypto/des.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <crypto/algapi.h> struct des3_ede_x86_ctx { u32 enc_expkey[DES3_EDE_EXPKEY_WORDS]; u32 dec_expkey[DES3_EDE_EXPKEY_WORDS]; }; /* regular block cipher functions */ asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst, const u8 *src); /* 3-way parallel cipher functions */ asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst, const u8 *src); static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *enc_ctx = ctx->enc_expkey; des3_ede_x86_64_crypt_blk(enc_ctx, dst, src); } static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *dec_ctx = ctx->dec_expkey; des3_ede_x86_64_crypt_blk(dec_ctx, dst, src); } static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *enc_ctx = ctx->enc_expkey; des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src); } static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *dec_ctx = ctx->dec_expkey; des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src); } static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src); } static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, const u32 *expkey) { unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes; int err; err = blkcipher_walk_virt(desc, walk); while ((nbytes = walk->nbytes)) { u8 *wsrc = walk->src.virt.addr; u8 *wdst = walk->dst.virt.addr; /* Process three block batch */ if (nbytes >= bsize * 3) { do { des3_ede_x86_64_crypt_blk_3way(expkey, wdst, wsrc); wsrc += bsize * 3; wdst += bsize * 3; nbytes -= bsize * 3; } while (nbytes >= bsize * 3); if (nbytes < bsize) goto done; } /* Handle leftovers */ do { des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc); wsrc += bsize; wdst += bsize; nbytes -= bsize; } while (nbytes >= bsize); done: err = blkcipher_walk_done(desc, walk, nbytes); } return err; } static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_crypt(desc, &walk, ctx->enc_expkey); } static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_crypt(desc, &walk, ctx->dec_expkey); } static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 *iv = (u64 *)walk->iv; do { *dst = *src ^ *iv; des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst); iv = dst; src += 1; dst += 1; nbytes -= bsize; } while (nbytes >= bsize); *(u64 *)walk->iv = *iv; return nbytes; } static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { nbytes = __cbc_encrypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 ivs[3 - 1]; u64 last_iv; /* Start of the last block. */ src += nbytes / bsize - 1; dst += nbytes / bsize - 1; last_iv = *src; /* Process three block batch */ if (nbytes >= bsize * 3) { do { nbytes -= bsize * 3 - bsize; src -= 3 - 1; dst -= 3 - 1; ivs[0] = src[0]; ivs[1] = src[1]; des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); dst[1] ^= ivs[0]; dst[2] ^= ivs[1]; nbytes -= bsize; if (nbytes < bsize) goto done; *dst ^= *(src - 1); src -= 1; dst -= 1; } while (nbytes >= bsize * 3); } /* Handle leftovers */ for (;;) { des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src); nbytes -= bsize; if (nbytes < bsize) break; *dst ^= *(src - 1); src -= 1; dst -= 1; } done: *dst ^= *(u64 *)walk->iv; *(u64 *)walk->iv = last_iv; return nbytes; } static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { nbytes = __cbc_decrypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx, struct blkcipher_walk *walk) { u8 *ctrblk = walk->iv; u8 keystream[DES3_EDE_BLOCK_SIZE]; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; des3_ede_enc_blk(ctx, keystream, ctrblk); crypto_xor(keystream, src, nbytes); memcpy(dst, keystream, nbytes); crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE); } static unsigned int __ctr_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; __be64 *src = (__be64 *)walk->src.virt.addr; __be64 *dst = (__be64 *)walk->dst.virt.addr; u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); __be64 ctrblocks[3]; /* Process three block batch */ if (nbytes >= bsize * 3) { do { /* create ctrblks for parallel encrypt */ ctrblocks[0] = cpu_to_be64(ctrblk++); ctrblocks[1] = cpu_to_be64(ctrblk++); ctrblocks[2] = cpu_to_be64(ctrblk++);
des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); dst[0] = src[0] ^ ctrblocks[0]; dst[1] = src[1] ^ ctrblocks[1]; dst[2] = src[2] ^ ctrblocks[2]; src += 3; dst += 3; } while ((nbytes -= bsize * 3) >= bsize * 3); if (nbytes < bsize) goto done; } /* Handle leftovers */ do { ctrblocks[0] = cpu_to_be64(ctrblk++); des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); dst[0] = src[0] ^ ctrblocks[0]; src += 1; dst += 1; } while ((nbytes -= bsize) >= bsize); done: *(__be64 *)walk->iv = cpu_to_be64(ctrblk); return nbytes; } static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE); while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) { nbytes = __ctr_crypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); err = blkcipher_walk_done(desc, &walk, 0); } return err; } static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm); u32 i, j, tmp; int err; /* Generate encryption context using generic implementation. */ err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen); if (err < 0) return err; /* Fix encryption context for this implementation and form decryption * context. */ j = DES3_EDE_EXPKEY_WORDS - 2; for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) { tmp = ror32(ctx->enc_expkey[i + 1], 4); ctx->enc_expkey[i + 1] = tmp; ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0]; ctx->dec_expkey[j + 1] = tmp; } return 0; } static struct crypto_alg des3_ede_algs[4] = { { .cra_name = "des3_ede", .cra_driver_name = "des3_ede-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = DES3_EDE_KEY_SIZE, .cia_max_keysize = DES3_EDE_KEY_SIZE, .cia_setkey = des3_ede_x86_setkey, .cia_encrypt = des3_ede_x86_encrypt, .cia_decrypt = des3_ede_x86_decrypt, } } }, { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3_ede-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = des3_ede_x86_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, }, }, { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3_ede-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_x86_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }, }, { .cra_name = "ctr(des3_ede)", .cra_driver_name = "ctr-des3_ede-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { 
.blkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_x86_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, }, }, } }; static bool is_blacklisted_cpu(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; if (boot_cpu_data.x86 == 0x0f) { /* * On Pentium 4, des3_ede-x86_64 is slower than the generic C * implementation because of its use of 64-bit rotates (which * are really slow on P4). Therefore blacklist P4s. */ return true; } return false; } static int force; module_param(force, int, 0); MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); static int __init des3_ede_x86_init(void) { if (!force && is_blacklisted_cpu()) { pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n"); return -ENODEV; } return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); } static void __exit des3_ede_x86_fini(void) { crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); } module_init(des3_ede_x86_init); module_exit(des3_ede_x86_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); MODULE_ALIAS_CRYPTO("des3_ede"); MODULE_ALIAS_CRYPTO("des3_ede-asm"); MODULE_ALIAS_CRYPTO("des"); MODULE_ALIAS_CRYPTO("des-asm"); MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
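/*
 * ctr_crypt_final() above handles a trailing partial block in CTR mode:
 * the counter block is encrypted once and only nbytes of the resulting
 * keystream are XORed into the output, so no plaintext padding is ever
 * needed. Below is a minimal, self-contained sketch of that tail logic,
 * for illustration only; it is not part of the original file, the demo_*
 * names are invented, and a bitwise-NOT stands in for the real 3DES
 * block encryption.
 */
#if 0
#include <stdio.h>

#define DEMO_BLOCK_SIZE 8

/* Stand-in for des3_ede_enc_blk(): any fixed block permutation will do
 * for demonstrating the partial-block handling. */
static void demo_enc_blk(unsigned char *dst, const unsigned char *src)
{
	unsigned int i;

	for (i = 0; i < DEMO_BLOCK_SIZE; i++)
		dst[i] = (unsigned char)~src[i];
}

/* Mirror of ctr_crypt_final(): encrypt the counter block, XOR just
 * nbytes of keystream into dst, then increment the counter the way
 * crypto_inc() does (big-endian, with carry). */
static void demo_ctr_final(unsigned char *ctrblk, unsigned char *dst,
			   const unsigned char *src, unsigned int nbytes)
{
	unsigned char keystream[DEMO_BLOCK_SIZE];
	unsigned int i;

	demo_enc_blk(keystream, ctrblk);
	for (i = 0; i < nbytes; i++)
		dst[i] = src[i] ^ keystream[i];
	for (i = DEMO_BLOCK_SIZE; i-- > 0; )
		if (++ctrblk[i] != 0)
			break;
}

int main(void)
{
	unsigned char ctr[DEMO_BLOCK_SIZE] = { 0 };
	const unsigned char in[3] = { 'a', 'b', 'c' };
	unsigned char out[3];

	demo_ctr_final(ctr, out, in, sizeof(in));
	printf("%02x %02x %02x, counter low byte now %u\n",
	       out[0], out[1], out[2], ctr[DEMO_BLOCK_SIZE - 1]);
	return 0;
}
#endif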
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_32
crossvul-cpp_data_bad_5054_0
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/completion.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/cdev.h> #include <linux/idr.h> #include <linux/mutex.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <rdma/ib_cm.h> #include <rdma/ib_user_cm.h> #include <rdma/ib_marshall.h> MODULE_AUTHOR("Libor Michalek"); MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access"); MODULE_LICENSE("Dual BSD/GPL"); struct ib_ucm_device { int devnum; struct cdev cdev; struct device dev; struct ib_device *ib_dev; }; struct ib_ucm_file { struct mutex file_mutex; struct file *filp; struct ib_ucm_device *device; struct list_head ctxs; struct list_head events; wait_queue_head_t poll_wait; }; struct ib_ucm_context { int id; struct completion comp; atomic_t ref; int events_reported; struct ib_ucm_file *file; struct ib_cm_id *cm_id; __u64 uid; struct list_head events; /* list of pending events. 
*/ struct list_head file_list; /* member in file ctx list */ }; struct ib_ucm_event { struct ib_ucm_context *ctx; struct list_head file_list; /* member in file event list */ struct list_head ctx_list; /* member in ctx event list */ struct ib_cm_id *cm_id; struct ib_ucm_event_resp resp; void *data; void *info; int data_len; int info_len; }; enum { IB_UCM_MAJOR = 231, IB_UCM_BASE_MINOR = 224, IB_UCM_MAX_DEVICES = 32 }; #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR) static void ib_ucm_add_one(struct ib_device *device); static void ib_ucm_remove_one(struct ib_device *device, void *client_data); static struct ib_client ucm_client = { .name = "ucm", .add = ib_ucm_add_one, .remove = ib_ucm_remove_one }; static DEFINE_MUTEX(ctx_id_mutex); static DEFINE_IDR(ctx_id_table); static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES); static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id) { struct ib_ucm_context *ctx; mutex_lock(&ctx_id_mutex); ctx = idr_find(&ctx_id_table, id); if (!ctx) ctx = ERR_PTR(-ENOENT); else if (ctx->file != file) ctx = ERR_PTR(-EINVAL); else atomic_inc(&ctx->ref); mutex_unlock(&ctx_id_mutex); return ctx; } static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) { if (atomic_dec_and_test(&ctx->ref)) complete(&ctx->comp); } static inline int ib_ucm_new_cm_id(int event) { return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED; } static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx) { struct ib_ucm_event *uevent; mutex_lock(&ctx->file->file_mutex); list_del(&ctx->file_list); while (!list_empty(&ctx->events)) { uevent = list_entry(ctx->events.next, struct ib_ucm_event, ctx_list); list_del(&uevent->file_list); list_del(&uevent->ctx_list); mutex_unlock(&ctx->file->file_mutex); /* clear incoming connections. 
*/ if (ib_ucm_new_cm_id(uevent->resp.event)) ib_destroy_cm_id(uevent->cm_id); kfree(uevent); mutex_lock(&ctx->file->file_mutex); } mutex_unlock(&ctx->file->file_mutex); } static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) { struct ib_ucm_context *ctx; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) return NULL; atomic_set(&ctx->ref, 1); init_completion(&ctx->comp); ctx->file = file; INIT_LIST_HEAD(&ctx->events); mutex_lock(&ctx_id_mutex); ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL); mutex_unlock(&ctx_id_mutex); if (ctx->id < 0) goto error; list_add_tail(&ctx->file_list, &file->ctxs); return ctx; error: kfree(ctx); return NULL; } static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq, struct ib_cm_req_event_param *kreq) { ureq->remote_ca_guid = kreq->remote_ca_guid; ureq->remote_qkey = kreq->remote_qkey; ureq->remote_qpn = kreq->remote_qpn; ureq->qp_type = kreq->qp_type; ureq->starting_psn = kreq->starting_psn; ureq->responder_resources = kreq->responder_resources; ureq->initiator_depth = kreq->initiator_depth; ureq->local_cm_response_timeout = kreq->local_cm_response_timeout; ureq->flow_control = kreq->flow_control; ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout; ureq->retry_count = kreq->retry_count; ureq->rnr_retry_count = kreq->rnr_retry_count; ureq->srq = kreq->srq; ureq->port = kreq->port; ib_copy_path_rec_to_user(&ureq->primary_path, kreq->primary_path); if (kreq->alternate_path) ib_copy_path_rec_to_user(&ureq->alternate_path, kreq->alternate_path); } static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep, struct ib_cm_rep_event_param *krep) { urep->remote_ca_guid = krep->remote_ca_guid; urep->remote_qkey = krep->remote_qkey; urep->remote_qpn = krep->remote_qpn; urep->starting_psn = krep->starting_psn; urep->responder_resources = krep->responder_resources; urep->initiator_depth = krep->initiator_depth; urep->target_ack_delay = krep->target_ack_delay; urep->failover_accepted = krep->failover_accepted; urep->flow_control = krep->flow_control; urep->rnr_retry_count = krep->rnr_retry_count; urep->srq = krep->srq; } static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep, struct ib_cm_sidr_rep_event_param *krep) { urep->status = krep->status; urep->qkey = krep->qkey; urep->qpn = krep->qpn; }; static int ib_ucm_event_process(struct ib_cm_event *evt, struct ib_ucm_event *uvt) { void *info = NULL; switch (evt->event) { case IB_CM_REQ_RECEIVED: ib_ucm_event_req_get(&uvt->resp.u.req_resp, &evt->param.req_rcvd); uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; uvt->resp.present = IB_UCM_PRES_PRIMARY; uvt->resp.present |= (evt->param.req_rcvd.alternate_path ? 
IB_UCM_PRES_ALTERNATE : 0); break; case IB_CM_REP_RECEIVED: ib_ucm_event_rep_get(&uvt->resp.u.rep_resp, &evt->param.rep_rcvd); uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE; break; case IB_CM_RTU_RECEIVED: uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; break; case IB_CM_DREQ_RECEIVED: uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; break; case IB_CM_DREP_RECEIVED: uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; break; case IB_CM_MRA_RECEIVED: uvt->resp.u.mra_resp.timeout = evt->param.mra_rcvd.service_timeout; uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE; break; case IB_CM_REJ_RECEIVED: uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason; uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.rej_rcvd.ari_length; info = evt->param.rej_rcvd.ari; break; case IB_CM_LAP_RECEIVED: ib_copy_path_rec_to_user(&uvt->resp.u.lap_resp.path, evt->param.lap_rcvd.alternate_path); uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE; uvt->resp.present = IB_UCM_PRES_ALTERNATE; break; case IB_CM_APR_RECEIVED: uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status; uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.apr_rcvd.info_len; info = evt->param.apr_rcvd.apr_info; break; case IB_CM_SIDR_REQ_RECEIVED: uvt->resp.u.sidr_req_resp.pkey = evt->param.sidr_req_rcvd.pkey; uvt->resp.u.sidr_req_resp.port = evt->param.sidr_req_rcvd.port; uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; break; case IB_CM_SIDR_REP_RECEIVED: ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp, &evt->param.sidr_rep_rcvd); uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.sidr_rep_rcvd.info_len; info = evt->param.sidr_rep_rcvd.info; break; default: uvt->resp.u.send_status = evt->param.send_status; break; } if (uvt->data_len) { uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL); if (!uvt->data) goto err1; uvt->resp.present |= IB_UCM_PRES_DATA; } if (uvt->info_len) { uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL); if (!uvt->info) goto err2; uvt->resp.present |= IB_UCM_PRES_INFO; } return 0; err2: kfree(uvt->data); err1: return -ENOMEM; } static int ib_ucm_event_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) { struct ib_ucm_event *uevent; struct ib_ucm_context *ctx; int result = 0; ctx = cm_id->context; uevent = kzalloc(sizeof *uevent, GFP_KERNEL); if (!uevent) goto err1; uevent->ctx = ctx; uevent->cm_id = cm_id; uevent->resp.uid = ctx->uid; uevent->resp.id = ctx->id; uevent->resp.event = event->event; result = ib_ucm_event_process(event, uevent); if (result) goto err2; mutex_lock(&ctx->file->file_mutex); list_add_tail(&uevent->file_list, &ctx->file->events); list_add_tail(&uevent->ctx_list, &ctx->events); wake_up_interruptible(&ctx->file->poll_wait); mutex_unlock(&ctx->file->file_mutex); return 0; err2: kfree(uevent); err1: /* Destroy new cm_id's */ return ib_ucm_new_cm_id(event->event); } static ssize_t ib_ucm_event(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_ucm_context *ctx; struct ib_ucm_event_get cmd; struct ib_ucm_event *uevent; int result = 0; if (out_len < sizeof(struct ib_ucm_event_resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&file->file_mutex); while (list_empty(&file->events)) { mutex_unlock(&file->file_mutex); if (file->filp->f_flags & O_NONBLOCK) return -EAGAIN; if 
(wait_event_interruptible(file->poll_wait, !list_empty(&file->events))) return -ERESTARTSYS; mutex_lock(&file->file_mutex); } uevent = list_entry(file->events.next, struct ib_ucm_event, file_list); if (ib_ucm_new_cm_id(uevent->resp.event)) { ctx = ib_ucm_ctx_alloc(file); if (!ctx) { result = -ENOMEM; goto done; } ctx->cm_id = uevent->cm_id; ctx->cm_id->context = ctx; uevent->resp.id = ctx->id; } if (copy_to_user((void __user *)(unsigned long)cmd.response, &uevent->resp, sizeof(uevent->resp))) { result = -EFAULT; goto done; } if (uevent->data) { if (cmd.data_len < uevent->data_len) { result = -ENOMEM; goto done; } if (copy_to_user((void __user *)(unsigned long)cmd.data, uevent->data, uevent->data_len)) { result = -EFAULT; goto done; } } if (uevent->info) { if (cmd.info_len < uevent->info_len) { result = -ENOMEM; goto done; } if (copy_to_user((void __user *)(unsigned long)cmd.info, uevent->info, uevent->info_len)) { result = -EFAULT; goto done; } } list_del(&uevent->file_list); list_del(&uevent->ctx_list); uevent->ctx->events_reported++; kfree(uevent->data); kfree(uevent->info); kfree(uevent); done: mutex_unlock(&file->file_mutex); return result; } static ssize_t ib_ucm_create_id(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_ucm_create_id cmd; struct ib_ucm_create_id_resp resp; struct ib_ucm_context *ctx; int result; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&file->file_mutex); ctx = ib_ucm_ctx_alloc(file); mutex_unlock(&file->file_mutex); if (!ctx) return -ENOMEM; ctx->uid = cmd.uid; ctx->cm_id = ib_create_cm_id(file->device->ib_dev, ib_ucm_event_handler, ctx); if (IS_ERR(ctx->cm_id)) { result = PTR_ERR(ctx->cm_id); goto err1; } resp.id = ctx->id; if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) { result = -EFAULT; goto err2; } return 0; err2: ib_destroy_cm_id(ctx->cm_id); err1: mutex_lock(&ctx_id_mutex); idr_remove(&ctx_id_table, ctx->id); mutex_unlock(&ctx_id_mutex); kfree(ctx); return result; } static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_ucm_destroy_id cmd; struct ib_ucm_destroy_id_resp resp; struct ib_ucm_context *ctx; int result = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&ctx_id_mutex); ctx = idr_find(&ctx_id_table, cmd.id); if (!ctx) ctx = ERR_PTR(-ENOENT); else if (ctx->file != file) ctx = ERR_PTR(-EINVAL); else idr_remove(&ctx_id_table, ctx->id); mutex_unlock(&ctx_id_mutex); if (IS_ERR(ctx)) return PTR_ERR(ctx); ib_ucm_ctx_put(ctx); wait_for_completion(&ctx->comp); /* No new events will be generated after destroying the cm_id. */ ib_destroy_cm_id(ctx->cm_id); /* Cleanup events not yet reported to the user. 
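 * This unlinks the context from its file, frees all queued events and
 * destroys the cm_ids of incoming connections that were never
 * delivered to userspace.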
*/ ib_ucm_cleanup_events(ctx); resp.events_reported = ctx->events_reported; if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) result = -EFAULT; kfree(ctx); return result; } static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_ucm_attr_id_resp resp; struct ib_ucm_attr_id cmd; struct ib_ucm_context *ctx; int result = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ib_ucm_ctx_get(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.service_id = ctx->cm_id->service_id; resp.service_mask = ctx->cm_id->service_mask; resp.local_id = ctx->cm_id->local_id; resp.remote_id = ctx->cm_id->remote_id; if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) result = -EFAULT; ib_ucm_ctx_put(ctx); return result; } static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_uverbs_qp_attr resp; struct ib_ucm_init_qp_attr cmd; struct ib_ucm_context *ctx; struct ib_qp_attr qp_attr; int result = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ib_ucm_ctx_get(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); if (result) goto out; ib_copy_qp_attr_to_user(&resp, &qp_attr); if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) result = -EFAULT; out: ib_ucm_ctx_put(ctx); return result; } static int ucm_validate_listen(__be64 service_id, __be64 service_mask) { service_id &= service_mask; if (((service_id & IB_CMA_SERVICE_ID_MASK) == IB_CMA_SERVICE_ID) || ((service_id & IB_SDP_SERVICE_ID_MASK) == IB_SDP_SERVICE_ID)) return -EINVAL; return 0; } static ssize_t ib_ucm_listen(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_ucm_listen cmd; struct ib_ucm_context *ctx; int result; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ib_ucm_ctx_get(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); result = ucm_validate_listen(cmd.service_id, cmd.service_mask); if (result) goto out; result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask); out: ib_ucm_ctx_put(ctx); return result; } static ssize_t ib_ucm_notify(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_ucm_notify cmd; struct ib_ucm_context *ctx; int result; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ib_ucm_ctx_get(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event); ib_ucm_ctx_put(ctx); return result; } static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len) { void *data; *dest = NULL; if (!len) return 0; data = memdup_user((void __user *)(unsigned long)src, len); if (IS_ERR(data)) return PTR_ERR(data); *dest = data; return 0; } static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src) { struct ib_user_path_rec upath; struct ib_sa_path_rec *sa_path; *path = NULL; if (!src) return 0; sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL); if (!sa_path) return -ENOMEM; if (copy_from_user(&upath, (void __user *)(unsigned long)src, sizeof(upath))) { kfree(sa_path); return -EFAULT; } ib_copy_path_rec_from_user(sa_path, &upath); *path = sa_path; return 
0; } static ssize_t ib_ucm_send_req(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_cm_req_param param; struct ib_ucm_context *ctx; struct ib_ucm_req cmd; int result; param.private_data = NULL; param.primary_path = NULL; param.alternate_path = NULL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len); if (result) goto done; result = ib_ucm_path_get(&param.primary_path, cmd.primary_path); if (result) goto done; result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path); if (result) goto done; param.private_data_len = cmd.len; param.service_id = cmd.sid; param.qp_num = cmd.qpn; param.qp_type = cmd.qp_type; param.starting_psn = cmd.psn; param.peer_to_peer = cmd.peer_to_peer; param.responder_resources = cmd.responder_resources; param.initiator_depth = cmd.initiator_depth; param.remote_cm_response_timeout = cmd.remote_cm_response_timeout; param.flow_control = cmd.flow_control; param.local_cm_response_timeout = cmd.local_cm_response_timeout; param.retry_count = cmd.retry_count; param.rnr_retry_count = cmd.rnr_retry_count; param.max_cm_retries = cmd.max_cm_retries; param.srq = cmd.srq; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { result = ib_send_cm_req(ctx->cm_id, &param); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); done: kfree(param.private_data); kfree(param.primary_path); kfree(param.alternate_path); return result; } static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_cm_rep_param param; struct ib_ucm_context *ctx; struct ib_ucm_rep cmd; int result; param.private_data = NULL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len); if (result) return result; param.qp_num = cmd.qpn; param.starting_psn = cmd.psn; param.private_data_len = cmd.len; param.responder_resources = cmd.responder_resources; param.initiator_depth = cmd.initiator_depth; param.failover_accepted = cmd.failover_accepted; param.flow_control = cmd.flow_control; param.rnr_retry_count = cmd.rnr_retry_count; param.srq = cmd.srq; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { ctx->uid = cmd.uid; result = ib_send_cm_rep(ctx->cm_id, &param); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); kfree(param.private_data); return result; } static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int (*func)(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len)) { struct ib_ucm_private_data cmd; struct ib_ucm_context *ctx; const void *private_data = NULL; int result; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len); if (result) return result; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { result = func(ctx->cm_id, private_data, cmd.len); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); kfree(private_data); return result; } static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu); } static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq); } static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { return 
ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep); } static ssize_t ib_ucm_send_info(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int (*func)(struct ib_cm_id *cm_id, int status, const void *info, u8 info_len, const void *data, u8 data_len)) { struct ib_ucm_context *ctx; struct ib_ucm_info cmd; const void *data = NULL; const void *info = NULL; int result; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len); if (result) goto done; result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len); if (result) goto done; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { result = func(ctx->cm_id, cmd.status, info, cmd.info_len, data, cmd.data_len); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); done: kfree(data); kfree(info); return result; } static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej); } static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr); } static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_ucm_context *ctx; struct ib_ucm_mra cmd; const void *data = NULL; int result; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; result = ib_ucm_alloc_data(&data, cmd.data, cmd.len); if (result) return result; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); kfree(data); return result; } static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_ucm_context *ctx; struct ib_sa_path_rec *path = NULL; struct ib_ucm_lap cmd; const void *data = NULL; int result; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; result = ib_ucm_alloc_data(&data, cmd.data, cmd.len); if (result) goto done; result = ib_ucm_path_get(&path, cmd.path); if (result) goto done; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); done: kfree(data); kfree(path); return result; } static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_cm_sidr_req_param param; struct ib_ucm_context *ctx; struct ib_ucm_sidr_req cmd; int result; param.private_data = NULL; param.path = NULL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len); if (result) goto done; result = ib_ucm_path_get(&param.path, cmd.path); if (result) goto done; param.private_data_len = cmd.len; param.service_id = cmd.sid; param.timeout_ms = cmd.timeout; param.max_cm_retries = cmd.max_cm_retries; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { result = ib_send_cm_sidr_req(ctx->cm_id, &param); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); done: kfree(param.private_data); kfree(param.path); return result; } static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) { struct ib_cm_sidr_rep_param param; struct ib_ucm_sidr_rep cmd; struct ib_ucm_context *ctx; int result; param.info = NULL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return 
-EFAULT; result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.data_len); if (result) goto done; result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len); if (result) goto done; param.qp_num = cmd.qpn; param.qkey = cmd.qkey; param.status = cmd.status; param.info_length = cmd.info_len; param.private_data_len = cmd.data_len; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { result = ib_send_cm_sidr_rep(ctx->cm_id, &param); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); done: kfree(param.private_data); kfree(param.info); return result; } static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int out_len) = { [IB_USER_CM_CMD_CREATE_ID] = ib_ucm_create_id, [IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id, [IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id, [IB_USER_CM_CMD_LISTEN] = ib_ucm_listen, [IB_USER_CM_CMD_NOTIFY] = ib_ucm_notify, [IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req, [IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep, [IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu, [IB_USER_CM_CMD_SEND_DREQ] = ib_ucm_send_dreq, [IB_USER_CM_CMD_SEND_DREP] = ib_ucm_send_drep, [IB_USER_CM_CMD_SEND_REJ] = ib_ucm_send_rej, [IB_USER_CM_CMD_SEND_MRA] = ib_ucm_send_mra, [IB_USER_CM_CMD_SEND_LAP] = ib_ucm_send_lap, [IB_USER_CM_CMD_SEND_APR] = ib_ucm_send_apr, [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req, [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep, [IB_USER_CM_CMD_EVENT] = ib_ucm_event, [IB_USER_CM_CMD_INIT_QP_ATTR] = ib_ucm_init_qp_attr, }; static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) { struct ib_ucm_file *file = filp->private_data; struct ib_ucm_cmd_hdr hdr; ssize_t result; if (len < sizeof(hdr)) return -EINVAL; if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table)) return -EINVAL; if (hdr.in + sizeof(hdr) > len) return -EINVAL; result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); if (!result) result = len; return result; } static unsigned int ib_ucm_poll(struct file *filp, struct poll_table_struct *wait) { struct ib_ucm_file *file = filp->private_data; unsigned int mask = 0; poll_wait(filp, &file->poll_wait, wait); if (!list_empty(&file->events)) mask = POLLIN | POLLRDNORM; return mask; } /* * ib_ucm_open() does not need the BKL: * * - no global state is referred to; * - there is no ioctl method to race against; * - no further module initialization is required for open to work * after the device is registered. 
*/ static int ib_ucm_open(struct inode *inode, struct file *filp) { struct ib_ucm_file *file; file = kmalloc(sizeof(*file), GFP_KERNEL); if (!file) return -ENOMEM; INIT_LIST_HEAD(&file->events); INIT_LIST_HEAD(&file->ctxs); init_waitqueue_head(&file->poll_wait); mutex_init(&file->file_mutex); filp->private_data = file; file->filp = filp; file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev); return nonseekable_open(inode, filp); } static int ib_ucm_close(struct inode *inode, struct file *filp) { struct ib_ucm_file *file = filp->private_data; struct ib_ucm_context *ctx; mutex_lock(&file->file_mutex); while (!list_empty(&file->ctxs)) { ctx = list_entry(file->ctxs.next, struct ib_ucm_context, file_list); mutex_unlock(&file->file_mutex); mutex_lock(&ctx_id_mutex); idr_remove(&ctx_id_table, ctx->id); mutex_unlock(&ctx_id_mutex); ib_destroy_cm_id(ctx->cm_id); ib_ucm_cleanup_events(ctx); kfree(ctx); mutex_lock(&file->file_mutex); } mutex_unlock(&file->file_mutex); kfree(file); return 0; } static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES); static void ib_ucm_release_dev(struct device *dev) { struct ib_ucm_device *ucm_dev; ucm_dev = container_of(dev, struct ib_ucm_device, dev); cdev_del(&ucm_dev->cdev); if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) clear_bit(ucm_dev->devnum, dev_map); else clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map); kfree(ucm_dev); } static const struct file_operations ucm_fops = { .owner = THIS_MODULE, .open = ib_ucm_open, .release = ib_ucm_close, .write = ib_ucm_write, .poll = ib_ucm_poll, .llseek = no_llseek, }; static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, char *buf) { struct ib_ucm_device *ucm_dev; ucm_dev = container_of(dev, struct ib_ucm_device, dev); return sprintf(buf, "%s\n", ucm_dev->ib_dev->name); } static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); static dev_t overflow_maj; static int find_overflow_devnum(void) { int ret; if (!overflow_maj) { ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES, "infiniband_cm"); if (ret) { pr_err("ucm: couldn't register dynamic device number\n"); return ret; } } ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES); if (ret >= IB_UCM_MAX_DEVICES) return -1; return ret; } static void ib_ucm_add_one(struct ib_device *device) { int devnum; dev_t base; struct ib_ucm_device *ucm_dev; if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1)) return; ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); if (!ucm_dev) return; ucm_dev->ib_dev = device; devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES); if (devnum >= IB_UCM_MAX_DEVICES) { devnum = find_overflow_devnum(); if (devnum < 0) goto err; ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES; base = devnum + overflow_maj; set_bit(devnum, overflow_map); } else { ucm_dev->devnum = devnum; base = devnum + IB_UCM_BASE_DEV; set_bit(devnum, dev_map); } cdev_init(&ucm_dev->cdev, &ucm_fops); ucm_dev->cdev.owner = THIS_MODULE; kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum); if (cdev_add(&ucm_dev->cdev, base, 1)) goto err; ucm_dev->dev.class = &cm_class; ucm_dev->dev.parent = device->dma_device; ucm_dev->dev.devt = ucm_dev->cdev.dev; ucm_dev->dev.release = ib_ucm_release_dev; dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum); if (device_register(&ucm_dev->dev)) goto err_cdev; if (device_create_file(&ucm_dev->dev, &dev_attr_ibdev)) goto err_dev; ib_set_client_data(device, &ucm_client, ucm_dev); return; err_dev: device_unregister(&ucm_dev->dev); err_cdev: cdev_del(&ucm_dev->cdev); if 
(ucm_dev->devnum < IB_UCM_MAX_DEVICES) clear_bit(devnum, dev_map); else clear_bit(devnum, overflow_map); err: kfree(ucm_dev); return; } static void ib_ucm_remove_one(struct ib_device *device, void *client_data) { struct ib_ucm_device *ucm_dev = client_data; if (!ucm_dev) return; device_unregister(&ucm_dev->dev); } static CLASS_ATTR_STRING(abi_version, S_IRUGO, __stringify(IB_USER_CM_ABI_VERSION)); static int __init ib_ucm_init(void) { int ret; ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES, "infiniband_cm"); if (ret) { pr_err("ucm: couldn't register device number\n"); goto error1; } ret = class_create_file(&cm_class, &class_attr_abi_version.attr); if (ret) { pr_err("ucm: couldn't create abi_version attribute\n"); goto error2; } ret = ib_register_client(&ucm_client); if (ret) { pr_err("ucm: couldn't register client\n"); goto error3; } return 0; error3: class_remove_file(&cm_class, &class_attr_abi_version.attr); error2: unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); error1: return ret; } static void __exit ib_ucm_cleanup(void) { ib_unregister_client(&ucm_client); class_remove_file(&cm_class, &class_attr_abi_version.attr); unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); if (overflow_maj) unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES); idr_destroy(&ctx_id_table); } module_init(ib_ucm_init); module_exit(ib_ucm_cleanup);
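/*
 * Later kernels additionally harden ib_ucm_write() against callers whose
 * credentials changed after the file was opened (e.g. an fd inherited
 * across a setuid exec). A minimal sketch of that check, assuming the
 * ib_safe_file_access() helper from <rdma/ib.h> is available, placed at
 * the top of ib_ucm_write():
 *
 *	if (!ib_safe_file_access(filp)) {
 *		pr_err_once("ucm_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
 *			    task_tgid_vnr(current), current->comm);
 *		return -EACCES;
 *	}
 */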
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5054_0
crossvul-cpp_data_good_5861_22
/* * Support for Intel AES-NI instructions. This file contains glue * code, the real AES implementation is in intel-aes_asm.S. * * Copyright (C) 2008, Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD * interface for 64-bit kernels. * Authors: Adrian Hoban <adrian.hoban@intel.com> * Gabriele Paoloni <gabriele.paoloni@intel.com> * Tadeusz Struk (tadeusz.struk@intel.com) * Aidan O'Mahony (aidan.o.mahony@intel.com) * Copyright (c) 2010, Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/hardirq.h> #include <linux/types.h> #include <linux/crypto.h> #include <linux/module.h> #include <linux/err.h> #include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/cryptd.h> #include <crypto/ctr.h> #include <crypto/b128ops.h> #include <crypto/lrw.h> #include <crypto/xts.h> #include <asm/cpu_device_id.h> #include <asm/i387.h> #include <asm/crypto/aes.h> #include <crypto/ablk_helper.h> #include <crypto/scatterwalk.h> #include <crypto/internal/aead.h> #include <linux/workqueue.h> #include <linux/spinlock.h> #ifdef CONFIG_X86_64 #include <asm/crypto/glue_helper.h> #endif /* This data is stored at the end of the crypto_tfm struct. * It's a type of per "session" data storage location. * This needs to be 16 byte aligned. */ struct aesni_rfc4106_gcm_ctx { u8 hash_subkey[16]; struct crypto_aes_ctx aes_key_expanded; u8 nonce[4]; struct cryptd_aead *cryptd_tfm; }; struct aesni_gcm_set_hash_subkey_result { int err; struct completion completion; }; struct aesni_hash_subkey_req_data { u8 iv[16]; struct aesni_gcm_set_hash_subkey_result result; struct scatterlist sg; }; #define AESNI_ALIGN (16) #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) #define RFC4106_HASH_SUBKEY_SIZE 16 struct aesni_lrw_ctx { struct lrw_table_ctx lrw_table; u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; }; struct aesni_xts_ctx { u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; }; asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len); asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len); asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len); asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); int crypto_fpu_init(void); void crypto_fpu_exit(void); #define AVX_GEN2_OPTSIZE 640 #define AVX_GEN4_OPTSIZE 4096 #ifdef CONFIG_X86_64 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, bool enc, u8 *iv); /* asmlinkage void aesni_gcm_enc() * void *ctx, AES Key schedule. Starts on a 16 byte boundary. * u8 *out, Ciphertext output. Encrypt in-place is allowed. 
* const u8 *in, Plaintext input * unsigned long plaintext_len, Length of data in bytes for encryption. * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association) * concatenated with 8 byte Initialisation Vector (from IPSec ESP * Payload) concatenated with 0x00000001. 16-byte aligned pointer. * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. * const u8 *aad, Additional Authentication Data (AAD) * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this * is going to be 8 or 12 bytes * u8 *auth_tag, Authenticated Tag output. * unsigned long auth_tag_len), Authenticated Tag Length in bytes. * Valid values are 16 (most likely), 12 or 8. */ asmlinkage void aesni_gcm_enc(void *ctx, u8 *out, const u8 *in, unsigned long plaintext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); /* asmlinkage void aesni_gcm_dec() * void *ctx, AES Key schedule. Starts on a 16 byte boundary. * u8 *out, Plaintext output. Decrypt in-place is allowed. * const u8 *in, Ciphertext input * unsigned long ciphertext_len, Length of data in bytes for decryption. * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association) * concatenated with 8 byte Initialisation Vector (from IPSec ESP * Payload) concatenated with 0x00000001. 16-byte aligned pointer. * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. * const u8 *aad, Additional Authentication Data (AAD) * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going * to be 8 or 12 bytes * u8 *auth_tag, Authenticated Tag output. * unsigned long auth_tag_len) Authenticated Tag Length in bytes. * Valid values are 16 (most likely), 12 or 8. */ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); #ifdef CONFIG_AS_AVX asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); /* * asmlinkage void aesni_gcm_precomp_avx_gen2() * gcm_data *my_ctx_data, context data * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. 
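 *
 * For GCM the hash sub key is H = AES-ENC(key, 0^128). This driver
 * derives it in rfc4106_set_hash_subkey() further down: a single
 * all-zero block is encrypted with ctr(aes) under an all-zero counter
 * block, so the CTR output 0 XOR AES-ENC(key, 0^128) is exactly H.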
*/ asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey); asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out, const u8 *in, unsigned long plaintext_len, u8 *iv, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); static void aesni_gcm_enc_avx(void *ctx, u8 *out, const u8 *in, unsigned long plaintext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len) { if (plaintext_len < AVX_GEN2_OPTSIZE) { aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, aad_len, auth_tag, auth_tag_len); } else { aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, aad_len, auth_tag, auth_tag_len); } } static void aesni_gcm_dec_avx(void *ctx, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len) { if (ciphertext_len < AVX_GEN2_OPTSIZE) { aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, aad_len, auth_tag, auth_tag_len); } else { aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, aad_len, auth_tag, auth_tag_len); } } #endif #ifdef CONFIG_AS_AVX2 /* * asmlinkage void aesni_gcm_precomp_avx_gen4() * gcm_data *my_ctx_data, context data * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. */ asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey); asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out, const u8 *in, unsigned long plaintext_len, u8 *iv, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); static void aesni_gcm_enc_avx2(void *ctx, u8 *out, const u8 *in, unsigned long plaintext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len) { if (plaintext_len < AVX_GEN2_OPTSIZE) { aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, aad_len, auth_tag, auth_tag_len); } else if (plaintext_len < AVX_GEN4_OPTSIZE) { aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, aad_len, auth_tag, auth_tag_len); } else { aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad, aad_len, auth_tag, auth_tag_len); } } static void aesni_gcm_dec_avx2(void *ctx, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len) { if (ciphertext_len < AVX_GEN2_OPTSIZE) { aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, aad_len, auth_tag, auth_tag_len); } else if (ciphertext_len < AVX_GEN4_OPTSIZE) { aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, aad_len, auth_tag, auth_tag_len); } else { aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad, aad_len, auth_tag, auth_tag_len); } } #endif static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out, const 
u8 *in, unsigned long plaintext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); static inline struct aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) { return (struct aesni_rfc4106_gcm_ctx *) PTR_ALIGN((u8 *) crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN); } #endif static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) { unsigned long addr = (unsigned long)raw_ctx; unsigned long align = AESNI_ALIGN; if (align <= crypto_tfm_ctx_alignment()) align = 1; return (struct crypto_aes_ctx *)ALIGN(addr, align); } static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx); u32 *flags = &tfm->crt_flags; int err; if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && key_len != AES_KEYSIZE_256) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } if (!irq_fpu_usable()) err = crypto_aes_expand_key(ctx, in_key, key_len); else { kernel_fpu_begin(); err = aesni_set_key(ctx, in_key, key_len); kernel_fpu_end(); } return err; } static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len); } static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); if (!irq_fpu_usable()) crypto_aes_encrypt_x86(ctx, dst, src); else { kernel_fpu_begin(); aesni_enc(ctx, dst, src); kernel_fpu_end(); } } static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); if (!irq_fpu_usable()) crypto_aes_decrypt_x86(ctx, dst, src); else { kernel_fpu_begin(); aesni_dec(ctx, dst, src); kernel_fpu_end(); } } static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); aesni_enc(ctx, dst, src); } static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); aesni_dec(ctx, dst, src); } static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } kernel_fpu_end(); return err; } static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } 
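	/*
	 * CRYPTO_TFM_REQ_MAY_SLEEP is cleared in these helpers because the
	 * whole scatterlist walk runs between kernel_fpu_begin() and
	 * kernel_fpu_end(), where sleeping is not allowed.
	 */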
kernel_fpu_end(); return err; } static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } kernel_fpu_end(); return err; } static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } kernel_fpu_end(); return err; } #ifdef CONFIG_X86_64 static void ctr_crypt_final(struct crypto_aes_ctx *ctx, struct blkcipher_walk *walk) { u8 *ctrblk = walk->iv; u8 keystream[AES_BLOCK_SIZE]; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; aesni_enc(ctx, keystream, ctrblk); crypto_xor(keystream, src, nbytes); memcpy(dst, keystream, nbytes); crypto_inc(ctrblk, AES_BLOCK_SIZE); } #ifdef CONFIG_AS_AVX static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) { /* * based on key length, override with the by8 version * of ctr mode encryption/decryption for improved performance * aes_set_key_common() ensures that key length is one of * {128,192,256} */ if (ctx->key_length == AES_KEYSIZE_128) aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len); else if (ctx->key_length == AES_KEYSIZE_192) aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len); else aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len); } #endif static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { ctr_crypt_final(ctx, &walk); err = blkcipher_walk_done(desc, &walk, 0); } kernel_fpu_end(); return err; } #endif static int ablk_ecb_init(struct crypto_tfm *tfm) { return ablk_init_common(tfm, "__driver-ecb-aes-aesni"); } static int ablk_cbc_init(struct crypto_tfm *tfm) { return ablk_init_common(tfm, "__driver-cbc-aes-aesni"); } #ifdef CONFIG_X86_64 static int ablk_ctr_init(struct crypto_tfm *tfm) { return ablk_init_common(tfm, "__driver-ctr-aes-aesni"); } #endif #if IS_ENABLED(CONFIG_CRYPTO_PCBC) static int ablk_pcbc_init(struct crypto_tfm *tfm) { return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))"); } #endif static void 
lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes) { aesni_ecb_enc(ctx, blks, blks, nbytes); } static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes) { aesni_ecb_dec(ctx, blks, blks, nbytes); } static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm); int err; err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key, keylen - AES_BLOCK_SIZE); if (err) return err; return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE); } static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm) { struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_free_table(&ctx->lrw_table); } static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct lrw_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .table_ctx = &ctx->lrw_table, .crypt_ctx = aes_ctx(ctx->raw_aes_ctx), .crypt_fn = lrw_xts_encrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = lrw_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct lrw_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .table_ctx = &ctx->lrw_table, .crypt_ctx = aes_ctx(ctx->raw_aes_ctx), .crypt_fn = lrw_xts_decrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = lrw_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm); u32 *flags = &tfm->crt_flags; int err; /* key consists of keys of equal size concatenated, therefore * the length must be even */ if (keylen % 2) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } /* first half of xts-key is for crypt */ err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2); if (err) return err; /* second half of xts-key is for tweak */ return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2, keylen / 2); } static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in) { aesni_enc(ctx, out, in); } #ifdef CONFIG_X86_64 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) { glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); } static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) { glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec)); } static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv) { aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv); } static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv) { aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv); } static const struct common_glue_ctx aesni_enc_xts = { .num_funcs = 2, .fpu_blocks_limit = 1, .funcs = { { .num_blocks = 8, .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) } }, { .num_blocks = 1, .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) } } } }; static const struct common_glue_ctx aesni_dec_xts = { .num_funcs = 2, .fpu_blocks_limit = 1, .funcs = { { .num_blocks = 8, .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) } }, { .num_blocks = 1, .fn_u = { .xts = 
GLUE_XTS_FUNC_CAST(aesni_xts_dec) } } } }; static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes, XTS_TWEAK_CAST(aesni_xts_tweak), aes_ctx(ctx->raw_tweak_ctx), aes_ctx(ctx->raw_crypt_ctx)); } static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes, XTS_TWEAK_CAST(aesni_xts_tweak), aes_ctx(ctx->raw_tweak_ctx), aes_ctx(ctx->raw_crypt_ctx)); } #else static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), .tweak_fn = aesni_xts_tweak, .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), .crypt_fn = lrw_xts_encrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = xts_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), .tweak_fn = aesni_xts_tweak, .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), .crypt_fn = lrw_xts_decrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = xts_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } #endif #ifdef CONFIG_X86_64 static int rfc4106_init(struct crypto_tfm *tfm) { struct cryptd_aead *cryptd_tfm; struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); struct crypto_aead *cryptd_child; struct aesni_rfc4106_gcm_ctx *child_ctx; cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); if (IS_ERR(cryptd_tfm)) return PTR_ERR(cryptd_tfm); cryptd_child = cryptd_aead_child(cryptd_tfm); child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); memcpy(child_ctx, ctx, sizeof(*ctx)); ctx->cryptd_tfm = cryptd_tfm; tfm->crt_aead.reqsize = sizeof(struct aead_request) + crypto_aead_reqsize(&cryptd_tfm->base); return 0; } static void rfc4106_exit(struct crypto_tfm *tfm) { struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); if (!IS_ERR(ctx->cryptd_tfm)) cryptd_free_aead(ctx->cryptd_tfm); return; } static void rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err) { struct aesni_gcm_set_hash_subkey_result *result = req->data; if (err == -EINPROGRESS) return; result->err = err; complete(&result->completion); } static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) { struct crypto_ablkcipher *ctr_tfm; struct ablkcipher_request *req; int ret = -EINVAL; struct aesni_hash_subkey_req_data *req_data; ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0); if (IS_ERR(ctr_tfm)) return PTR_ERR(ctr_tfm); crypto_ablkcipher_clear_flags(ctr_tfm, ~0); ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); if (ret) goto out_free_ablkcipher; ret = -ENOMEM; req = 
ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero. */
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(
			&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
				aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* key is not aligned: use an auxiliary aligned pointer */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
		key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long.
*/ static int rfc4106_set_authsize(struct crypto_aead *parent, unsigned int authsize) { struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } crypto_aead_crt(parent)->authsize = authsize; crypto_aead_crt(cryptd_child)->authsize = authsize; return 0; } static int rfc4106_encrypt(struct aead_request *req) { int ret; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); if (!irq_fpu_usable()) { struct aead_request *cryptd_req = (struct aead_request *) aead_request_ctx(req); memcpy(cryptd_req, req, sizeof(*req)); aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_aead_encrypt(cryptd_req); } else { struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); kernel_fpu_begin(); ret = cryptd_child->base.crt_aead.encrypt(req); kernel_fpu_end(); return ret; } } static int rfc4106_decrypt(struct aead_request *req) { int ret; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); if (!irq_fpu_usable()) { struct aead_request *cryptd_req = (struct aead_request *) aead_request_ctx(req); memcpy(cryptd_req, req, sizeof(*req)); aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_aead_decrypt(cryptd_req); } else { struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); kernel_fpu_begin(); ret = cryptd_child->base.crt_aead.decrypt(req); kernel_fpu_end(); return ret; } } static int __driver_rfc4106_encrypt(struct aead_request *req) { u8 one_entry_in_sg = 0; u8 *src, *dst, *assoc; __be32 counter = cpu_to_be32(1); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); unsigned long auth_tag_len = crypto_aead_authsize(tfm); u8 iv_tab[16+AESNI_ALIGN]; u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN); struct scatter_walk src_sg_walk; struct scatter_walk assoc_sg_walk; struct scatter_walk dst_sg_walk; unsigned int i; /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers We need to have the AAD length equal */ /* to 8 or 12 bytes */ if (unlikely(req->assoclen != 8 && req->assoclen != 12)) return -EINVAL; /* IV below built */ for (i = 0; i < 4; i++) *(iv+i) = ctx->nonce[i]; for (i = 0; i < 8; i++) *(iv+4+i) = req->iv[i]; *((__be32 *)(iv+12)) = counter; if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { one_entry_in_sg = 1; scatterwalk_start(&src_sg_walk, req->src); scatterwalk_start(&assoc_sg_walk, req->assoc); src = scatterwalk_map(&src_sg_walk); assoc = scatterwalk_map(&assoc_sg_walk); dst = src; if (unlikely(req->src != req->dst)) { scatterwalk_start(&dst_sg_walk, req->dst); dst = scatterwalk_map(&dst_sg_walk); } } else { /* Allocate memory for src, dst, assoc */ src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, GFP_ATOMIC); if (unlikely(!src)) return -ENOMEM; assoc = (src + req->cryptlen + auth_tag_len); scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); scatterwalk_map_and_copy(assoc, req->assoc, 0, req->assoclen, 0); dst = src; } aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst + ((unsigned long)req->cryptlen), auth_tag_len); /* The authTag (aka the Integrity Check Value) needs to be written * back to the packet. 
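 * aesni_gcm_enc_tfm() has already placed the tag at dst + req->cryptlen,
 * so the linear-buffer path below copies payload and tag back to
 * req->dst with a single scatterwalk_map_and_copy() of
 * req->cryptlen + auth_tag_len bytes.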
*/ if (one_entry_in_sg) { if (unlikely(req->src != req->dst)) { scatterwalk_unmap(dst); scatterwalk_done(&dst_sg_walk, 0, 0); } scatterwalk_unmap(src); scatterwalk_unmap(assoc); scatterwalk_done(&src_sg_walk, 0, 0); scatterwalk_done(&assoc_sg_walk, 0, 0); } else { scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen + auth_tag_len, 1); kfree(src); } return 0; } static int __driver_rfc4106_decrypt(struct aead_request *req) { u8 one_entry_in_sg = 0; u8 *src, *dst, *assoc; unsigned long tempCipherLen = 0; __be32 counter = cpu_to_be32(1); int retval = 0; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); unsigned long auth_tag_len = crypto_aead_authsize(tfm); u8 iv_and_authTag[32+AESNI_ALIGN]; u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN); u8 *authTag = iv + 16; struct scatter_walk src_sg_walk; struct scatter_walk assoc_sg_walk; struct scatter_walk dst_sg_walk; unsigned int i; if (unlikely((req->cryptlen < auth_tag_len) || (req->assoclen != 8 && req->assoclen != 12))) return -EINVAL; /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers, we need to have the AAD length */ /* equal to 8 or 12 bytes. */ tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); /* IV below built */ for (i = 0; i < 4; i++) *(iv+i) = ctx->nonce[i]; for (i = 0; i < 8; i++) *(iv+4+i) = req->iv[i]; *((__be32 *)(iv+12)) = counter; if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { one_entry_in_sg = 1; scatterwalk_start(&src_sg_walk, req->src); scatterwalk_start(&assoc_sg_walk, req->assoc); src = scatterwalk_map(&src_sg_walk); assoc = scatterwalk_map(&assoc_sg_walk); dst = src; if (unlikely(req->src != req->dst)) { scatterwalk_start(&dst_sg_walk, req->dst); dst = scatterwalk_map(&dst_sg_walk); } } else { /* Allocate memory for src, dst, assoc */ src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); if (!src) return -ENOMEM; /* assoc sits directly after the ciphertext in the bounce buffer; offsetting by auth_tag_len as well would run past the allocation above */ assoc = (src + req->cryptlen); scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); scatterwalk_map_and_copy(assoc, req->assoc, 0, req->assoclen, 0); dst = src; } aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, ctx->hash_subkey, assoc, (unsigned long)req->assoclen, authTag, auth_tag_len); /* Compare generated tag with passed in tag. */ retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
-EBADMSG : 0; if (one_entry_in_sg) { if (unlikely(req->src != req->dst)) { scatterwalk_unmap(dst); scatterwalk_done(&dst_sg_walk, 0, 0); } scatterwalk_unmap(src); scatterwalk_unmap(assoc); scatterwalk_done(&src_sg_walk, 0, 0); scatterwalk_done(&assoc_sg_walk, 0, 0); } else { /* only the decrypted payload (tempCipherLen bytes) goes back to the caller; the tag region of req->dst must not be overwritten */ scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1); kfree(src); } return retval; } #endif static struct crypto_alg aesni_algs[] = { { .cra_name = "aes", .cra_driver_name = "aes-aesni", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } } }, { .cra_name = "__aes-aesni", .cra_driver_name = "__driver-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, .cia_encrypt = __aes_encrypt, .cia_decrypt = __aes_decrypt } } }, { .cra_name = "__ecb-aes-aesni", .cra_driver_name = "__driver-ecb-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aes_set_key, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, }, }, { .cra_name = "__cbc-aes-aesni", .cra_driver_name = "__driver-cbc-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aes_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }, }, { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_ecb_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, }, { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_cbc_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, #ifdef CONFIG_X86_64 }, { .cra_name = "__ctr-aes-aesni", .cra_driver_name = "__driver-ctr-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
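/* A 1-byte block size is deliberate: CTR mode turns AES into a stream cipher, so the crypto API must not pad or round requests up to AES_BLOCK_SIZE. */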
.cra_blocksize = 1, .cra_ctxsize = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = aes_set_key, .encrypt = ctr_crypt, .decrypt = ctr_crypt, }, }, }, { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_ctr_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_encrypt, .geniv = "chainiv", }, }, }, { .cra_name = "__gcm-aes-aesni", .cra_driver_name = "__driver-gcm-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_AEAD, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN, .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_u = { .aead = { .encrypt = __driver_rfc4106_encrypt, .decrypt = __driver_rfc4106_decrypt, }, }, }, { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN, .cra_alignmask = 0, .cra_type = &crypto_nivaead_type, .cra_module = THIS_MODULE, .cra_init = rfc4106_init, .cra_exit = rfc4106_exit, .cra_u = { .aead = { .setkey = rfc4106_set_key, .setauthsize = rfc4106_set_authsize, .encrypt = rfc4106_encrypt, .decrypt = rfc4106_decrypt, .geniv = "seqiv", .ivsize = 8, .maxauthsize = 16, }, }, #endif #if IS_ENABLED(CONFIG_CRYPTO_PCBC) }, { .cra_name = "pcbc(aes)", .cra_driver_name = "pcbc-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_pcbc_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, #endif }, { .cra_name = "__lrw-aes-aesni", .cra_driver_name = "__driver-lrw-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aesni_lrw_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_exit = lrw_aesni_exit_tfm, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = lrw_aesni_setkey, .encrypt = lrw_encrypt, .decrypt = lrw_decrypt, }, }, }, { .cra_name = "__xts-aes-aesni", .cra_driver_name = "__driver-xts-aes-aesni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aesni_xts_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = xts_aesni_setkey, .encrypt = xts_encrypt, 
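/* XTS takes two independent AES keys (data key and tweak key), hence the doubled min/max key sizes above; decryption below is the inverse under the same per-sector tweak derived from the IV. */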
.decrypt = xts_decrypt, }, }, }, { .cra_name = "lrw(aes)", .cra_driver_name = "lrw-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, }, { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, } }; static const struct x86_cpu_id aesni_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_AES), {} }; MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); static int __init aesni_init(void) { int err; if (!x86_match_cpu(aesni_cpu_id)) return -ENODEV; #ifdef CONFIG_X86_64 #ifdef CONFIG_AS_AVX2 if (boot_cpu_has(X86_FEATURE_AVX2)) { pr_info("AVX2 version of gcm_enc/dec engaged.\n"); aesni_gcm_enc_tfm = aesni_gcm_enc_avx2; aesni_gcm_dec_tfm = aesni_gcm_dec_avx2; } else #endif #ifdef CONFIG_AS_AVX if (boot_cpu_has(X86_FEATURE_AVX)) { pr_info("AVX version of gcm_enc/dec engaged.\n"); aesni_gcm_enc_tfm = aesni_gcm_enc_avx; aesni_gcm_dec_tfm = aesni_gcm_dec_avx; } else #endif { pr_info("SSE version of gcm_enc/dec engaged.\n"); aesni_gcm_enc_tfm = aesni_gcm_enc; aesni_gcm_dec_tfm = aesni_gcm_dec; } aesni_ctr_enc_tfm = aesni_ctr_enc; #ifdef CONFIG_AS_AVX if (cpu_has_avx) { /* optimize performance of ctr mode encryption transform */ aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; pr_info("AES CTR mode by8 optimization enabled\n"); } #endif #endif err = crypto_fpu_init(); if (err) return err; return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); } static void __exit aesni_exit(void) { crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); crypto_fpu_exit(); } module_init(aesni_init); module_exit(aesni_exit); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("aes");
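/* MODULE_ALIAS_CRYPTO also registers the "crypto-aes" alias; the crypto core requests modules with the crypto- prefix, so algorithm-name autoloading cannot be abused to pull in unrelated modules by name. */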
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_22
crossvul-cpp_data_good_2165_0
/****************************************************************************** * emulate.c * * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. * * Copyright (c) 2005 Keir Fraser * * Linux coding style, mod r/m decoder, segment base fixes, real-mode * privileged instructions: * * Copyright (C) 2006 Qumranet * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 */ #include <linux/kvm_host.h> #include "kvm_cache_regs.h" #include <linux/module.h> #include <asm/kvm_emulate.h> #include <linux/stringify.h> #include "x86.h" #include "tss.h" /* * Operand types */ #define OpNone 0ull #define OpImplicit 1ull /* No generic decode */ #define OpReg 2ull /* Register */ #define OpMem 3ull /* Memory */ #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */ #define OpDI 5ull /* ES:DI/EDI/RDI */ #define OpMem64 6ull /* Memory, 64-bit */ #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */ #define OpDX 8ull /* DX register */ #define OpCL 9ull /* CL register (for shifts) */ #define OpImmByte 10ull /* 8-bit sign extended immediate */ #define OpOne 11ull /* Implied 1 */ #define OpImm 12ull /* Sign extended up to 32-bit immediate */ #define OpMem16 13ull /* Memory operand (16-bit). */ #define OpMem32 14ull /* Memory operand (32-bit). */ #define OpImmU 15ull /* Immediate operand, zero extended */ #define OpSI 16ull /* SI/ESI/RSI */ #define OpImmFAddr 17ull /* Immediate far address */ #define OpMemFAddr 18ull /* Far address in memory */ #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */ #define OpES 20ull /* ES */ #define OpCS 21ull /* CS */ #define OpSS 22ull /* SS */ #define OpDS 23ull /* DS */ #define OpFS 24ull /* FS */ #define OpGS 25ull /* GS */ #define OpMem8 26ull /* 8-bit zero extended memory operand */ #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */ #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) /* * Opcode effective-address decode tables. * Note that we only emulate instructions that have at least one memory * operand (excluding implicit stack references). We assume that stack * references and instruction fetches will never occur in special memory * areas that require emulation. So, for example, 'mov <imm>,<reg>' need * not be handled. */ /* Operand sizes: 8-bit operands or specified/overridden size. */ #define ByteOp (1<<0) /* 8-bit operands. */ /* Destination operand type. */ #define DstShift 1 #define ImplicitOps (OpImplicit << DstShift) #define DstReg (OpReg << DstShift) #define DstMem (OpMem << DstShift) #define DstAcc (OpAcc << DstShift) #define DstDI (OpDI << DstShift) #define DstMem64 (OpMem64 << DstShift) #define DstImmUByte (OpImmUByte << DstShift) #define DstDX (OpDX << DstShift) #define DstAccLo (OpAccLo << DstShift) #define DstMask (OpMask << DstShift) /* Source operand type. 
*/ #define SrcShift 6 #define SrcNone (OpNone << SrcShift) #define SrcReg (OpReg << SrcShift) #define SrcMem (OpMem << SrcShift) #define SrcMem16 (OpMem16 << SrcShift) #define SrcMem32 (OpMem32 << SrcShift) #define SrcImm (OpImm << SrcShift) #define SrcImmByte (OpImmByte << SrcShift) #define SrcOne (OpOne << SrcShift) #define SrcImmUByte (OpImmUByte << SrcShift) #define SrcImmU (OpImmU << SrcShift) #define SrcSI (OpSI << SrcShift) #define SrcXLat (OpXLat << SrcShift) #define SrcImmFAddr (OpImmFAddr << SrcShift) #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) #define SrcImmU16 (OpImmU16 << SrcShift) #define SrcImm64 (OpImm64 << SrcShift) #define SrcDX (OpDX << SrcShift) #define SrcMem8 (OpMem8 << SrcShift) #define SrcAccHi (OpAccHi << SrcShift) #define SrcMask (OpMask << SrcShift) #define BitOp (1<<11) #define MemAbs (1<<12) /* Memory operand is absolute displacement */ #define String (1<<13) /* String instruction (rep capable) */ #define Stack (1<<14) /* Stack instruction (push/pop) */ #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ #define Escape (5<<15) /* Escape to coprocessor instruction */ #define Sse (1<<18) /* SSE Vector instruction */ /* Generic ModRM decode. */ #define ModRM (1<<19) /* Destination is only written; never read. */ #define Mov (1<<20) /* Misc flags */ #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */ #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ #define Undefined (1<<25) /* No Such Instruction */ #define Lock (1<<26) /* lock prefix is allowed for the instruction */ #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ #define No64 (1<<28) #define PageTable (1 << 29) /* instruction used to write page table */ #define NotImpl (1 << 30) /* instruction is not implemented */ /* Source 2 operand type */ #define Src2Shift (31) #define Src2None (OpNone << Src2Shift) #define Src2Mem (OpMem << Src2Shift) #define Src2CL (OpCL << Src2Shift) #define Src2ImmByte (OpImmByte << Src2Shift) #define Src2One (OpOne << Src2Shift) #define Src2Imm (OpImm << Src2Shift) #define Src2ES (OpES << Src2Shift) #define Src2CS (OpCS << Src2Shift) #define Src2SS (OpSS << Src2Shift) #define Src2DS (OpDS << Src2Shift) #define Src2FS (OpFS << Src2Shift) #define Src2GS (OpGS << Src2Shift) #define Src2Mask (OpMask << Src2Shift) #define Mmx ((u64)1 << 40) /* MMX Vector instruction */ #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */ #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. 
MOVDQU) */ #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ #define NoWrite ((u64)1 << 45) /* No writeback */ #define SrcWrite ((u64)1 << 46) /* Write back src operand */ #define NoMod ((u64)1 << 47) /* Mod field is ignored */ #define Intercept ((u64)1 << 48) /* Has valid intercept field */ #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ #define NoBigReal ((u64)1 << 50) /* No big real mode */ #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) #define X2(x...) x, x #define X3(x...) X2(x), x #define X4(x...) X2(x), X2(x) #define X5(x...) X4(x), x #define X6(x...) X4(x), X2(x) #define X7(x...) X4(x), X3(x) #define X8(x...) X4(x), X4(x) #define X16(x...) X8(x), X8(x) #define NR_FASTOP (ilog2(sizeof(ulong)) + 1) #define FASTOP_SIZE 8 /* * fastop functions have a special calling convention: * * dst: rax (in/out) * src: rdx (in/out) * src2: rcx (in) * flags: rflags (in/out) * ex: rsi (in:fastop pointer, out:zero if exception) * * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for * different operand sizes can be reached by calculation, rather than a jump * table (which would be bigger than the code). * * fastop functions are declared as taking a never-defined fastop parameter, * so they can't be called from C directly. */ struct fastop; struct opcode { u64 flags : 56; u64 intercept : 8; union { int (*execute)(struct x86_emulate_ctxt *ctxt); const struct opcode *group; const struct group_dual *gdual; const struct gprefix *gprefix; const struct escape *esc; void (*fastop)(struct fastop *fake); } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); }; struct group_dual { struct opcode mod012[8]; struct opcode mod3[8]; }; struct gprefix { struct opcode pfx_no; struct opcode pfx_66; struct opcode pfx_f2; struct opcode pfx_f3; }; struct escape { struct opcode op[8]; struct opcode high[64]; }; /* EFLAGS bit definitions. */ #define EFLG_ID (1<<21) #define EFLG_VIP (1<<20) #define EFLG_VIF (1<<19) #define EFLG_AC (1<<18) #define EFLG_VM (1<<17) #define EFLG_RF (1<<16) #define EFLG_IOPL (3<<12) #define EFLG_NT (1<<14) #define EFLG_OF (1<<11) #define EFLG_DF (1<<10) #define EFLG_IF (1<<9) #define EFLG_TF (1<<8) #define EFLG_SF (1<<7) #define EFLG_ZF (1<<6) #define EFLG_AF (1<<4) #define EFLG_PF (1<<2) #define EFLG_CF (1<<0) #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a #define EFLG_RESERVED_ONE_MASK 2 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) { if (!(ctxt->regs_valid & (1 << nr))) { ctxt->regs_valid |= 1 << nr; ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); } return ctxt->_regs[nr]; } static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) { ctxt->regs_valid |= 1 << nr; ctxt->regs_dirty |= 1 << nr; return &ctxt->_regs[nr]; } static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) { reg_read(ctxt, nr); return reg_write(ctxt, nr); } static void writeback_registers(struct x86_emulate_ctxt *ctxt) { unsigned reg; for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16) ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); } static void invalidate_registers(struct x86_emulate_ctxt *ctxt) { ctxt->regs_dirty = 0; ctxt->regs_valid = 0; } /* * These EFLAGS bits are restored from saved value during emulation, and * any changes are written back to the saved value after emulation. 
*/ #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) #ifdef CONFIG_X86_64 #define ON64(x) x #else #define ON64(x) #endif static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" #define FOP_RET "ret \n\t" #define FOP_START(op) \ extern void em_##op(struct fastop *fake); \ asm(".pushsection .text, \"ax\" \n\t" \ ".global em_" #op " \n\t" \ FOP_ALIGN \ "em_" #op ": \n\t" #define FOP_END \ ".popsection") #define FOPNOP() FOP_ALIGN FOP_RET #define FOP1E(op, dst) \ FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET #define FOP1EEX(op, dst) \ FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception) #define FASTOP1(op) \ FOP_START(op) \ FOP1E(op##b, al) \ FOP1E(op##w, ax) \ FOP1E(op##l, eax) \ ON64(FOP1E(op##q, rax)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m) */ #define FASTOP1SRC2(op, name) \ FOP_START(name) \ FOP1E(op, cl) \ FOP1E(op, cx) \ FOP1E(op, ecx) \ ON64(FOP1E(op, rcx)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */ #define FASTOP1SRC2EX(op, name) \ FOP_START(name) \ FOP1EEX(op, cl) \ FOP1EEX(op, cx) \ FOP1EEX(op, ecx) \ ON64(FOP1EEX(op, rcx)) \ FOP_END #define FOP2E(op, dst, src) \ FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET #define FASTOP2(op) \ FOP_START(op) \ FOP2E(op##b, al, dl) \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, word only */ #define FASTOP2W(op) \ FOP_START(op) \ FOPNOP() \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, src is CL */ #define FASTOP2CL(op) \ FOP_START(op) \ FOP2E(op##b, al, cl) \ FOP2E(op##w, ax, cl) \ FOP2E(op##l, eax, cl) \ ON64(FOP2E(op##q, rax, cl)) \ FOP_END #define FOP3E(op, dst, src, src2) \ FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET /* 3-operand, word-only, src2=cl */ #define FASTOP3WCL(op) \ FOP_START(op) \ FOPNOP() \ FOP3E(op##w, ax, dx, cl) \ FOP3E(op##l, eax, edx, cl) \ ON64(FOP3E(op##q, rax, rdx, cl)) \ FOP_END /* Special case for SETcc - 1 instruction per cc */ #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t" asm(".global kvm_fastop_exception \n" "kvm_fastop_exception: xor %esi, %esi; ret"); FOP_START(setcc) FOP_SETCC(seto) FOP_SETCC(setno) FOP_SETCC(setc) FOP_SETCC(setnc) FOP_SETCC(setz) FOP_SETCC(setnz) FOP_SETCC(setbe) FOP_SETCC(setnbe) FOP_SETCC(sets) FOP_SETCC(setns) FOP_SETCC(setp) FOP_SETCC(setnp) FOP_SETCC(setl) FOP_SETCC(setnl) FOP_SETCC(setle) FOP_SETCC(setnle) FOP_END; FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET FOP_END; static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage) { struct x86_instruction_info info = { .intercept = intercept, .rep_prefix = ctxt->rep_prefix, .modrm_mod = ctxt->modrm_mod, .modrm_reg = ctxt->modrm_reg, .modrm_rm = ctxt->modrm_rm, .src_val = ctxt->src.val64, .dst_val = ctxt->dst.val64, .src_bytes = ctxt->src.bytes, .dst_bytes = ctxt->dst.bytes, .ad_bytes = ctxt->ad_bytes, .next_rip = ctxt->eip, }; return ctxt->ops->intercept(ctxt, &info, stage); } static void assign_masked(ulong *dest, ulong src, ulong mask) { *dest = (*dest & ~mask) | (src & mask); } static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { return (1UL << (ctxt->ad_bytes << 3)) - 1; } static ulong stack_mask(struct x86_emulate_ctxt *ctxt) { u16 sel; struct desc_struct ss; if (ctxt->mode == X86EMUL_MODE_PROT64) return ~0UL; ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, 
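/* SS.d selects the stack width computed below: 16-bit if clear, 32-bit if set */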
VCPU_SREG_SS); return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ } static int stack_size(struct x86_emulate_ctxt *ctxt) { return (__fls(stack_mask(ctxt)) + 1) >> 3; } /* Access/update address held in a register, based on addressing mode. */ static inline unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) { if (ctxt->ad_bytes == sizeof(unsigned long)) return reg; else return reg & ad_mask(ctxt); } static inline unsigned long register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg) { return address_mask(ctxt, reg); } static void masked_increment(ulong *reg, ulong mask, int inc) { assign_masked(reg, *reg + inc, mask); } static inline void register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) { ulong mask; if (ctxt->ad_bytes == sizeof(unsigned long)) mask = ~0UL; else mask = ad_mask(ctxt); masked_increment(reg, mask, inc); } static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) { masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); } static u32 desc_limit_scaled(struct desc_struct *desc) { u32 limit = get_desc_limit(desc); return desc->g ? (limit << 12) | 0xfff : limit; } static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) { if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) return 0; return ctxt->ops->get_cached_segment_base(ctxt, seg); } static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, u32 error, bool valid) { WARN_ON(vec > 0x1f); ctxt->exception.vector = vec; ctxt->exception.error_code = error; ctxt->exception.error_code_valid = valid; return X86EMUL_PROPAGATE_FAULT; } static int emulate_db(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DB_VECTOR, 0, false); } static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, GP_VECTOR, err, true); } static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, SS_VECTOR, err, true); } static int emulate_ud(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, UD_VECTOR, 0, false); } static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, TS_VECTOR, err, true); } static int emulate_de(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DE_VECTOR, 0, false); } static int emulate_nm(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, NM_VECTOR, 0, false); } static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, int cs_l) { switch (ctxt->op_bytes) { case 2: ctxt->_eip = (u16)dst; break; case 4: ctxt->_eip = (u32)dst; break; case 8: if ((cs_l && is_noncanonical_address(dst)) || (!cs_l && (dst & ~(u32)-1))) return emulate_gp(ctxt, 0); ctxt->_eip = dst; break; default: WARN(1, "unsupported eip assignment size\n"); } return X86EMUL_CONTINUE; } static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64); } static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { return assign_eip_near(ctxt, ctxt->_eip + rel); } static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) { u16 selector; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); return selector; } static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, unsigned seg) { u16 dummy; u32 base3; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); 
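/* Re-use the cached descriptor and base; only the selector changes. */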
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); } /* * x86 defines three classes of vector instructions: explicitly * aligned, explicitly unaligned, and the rest, which change behaviour * depending on whether they're AVX encoded or not. * * Also included is CMPXCHG16B which is not a vector instruction, yet it is * subject to the same check. */ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) { if (likely(size < 16)) return false; if (ctxt->d & Aligned) return true; else if (ctxt->d & Unaligned) return false; else if (ctxt->d & Avx) return false; else return true; } static int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned size, bool write, bool fetch, ulong *linear) { struct desc_struct desc; bool usable; ulong la; u32 lim; u16 sel; unsigned cpl; la = seg_base(ctxt, addr.seg) + addr.ea; switch (ctxt->mode) { case X86EMUL_MODE_PROT64: if (((signed long)la << 16) >> 16 != la) return emulate_gp(ctxt, 0); break; default: usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, addr.seg); if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && write) goto bad; /* unreadable code segment */ if (!fetch && (desc.type & 8) && !(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && (ctxt->d & NoBigReal)) { /* la is between zero and 0xffff */ if (la > 0xffff || (u32)(la + size - 1) > 0xffff) goto bad; } else if ((desc.type & 8) || !(desc.type & 4)) { /* expand-up segment */ if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) goto bad; } else { /* expand-down segment */ if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) goto bad; lim = desc.d ? 0xffffffff : 0xffff; if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) goto bad; } cpl = ctxt->ops->cpl(ctxt); if (!(desc.type & 8)) { /* data segment */ if (cpl > desc.dpl) goto bad; } else if ((desc.type & 8) && !(desc.type & 4)) { /* nonconforming code segment */ if (cpl != desc.dpl) goto bad; } else if ((desc.type & 8) && (desc.type & 4)) { /* conforming code segment */ if (cpl < desc.dpl) goto bad; } break; } if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8) la &= (u32)-1; if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) return emulate_gp(ctxt, 0); *linear = la; return X86EMUL_CONTINUE; bad: if (addr.seg == VCPU_SREG_SS) return emulate_ss(ctxt, sel); else return emulate_gp(ctxt, sel); } static int linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned size, bool write, ulong *linear) { return __linearize(ctxt, addr, size, write, false, linear); } static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); } /* * Prefetch the remaining bytes of the instruction without crossing page * boundary if they are not in fetch_cache yet. 
*/ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) { int rc; unsigned size; unsigned long linear; int cur_size = ctxt->fetch.end - ctxt->fetch.data; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = ctxt->eip + cur_size }; size = 15UL ^ cur_size; rc = __linearize(ctxt, addr, size, false, true, &linear); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); /* * One instruction can only straddle two pages, * and one has been loaded at the beginning of * x86_decode_insn. So, if not enough bytes * still, we must have hit the 15-byte boundary. */ if (unlikely(size < op_size)) return X86EMUL_UNHANDLEABLE; rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, size, &ctxt->exception); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; ctxt->fetch.end += size; return X86EMUL_CONTINUE; } static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, unsigned size) { if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size)) return __do_insn_fetch_bytes(ctxt, size); else return X86EMUL_CONTINUE; } /* Fetch next part of the instruction being emulated. */ #define insn_fetch(_type, _ctxt) \ ({ _type _x; \ \ rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += sizeof(_type); \ _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ ctxt->fetch.ptr += sizeof(_type); \ _x; \ }) #define insn_fetch_arr(_arr, _size, _ctxt) \ ({ \ rc = do_insn_fetch_bytes(_ctxt, _size); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += (_size); \ memcpy(_arr, ctxt->fetch.ptr, _size); \ ctxt->fetch.ptr += (_size); \ }) /* * Given the 'reg' portion of a ModRM byte, and a register block, return a * pointer into the block that addresses the relevant register. * @highbyte_regs specifies whether to decode AH,CH,DH,BH. 
*/ static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, int byteop) { void *p; int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; else p = reg_rmw(ctxt, modrm_reg); return p; } static int read_descriptor(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, u16 *size, unsigned long *address, int op_bytes) { int rc; if (op_bytes == 2) op_bytes = 3; *address = 0; rc = segmented_read_std(ctxt, addr, size, 2); if (rc != X86EMUL_CONTINUE) return rc; addr.ea += 2; rc = segmented_read_std(ctxt, addr, address, op_bytes); return rc; } FASTOP2(add); FASTOP2(or); FASTOP2(adc); FASTOP2(sbb); FASTOP2(and); FASTOP2(sub); FASTOP2(xor); FASTOP2(cmp); FASTOP2(test); FASTOP1SRC2(mul, mul_ex); FASTOP1SRC2(imul, imul_ex); FASTOP1SRC2EX(div, div_ex); FASTOP1SRC2EX(idiv, idiv_ex); FASTOP3WCL(shld); FASTOP3WCL(shrd); FASTOP2W(imul); FASTOP1(not); FASTOP1(neg); FASTOP1(inc); FASTOP1(dec); FASTOP2CL(rol); FASTOP2CL(ror); FASTOP2CL(rcl); FASTOP2CL(rcr); FASTOP2CL(shl); FASTOP2CL(shr); FASTOP2CL(sar); FASTOP2W(bsf); FASTOP2W(bsr); FASTOP2W(bt); FASTOP2W(bts); FASTOP2W(btr); FASTOP2W(btc); FASTOP2(xadd); static u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; asm("push %[flags]; popf; call *%[fastop]" : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); return rc; } static void fetch_register_operand(struct operand *op) { switch (op->bytes) { case 1: op->val = *(u8 *)op->addr.reg; break; case 2: op->val = *(u16 *)op->addr.reg; break; case 4: op->val = *(u32 *)op->addr.reg; break; case 8: op->val = *(u64 *)op->addr.reg; break; } } static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break; case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break; case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break; case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break; case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break; case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break; case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break; case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break; case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break; case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break; case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break; case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break; case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break; case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break; case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break; case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break; case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break; case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break; case 9: asm("movdqa 
%0, %%xmm9" : : "m"(*data)); break; case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break; case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break; case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break; case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break; case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break; case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; case 2: asm("movq %%mm2, %0" : "=m"(*data)); break; case 3: asm("movq %%mm3, %0" : "=m"(*data)); break; case 4: asm("movq %%mm4, %0" : "=m"(*data)); break; case 5: asm("movq %%mm5, %0" : "=m"(*data)); break; case 6: asm("movq %%mm6, %0" : "=m"(*data)); break; case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; case 2: asm("movq %0, %%mm2" : : "m"(*data)); break; case 3: asm("movq %0, %%mm3" : : "m"(*data)); break; case 4: asm("movq %0, %%mm4" : : "m"(*data)); break; case 5: asm("movq %0, %%mm5" : : "m"(*data)); break; case 6: asm("movq %0, %%mm6" : : "m"(*data)); break; case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static int em_fninit(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fninit"); ctxt->ops->put_fpu(ctxt); return X86EMUL_CONTINUE; } static int em_fnstcw(struct x86_emulate_ctxt *ctxt) { u16 fcw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstcw %0": "+m"(fcw)); ctxt->ops->put_fpu(ctxt); /* force 2 byte destination */ ctxt->dst.bytes = 2; ctxt->dst.val = fcw; return X86EMUL_CONTINUE; } static int em_fnstsw(struct x86_emulate_ctxt *ctxt) { u16 fsw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstsw %0": "+m"(fsw)); ctxt->ops->put_fpu(ctxt); /* force 2 byte destination */ ctxt->dst.bytes = 2; ctxt->dst.val = fsw; return X86EMUL_CONTINUE; } static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { unsigned reg = ctxt->modrm_reg; if (!(ctxt->d & ModRM)) reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = reg; read_sse_reg(ctxt, &op->vec_val, reg); return; } if (ctxt->d & Mmx) { reg &= 7; op->type = OP_MM; op->bytes = 8; op->addr.mm = reg; return; } op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); fetch_register_operand(op); op->orig_val = op->val; } static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) { if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP) ctxt->modrm_seg = VCPU_SREG_SS; } static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op) { u8 sib; int index_reg, base_reg, scale; int rc = X86EMUL_CONTINUE; ulong modrm_ea = 0; ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); ctxt->modrm_seg = VCPU_SREG_DS; if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = ctxt->modrm_rm; read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); return rc; } if (ctxt->d & Mmx) { op->type = OP_MM; op->bytes = 8; op->addr.mm = ctxt->modrm_rm & 7; return rc; } fetch_register_operand(op); return rc; } op->type = OP_MEM; if (ctxt->ad_bytes == 2) { unsigned bx = reg_read(ctxt, VCPU_REGS_RBX); unsigned bp = reg_read(ctxt, VCPU_REGS_RBP); unsigned si = reg_read(ctxt, VCPU_REGS_RSI); unsigned di = reg_read(ctxt, VCPU_REGS_RDI); /* 16-bit ModR/M decode. */ switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 6) modrm_ea += insn_fetch(u16, ctxt); break; case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(u16, ctxt); break; } switch (ctxt->modrm_rm) { case 0: modrm_ea += bx + si; break; case 1: modrm_ea += bx + di; break; case 2: modrm_ea += bp + si; break; case 3: modrm_ea += bp + di; break; case 4: modrm_ea += si; break; case 5: modrm_ea += di; break; case 6: if (ctxt->modrm_mod != 0) modrm_ea += bp; break; case 7: modrm_ea += bx; break; } if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) ctxt->modrm_seg = VCPU_SREG_SS; modrm_ea = (u16)modrm_ea; } else { /* 32/64-bit ModR/M decode. 
*/ if ((ctxt->modrm_rm & 7) == 4) { sib = insn_fetch(u8, ctxt); index_reg |= (sib >> 3) & 7; base_reg |= sib & 7; scale = sib >> 6; if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) modrm_ea += insn_fetch(s32, ctxt); else { modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } if (index_reg != 4) modrm_ea += reg_read(ctxt, index_reg) << scale; } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->rip_relative = 1; } else { base_reg = ctxt->modrm_rm; modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 5) modrm_ea += insn_fetch(s32, ctxt); break; case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(s32, ctxt); break; } } op->addr.mem.ea = modrm_ea; if (ctxt->ad_bytes != 8) ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; done: return rc; } static int decode_abs(struct x86_emulate_ctxt *ctxt, struct operand *op) { int rc = X86EMUL_CONTINUE; op->type = OP_MEM; switch (ctxt->ad_bytes) { case 2: op->addr.mem.ea = insn_fetch(u16, ctxt); break; case 4: op->addr.mem.ea = insn_fetch(u32, ctxt); break; case 8: op->addr.mem.ea = insn_fetch(u64, ctxt); break; } done: return rc; } static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) { long sv = 0, mask; if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { mask = ~((long)ctxt->dst.bytes * 8 - 1); if (ctxt->src.bytes == 2) sv = (s16)ctxt->src.val & (s16)mask; else if (ctxt->src.bytes == 4) sv = (s32)ctxt->src.val & (s32)mask; else sv = (s64)ctxt->src.val & (s64)mask; ctxt->dst.addr.mem.ea += (sv >> 3); } /* only subword offset */ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; } static int read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *dest, unsigned size) { int rc; struct read_cache *mc = &ctxt->mem_read; if (mc->pos < mc->end) goto read_cached; WARN_ON((mc->end + size) >= sizeof(mc->data)); rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; mc->end += size; read_cached: memcpy(dest, mc->data + mc->pos, size); mc->pos += size; return X86EMUL_CONTINUE; } static int segmented_read(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return read_emulated(ctxt, linear, data, size); } static int segmented_write(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->write_emulated(ctxt, linear, data, size, &ctxt->exception); } static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *orig_data, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, size, &ctxt->exception); } static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, unsigned int size, unsigned short port, void *dest) { struct read_cache *rc = &ctxt->io_read; if (rc->pos == rc->end) { /* refill pio read ahead */ unsigned int in_page, n; unsigned int count = ctxt->rep_prefix ? address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; in_page = (ctxt->eflags & EFLG_DF) ? 
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); if (n == 0) n = 1; rc->pos = rc->end = 0; if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) return 0; rc->end = n * size; } if (ctxt->rep_prefix && (ctxt->d & String) && !(ctxt->eflags & EFLG_DF)) { ctxt->dst.data = rc->data + rc->pos; ctxt->dst.type = OP_MEM_STR; ctxt->dst.count = (rc->end - rc->pos) / size; rc->pos = rc->end; } else { memcpy(dest, rc->data + rc->pos, size); rc->pos += size; } return 1; } static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, u16 index, struct desc_struct *desc) { struct desc_ptr dt; ulong addr; ctxt->ops->get_idt(ctxt, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_ptr *dt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 base3 = 0; if (selector & 1 << 2) { struct desc_struct desc; u16 sel; memset (dt, 0, sizeof *dt); if (!ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_LDTR)) return; dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ dt->address = get_desc_base(&desc) | ((u64)base3 << 32); } else ops->get_gdt(ctxt, dt); } /* allowed just for 8 bytes segments */ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, ulong *desc_addr_p) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); *desc_addr_p = addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* allowed just for 8 bytes segments */ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* Does not support long mode */ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, bool in_task_switch, struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. 
for * unreal mode) */ ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { /* VM86 needs a clean new segment descriptor */ set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; /* NULL selector is not valid for TR, CS and SS (except for long mode) */ if ((seg == VCPU_SREG_CS || (seg == VCPU_SREG_SS && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) || seg == VCPU_SREG_TR) && null_selector) goto exception; /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; if (null_selector) /* for NULL selector skip all following checks */ goto load; ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR; /* can't load system descriptor into segment selector */ if (seg <= VCPU_SREG_GS && !seg_desc.s) goto exception; if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: /* * segment is not a writable data segment or segment * selector's RPL != CPL or segment selector's RPL != CPL */ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: if (!(seg_desc.type & 8)) goto exception; if (seg_desc.type & 4) { /* conforming */ if (dpl > cpl) goto exception; } else { /* nonconforming */ if (rpl > cpl || dpl != cpl) goto exception; } /* in long-mode d/b must be clear if l is set */ if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: /* DS, ES, FS, or GS */ /* * segment is not a data or readable code segment or * ((segment is a data or nonconforming code segment) * and (both RPL and CPL > DPL)) */ if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) goto exception; break; } if (seg_desc.s) { /* mark segment as accessed */ seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); if (desc) *desc = seg_desc; return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); } static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { u8 cpl = ctxt->ops->cpl(ctxt); return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL); } static void write_register_operand(struct operand *op) { /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. 
*/ switch (op->bytes) { case 1: *(u8 *)op->addr.reg = (u8)op->val; break; case 2: *(u16 *)op->addr.reg = (u16)op->val; break; case 4: *op->addr.reg = (u32)op->val; break; /* 64b: zero-extend */ case 8: *op->addr.reg = op->val; break; } } static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) { switch (op->type) { case OP_REG: write_register_operand(op); break; case OP_MEM: if (ctxt->lock_prefix) return segmented_cmpxchg(ctxt, op->addr.mem, &op->orig_val, &op->val, op->bytes); else return segmented_write(ctxt, op->addr.mem, &op->val, op->bytes); break; case OP_MEM_STR: return segmented_write(ctxt, op->addr.mem, op->data, op->bytes * op->count); break; case OP_XMM: write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); break; case OP_MM: write_mmx_reg(ctxt, &op->mm_val, op->addr.mm); break; case OP_NONE: /* no writeback */ break; default: break; } return X86EMUL_CONTINUE; } static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) { struct segmented_address addr; rsp_increment(ctxt, -bytes); addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; return segmented_write(ctxt, addr, data, bytes); } static int em_push(struct x86_emulate_ctxt *ctxt) { /* Disable writeback. */ ctxt->dst.type = OP_NONE; return push(ctxt, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; struct segmented_address addr; addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, len); return rc; } static int em_pop(struct x86_emulate_ctxt *ctxt) { return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int emulate_popf(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; unsigned long val, change_mask; int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &val, len); if (rc != X86EMUL_CONTINUE) return rc; change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; switch(ctxt->mode) { case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT16: if (cpl == 0) change_mask |= EFLG_IOPL; if (cpl <= iopl) change_mask |= EFLG_IF; break; case X86EMUL_MODE_VM86: if (iopl < 3) return emulate_gp(ctxt, 0); change_mask |= EFLG_IF; break; default: /* real mode */ change_mask |= (EFLG_IOPL | EFLG_IF); break; } *(unsigned long *)dest = (ctxt->eflags & ~change_mask) | (val & change_mask); return rc; } static int em_popf(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = &ctxt->eflags; ctxt->dst.bytes = ctxt->op_bytes; return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int em_enter(struct x86_emulate_ctxt *ctxt) { int rc; unsigned frame_size = ctxt->src.val; unsigned nesting_level = ctxt->src2.val & 31; ulong rbp; if (nesting_level) return X86EMUL_UNHANDLEABLE; rbp = reg_read(ctxt, VCPU_REGS_RBP); rc = push(ctxt, &rbp, stack_size(ctxt)); if (rc != X86EMUL_CONTINUE) return rc; assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), stack_mask(ctxt)); assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RSP) - frame_size, stack_mask(ctxt)); return X86EMUL_CONTINUE; } static int em_leave(struct x86_emulate_ctxt *ctxt) { assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP), stack_mask(ctxt)); return emulate_pop(ctxt, reg_rmw(ctxt, 
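/* LEAVE: RSP was reset to RBP above; now pop the saved frame pointer */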
VCPU_REGS_RBP), ctxt->op_bytes); } static int em_push_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; ctxt->src.val = get_segment_selector(ctxt, seg); return em_push(ctxt); } static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned long selector; int rc; rc = emulate_pop(ctxt, &selector, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; rc = load_segment_descriptor(ctxt, (u16)selector, seg); return rc; } static int em_pusha(struct x86_emulate_ctxt *ctxt) { unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RAX; while (reg <= VCPU_REGS_RDI) { (reg == VCPU_REGS_RSP) ? (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ++reg; } return rc; } static int em_pushf(struct x86_emulate_ctxt *ctxt) { ctxt->src.val = (unsigned long)ctxt->eflags; return em_push(ctxt); } static int em_popa(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { rsp_increment(ctxt, ctxt->op_bytes); --reg; } rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) break; --reg; } return rc; } static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { const struct x86_emulate_ops *ops = ctxt->ops; int rc; struct desc_ptr dt; gva_t cs_addr; gva_t eip_addr; u16 cs, eip; /* TODO: Add limit checks */ ctxt->src.val = ctxt->eflags; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = ctxt->_eip; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ops->get_idt(ctxt, &dt); eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = eip; return rc; } int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { int rc; invalidate_registers(ctxt); rc = __emulate_int_real(ctxt, irq); if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return rc; } static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return __emulate_int_real(ctxt, irq); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* Protected mode interrupts unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; unsigned long temp_eip = 0; unsigned long temp_eflags = 0; unsigned long cs = 0; unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; /* TODO: Add stack limit check */ rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (temp_eip & ~0xffff) return emulate_gp(ctxt, 0); rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 
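/* Real-mode IRET pop order: EIP (done above), CS (just popped), then EFLAGS. */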
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) |
				(ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		rc = assign_eip_near(ctxt, ctxt->src.val);
		if (rc != X86EMUL_CONTINUE)
			break;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		rc = assign_eip_near(ctxt, ctxt->src.val);
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 64-bit guest running a 32-bit compat app will therefore #UD!
	 * While this behaviour can be fixed (by emulating) to give the
	 * AMD response, AMD CPUs can't be made to behave like Intel.
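	 * Hence the vendor checks below: outside of long mode the emulator
	 * follows the behaviour of the guest's reported CPU vendor, and
	 * unrecognized vendors get the stricter Intel treatment (#UD).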
*/ if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) return false; /* AMD ("AuthenticAMD") */ if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) return true; /* AMD ("AMDisbetter!") */ if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) return true; /* default: (not Intel, not AMD), apply Intel's stricter rules... */ return false; } static int em_syscall(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); if (!(em_syscall_is_enabled(ctxt))) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); setup_syscalls_segments(ctxt, &cs, &ss); if (!(efer & EFER_SCE)) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); ss_sel = (u16)(msr_data + 8); if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; if (efer & EFER_LMA) { #ifdef CONFIG_X86_64 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; ops->get_msr(ctxt, ctxt->mode == X86EMUL_MODE_PROT64 ? MSR_LSTAR : MSR_CSTAR, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~msr_data; #endif } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt->_eip = (u32)msr_data; ctxt->eflags &= ~(EFLG_VM | EFLG_IF); } return X86EMUL_CONTINUE; } static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Not recognized on AMD in compat mode (but is recognized in legacy * mode). */ if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); /* XXX sysenter/sysexit have not been tested in 64bit mode. * Therefore, we inject an #UD. 
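	 * Real hardware happily executes SYSENTER from a 64-bit code
	 * segment, so the #UD below is an emulator limitation rather than
	 * architectural behaviour.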
*/ if (ctxt->mode == X86EMUL_MODE_PROT64) return emulate_ud(ctxt); setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { case X86EMUL_MODE_PROT32: if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); break; case X86EMUL_MODE_PROT64: if (msr_data == 0x0) return emulate_gp(ctxt, 0); break; default: break; } ctxt->eflags &= ~(EFLG_VM | EFLG_IF); cs_sel = (u16)msr_data; cs_sel &= ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; ss_sel &= ~SELECTOR_RPL_MASK; if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; return X86EMUL_CONTINUE; } static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, &cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; rcx = reg_read(ctxt, VCPU_REGS_RCX); rdx = reg_read(ctxt, VCPU_REGS_RDX); cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; if (is_noncanonical_address(rcx) || is_noncanonical_address(rdx)) return emulate_gp(ctxt, 0); break; } cs_sel |= SELECTOR_RPL_MASK; ss_sel |= SELECTOR_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; } static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) { int iopl; if (ctxt->mode == X86EMUL_MODE_REAL) return false; if (ctxt->mode == X86EMUL_MODE_VM86) return true; iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; return ctxt->ops->cpl(ctxt) > iopl; } static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct tr_seg; u32 base3; int r; u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; unsigned mask = (1 << len) - 1; unsigned long base; ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); if (!tr_seg.p) return false; if (desc_limit_scaled(&tr_seg) < 103) return false; base = get_desc_base(&tr_seg); #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) return false; return true; } static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { if (ctxt->perm_ok) return true; if (emulator_bad_iopl(ctxt)) if 
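	/*
	 * With CPL above IOPL, I/O is only allowed when every bit covering
	 * ports [port, port + len) in the TSS I/O permission bitmap is
	 * clear.  Worked example: port 0x3f9, len 2 reads the 16-bit word
	 * at base + io_bitmap_ptr + 0x7f and requires bits 1 and 2 of it
	 * to be zero.
	 */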
(!emulator_io_port_access_allowed(ctxt, port, len)) return false; ctxt->perm_ok = true; return true; } static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { tss->ip = ctxt->_eip; tss->flag = ctxt->eflags; tss->ax = reg_read(ctxt, VCPU_REGS_RAX); tss->cx = reg_read(ctxt, VCPU_REGS_RCX); tss->dx = reg_read(ctxt, VCPU_REGS_RDX); tss->bx = reg_read(ctxt, VCPU_REGS_RBX); tss->sp = reg_read(ctxt, VCPU_REGS_RSP); tss->bp = reg_read(ctxt, VCPU_REGS_RBP); tss->si = reg_read(ctxt, VCPU_REGS_RSI); tss->di = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); } static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { int ret; u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; /* * SDM says that segment selectors are loaded before segment * descriptors */ set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); cpl = tss->cs & 3; /* * Now load segment descriptors. If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; } static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; save_state_to_tss16(ctxt, &tss_seg); ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return 
ret; } return load_state_from_tss16(ctxt, &tss_seg); } static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { /* CR3 and ldt selector are not saved intentionally */ tss->eip = ctxt->_eip; tss->eflags = ctxt->eflags; tss->eax = reg_read(ctxt, VCPU_REGS_RAX); tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); tss->edx = reg_read(ctxt, VCPU_REGS_RDX); tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); tss->esp = reg_read(ctxt, VCPU_REGS_RSP); tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); tss->esi = reg_read(ctxt, VCPU_REGS_RSI); tss->edi = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); } static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { int ret; u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; /* General purpose registers */ *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; /* * SDM says that segment selectors are loaded before segment * descriptors. This is important because CPL checks will * use CS.RPL. */ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); /* * If we're switching between Protected Mode and VM86, we need to make * sure to update the mode before loading the segment descriptors so * that the selectors are interpreted correctly. */ if (ctxt->eflags & X86_EFLAGS_VM) { ctxt->mode = X86EMUL_MODE_VM86; cpl = 3; } else { ctxt->mode = X86EMUL_MODE_PROT32; cpl = tss->cs & 3; } /* * Now load segment descriptors. 
If fault happens at this stage,
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3.
jmp/call to TSS: Check against DPL of the TSS */ if (reason == TASK_SWITCH_GATE) { if (idt_index != -1) { /* Software interrupts */ struct desc_struct task_gate_desc; int dpl; ret = read_interrupt_descriptor(ctxt, idt_index, &task_gate_desc); if (ret != X86EMUL_CONTINUE) return ret; dpl = task_gate_desc.dpl; if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) return emulate_gp(ctxt, (idt_index << 3) | 0x2); } } else if (reason != TASK_SWITCH_IRET) { int dpl = next_tss_desc.dpl; if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) return emulate_gp(ctxt, tss_selector); } desc_limit = desc_limit_scaled(&next_tss_desc); if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) { return emulate_ts(ctxt, tss_selector & 0xfffc); } if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); } if (reason == TASK_SWITCH_IRET) ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; /* set back link to prev task only if NT bit is set in eflags note that old_tss_sel is not used after this point */ if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) old_tss_sel = 0xffff; if (next_tss_desc.type & 8) ret = task_switch_32(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); else ret = task_switch_16(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; if (reason != TASK_SWITCH_IRET) { next_tss_desc.type |= (1 << 1); /* set busy flag */ write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); } ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); if (has_error_code) { ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; ctxt->lock_prefix = 0; ctxt->src.val = (unsigned long) error_code; ret = em_push(ctxt); } return ret; } int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { int rc; invalidate_registers(ctxt); ctxt->_eip = ctxt->eip; ctxt->dst.type = OP_NONE; rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (rc == X86EMUL_CONTINUE) { ctxt->eip = ctxt->_eip; writeback_registers(ctxt); } return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; } static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, struct operand *op) { int df = (ctxt->eflags & EFLG_DF) ? 
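	/* EFLAGS.DF set: string ops walk down through memory */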
-op->count : op->count;

	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
}

static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but at the very least we
	 * should restore cs */
	if (rc != X86EMUL_CONTINUE)
		goto fail;
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix.
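	 * XCHG with a memory operand asserts LOCK# regardless of any
	 * prefix, so the writeback is forced through the locked-cmpxchg
	 * path.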
*/ ctxt->dst.val = ctxt->src.orig_val; ctxt->lock_prefix = 1; return X86EMUL_CONTINUE; } static int em_imul_3op(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = ctxt->src2.val; return fastop(ctxt, em_imul); } static int em_cwd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.bytes = ctxt->src.bytes; ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); return X86EMUL_CONTINUE; } static int em_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 tsc = 0; ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; return X86EMUL_CONTINUE; } static int em_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 pmc; if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; return X86EMUL_CONTINUE; } static int em_mov(struct x86_emulate_ctxt *ctxt) { memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); return X86EMUL_CONTINUE; } #define FFL(x) bit(X86_FEATURE_##x) static int em_movbe(struct x86_emulate_ctxt *ctxt) { u32 ebx, ecx, edx, eax = 1; u16 tmp; /* * Check MOVBE is set in the guest-visible CPUID leaf. */ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); if (!(ecx & FFL(MOVBE))) return emulate_ud(ctxt); switch (ctxt->op_bytes) { case 2: /* * From MOVBE definition: "...When the operand size is 16 bits, * the upper word of the destination register remains unchanged * ..." * * Both casting ->valptr and ->val to u16 breaks strict aliasing * rules so we have to do the operation almost per hand. */ tmp = (u16)ctxt->src.val; ctxt->dst.val &= ~0xffffUL; ctxt->dst.val |= (unsigned long)swab16(tmp); break; case 4: ctxt->dst.val = swab32((u32)ctxt->src.val); break; case 8: ctxt->dst.val = swab64(ctxt->src.val); break; default: BUG(); } return X86EMUL_CONTINUE; } static int em_cr_write(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) return emulate_gp(ctxt, 0); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_dr_write(struct x86_emulate_ctxt *ctxt) { unsigned long val; if (ctxt->mode == X86EMUL_MODE_PROT64) val = ctxt->src.val & ~0ULL; else val = ctxt->src.val & ~0U; /* #UD condition is already handled. */ if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) return emulate_gp(ctxt, 0); /* Disable writeback. 
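	 * The MOV to DR has already taken effect via ->set_dr() above;
	 * marking dst as OP_NONE keeps the generic writeback stage from
	 * storing anything on top of it.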
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_wrmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int em_rdmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; return X86EMUL_CONTINUE; } static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) { if (ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); return X86EMUL_CONTINUE; } static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); } static int em_lldt(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); } static int em_ltr(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); } static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_clts(struct x86_emulate_ctxt *ctxt) { ulong cr0; cr0 = ctxt->ops->get_cr(ctxt, 0); cr0 &= ~X86_CR0_TS; ctxt->ops->set_cr(ctxt, 0, cr0); return X86EMUL_CONTINUE; } static int em_vmcall(struct x86_emulate_ctxt *ctxt) { int rc = ctxt->ops->fix_hypercall(ctxt); if (rc != X86EMUL_CONTINUE) return rc; /* Let the processor re-execute the fixed hypercall */ ctxt->_eip = ctxt->eip; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, void (*get)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *ptr)) { struct desc_ptr desc_ptr; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; get(ctxt, &desc_ptr); if (ctxt->op_bytes == 2) { ctxt->op_bytes = 4; desc_ptr.address &= 0x00ffffff; } /* Disable writeback. */ ctxt->dst.type = OP_NONE; return segmented_write(ctxt, ctxt->dst.addr.mem, &desc_ptr, 2 + ctxt->op_bytes); } static int em_sgdt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); } static int em_sidt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); } static int em_lgdt(struct x86_emulate_ctxt *ctxt) { struct desc_ptr desc_ptr; int rc; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; ctxt->ops->set_gdt(ctxt, &desc_ptr); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_vmmcall(struct x86_emulate_ctxt *ctxt) { int rc; rc = ctxt->ops->fix_hypercall(ctxt); /* Disable writeback. 
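	 * fix_hypercall() patched the guest's hypercall site to the
	 * instruction matching the host vendor; there is nothing to write
	 * back here.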
*/ ctxt->dst.type = OP_NONE; return rc; } static int em_lidt(struct x86_emulate_ctxt *ctxt) { struct desc_ptr desc_ptr; int rc; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; ctxt->ops->set_idt(ctxt, &desc_ptr); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_smsw(struct x86_emulate_ctxt *ctxt) { if (ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); return X86EMUL_CONTINUE; } static int em_lmsw(struct x86_emulate_ctxt *ctxt) { ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) | (ctxt->src.val & 0x0f)); ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_loop(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_jcxz(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_in(struct x86_emulate_ctxt *ctxt) { if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, &ctxt->dst.val)) return X86EMUL_IO_NEEDED; return X86EMUL_CONTINUE; } static int em_out(struct x86_emulate_ctxt *ctxt) { ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, &ctxt->src.val, 1); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_cli(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->eflags &= ~X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_sti(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; ctxt->eflags |= X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_cpuid(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = reg_read(ctxt, VCPU_REGS_RAX); ecx = reg_read(ctxt, VCPU_REGS_RCX); ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); *reg_write(ctxt, VCPU_REGS_RAX) = eax; *reg_write(ctxt, VCPU_REGS_RBX) = ebx; *reg_write(ctxt, VCPU_REGS_RCX) = ecx; *reg_write(ctxt, VCPU_REGS_RDX) = edx; return X86EMUL_CONTINUE; } static int em_sahf(struct x86_emulate_ctxt *ctxt) { u32 flags; flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; ctxt->eflags &= ~0xffUL; ctxt->eflags |= flags | X86_EFLAGS_FIXED; return X86EMUL_CONTINUE; } static int em_lahf(struct x86_emulate_ctxt *ctxt) { *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; return X86EMUL_CONTINUE; } static int em_bswap(struct x86_emulate_ctxt *ctxt) { switch (ctxt->op_bytes) { #ifdef CONFIG_X86_64 case 8: asm("bswap %0" : "+r"(ctxt->dst.val)); break; #endif default: asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); break; } return X86EMUL_CONTINUE; } static bool valid_cr(int nr) { switch (nr) { case 0: case 2 ... 
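/* only CR0, CR2-CR4 and CR8 are architecturally defined */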
4: case 8: return true; default: return false; } } static int check_cr_read(struct x86_emulate_ctxt *ctxt) { if (!valid_cr(ctxt->modrm_reg)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_cr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int cr = ctxt->modrm_reg; u64 efer = 0; static u64 cr_reserved_bits[] = { 0xffffffff00000000ULL, 0, 0, 0, /* CR3 checked later */ CR4_RESERVED_BITS, 0, 0, 0, CR8_RESERVED_BITS, }; if (!valid_cr(cr)) return emulate_ud(ctxt); if (new_val & cr_reserved_bits[cr]) return emulate_gp(ctxt, 0); switch (cr) { case 0: { u64 cr4; if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) return emulate_gp(ctxt, 0); cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && !(cr4 & X86_CR4_PAE)) return emulate_gp(ctxt, 0); break; } case 3: { u64 rsvd = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) rsvd = CR3_L_MODE_RESERVED_BITS; if (new_val & rsvd) return emulate_gp(ctxt, 0); break; } case 4: { ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) return emulate_gp(ctxt, 0); break; } } return X86EMUL_CONTINUE; } static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) { unsigned long dr7; ctxt->ops->get_dr(ctxt, 7, &dr7); /* Check if DR7.Global_Enable is set */ return dr7 & (1 << 13); } static int check_dr_read(struct x86_emulate_ctxt *ctxt) { int dr = ctxt->modrm_reg; u64 cr4; if (dr > 7) return emulate_ud(ctxt); cr4 = ctxt->ops->get_cr(ctxt, 4); if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) return emulate_ud(ctxt); if (check_dr7_gd(ctxt)) return emulate_db(ctxt); return X86EMUL_CONTINUE; } static int check_dr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int dr = ctxt->modrm_reg; if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) return emulate_gp(ctxt, 0); return check_dr_read(ctxt); } static int check_svme(struct x86_emulate_ctxt *ctxt) { u64 efer; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_SVME)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_svme_pa(struct x86_emulate_ctxt *ctxt) { u64 rax = reg_read(ctxt, VCPU_REGS_RAX); /* Valid physical address? 
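	 * VMRUN/VMLOAD/VMSAVE take a physical address in RAX; bits 63:48
	 * must be clear here, a fixed approximation of the MAXPHYADDR
	 * limit.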
*/ if (rax & 0xffff000000000000ULL) return emulate_gp(ctxt, 0); return check_svme(ctxt); } static int check_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || ctxt->ops->check_pmc(ctxt, rcx)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_in(struct x86_emulate_ctxt *ctxt) { ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_out(struct x86_emulate_ctxt *ctxt) { ctxt->src.bytes = min(ctxt->src.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } #define D(_y) { .flags = (_y) } #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define N D(NotImpl) #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } #define II(_f, _e, _i) \ { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } #define IIP(_f, _e, _i, _p) \ { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } #define D2bv(_f) D((_f) | ByteOp), D(_f) #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) #define I2bvIP(_f, _e, _i, _p) \ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) static const struct opcode group7_rm0[] = { N, I(SrcNone | Priv | EmulateOnUD, em_vmcall), N, N, N, N, N, N, }; static const struct opcode group7_rm1[] = { DI(SrcNone | Priv, monitor), DI(SrcNone | Priv, mwait), N, N, N, N, N, N, }; static const struct opcode group7_rm3[] = { DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), DIP(SrcNone | Prot | Priv, stgi, check_svme), DIP(SrcNone | Prot | Priv, clgi, check_svme), DIP(SrcNone | Prot | Priv, skinit, check_svme), DIP(SrcNone | Prot | Priv, invlpga, check_svme), }; static const struct opcode group7_rm7[] = { N, DIP(SrcNone, rdtscp, check_rdtsc), N, N, N, N, N, N, }; static const struct opcode group1[] = { F(Lock, em_add), F(Lock | PageTable, em_or), F(Lock, em_adc), F(Lock, em_sbb), F(Lock | PageTable, em_and), F(Lock, em_sub), F(Lock, em_xor), F(NoWrite, em_cmp), }; static const struct opcode group1A[] = { I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N, }; static const struct opcode 
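/*
 * Decode-table shorthand, built from the macros above: N marks an
 * undefined opcode, D() decodes operands only, I() and F() bind an
 * ->execute or fastop handler, G()/GD() indirect through a
 * ModRM-indexed group, E() through an x87 escape table and GP()
 * through a mandatory-prefix table.  The 2bv/6ALU variants expand
 * into the usual byte/full-width operand pairs.
 */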
group2[] = { F(DstMem | ModRM, em_rol), F(DstMem | ModRM, em_ror), F(DstMem | ModRM, em_rcl), F(DstMem | ModRM, em_rcr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_shr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_sar), }; static const struct opcode group3[] = { F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcNone | Lock, em_not), F(DstMem | SrcNone | Lock, em_neg), F(DstXacc | Src2Mem, em_mul_ex), F(DstXacc | Src2Mem, em_imul_ex), F(DstXacc | Src2Mem, em_div_ex), F(DstXacc | Src2Mem, em_idiv_ex), }; static const struct opcode group4[] = { F(ByteOp | DstMem | SrcNone | Lock, em_inc), F(ByteOp | DstMem | SrcNone | Lock, em_dec), N, N, N, N, N, N, }; static const struct opcode group5[] = { F(DstMem | SrcNone | Lock, em_inc), F(DstMem | SrcNone | Lock, em_dec), I(SrcMem | Stack, em_grp45), I(SrcMemFAddr | ImplicitOps | Stack, em_call_far), I(SrcMem | Stack, em_grp45), I(SrcMemFAddr | ImplicitOps, em_grp45), I(SrcMem | Stack, em_grp45), D(Undefined), }; static const struct opcode group6[] = { DI(Prot, sldt), DI(Prot, str), II(Prot | Priv | SrcMem16, em_lldt, lldt), II(Prot | Priv | SrcMem16, em_ltr, ltr), N, N, N, N, }; static const struct group_dual group7 = { { II(Mov | DstMem, em_sgdt, sgdt), II(Mov | DstMem, em_sidt, sidt), II(SrcMem | Priv, em_lgdt, lgdt), II(SrcMem | Priv, em_lidt, lidt), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), }, { EXT(0, group7_rm0), EXT(0, group7_rm1), N, EXT(0, group7_rm3), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), } }; static const struct opcode group8[] = { N, N, N, N, F(DstMem | SrcImmByte | NoWrite, em_bt), F(DstMem | SrcImmByte | Lock | PageTable, em_bts), F(DstMem | SrcImmByte | Lock, em_btr), F(DstMem | SrcImmByte | Lock | PageTable, em_btc), }; static const struct group_dual group9 = { { N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, }, { N, N, N, N, N, N, N, N, } }; static const struct opcode group11[] = { I(DstMem | SrcImm | Mov | PageTable, em_mov), X7(D(Undefined)), }; static const struct gprefix pfx_0f_6f_0f_7f = { I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), }; static const struct gprefix pfx_0f_2b = { I(0, em_mov), I(0, em_mov), N, N, }; static const struct gprefix pfx_0f_28_0f_29 = { I(Aligned, em_mov), I(Aligned, em_mov), N, N, }; static const struct gprefix pfx_0f_e7 = { N, I(Sse, em_mov), N, N, }; static const struct escape escape_d9 = { { N, N, N, N, N, N, N, I(DstMem, em_fnstcw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_db = { { N, N, N, N, N, N, N, N, }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_dd = { { N, N, N, N, N, N, N, I(DstMem, em_fnstsw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, 
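/* x87 escape opcodes are mostly unhandled; only fnstcw, fninit and fnstsw are emulated */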
/* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct opcode opcode_table[256] = { /* 0x00 - 0x07 */ F6ALU(Lock, em_add), I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), /* 0x08 - 0x0F */ F6ALU(Lock | PageTable, em_or), I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), N, /* 0x10 - 0x17 */ F6ALU(Lock, em_adc), I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), /* 0x18 - 0x1F */ F6ALU(Lock, em_sbb), I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), /* 0x20 - 0x27 */ F6ALU(Lock | PageTable, em_and), N, N, /* 0x28 - 0x2F */ F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), /* 0x30 - 0x37 */ F6ALU(Lock, em_xor), N, N, /* 0x38 - 0x3F */ F6ALU(NoWrite, em_cmp), N, N, /* 0x40 - 0x4F */ X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), /* 0x50 - 0x57 */ X8(I(SrcReg | Stack, em_push)), /* 0x58 - 0x5F */ X8(I(DstReg | Stack, em_pop)), /* 0x60 - 0x67 */ I(ImplicitOps | Stack | No64, em_pusha), I(ImplicitOps | Stack | No64, em_popa), N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ , N, N, N, N, /* 0x68 - 0x6F */ I(SrcImm | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), I(SrcImmByte | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ /* 0x70 - 0x7F */ X16(D(SrcImmByte)), /* 0x80 - 0x87 */ G(ByteOp | DstMem | SrcImm, group1), G(DstMem | SrcImm, group1), G(ByteOp | DstMem | SrcImm | No64, group1), G(DstMem | SrcImmByte, group1), F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), /* 0x88 - 0x8F */ I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), D(ModRM | SrcMem | NoAccess | DstReg), I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), G(0, group1A), /* 0x90 - 0x97 */ DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), /* 0x98 - 0x9F */ D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), I(SrcImmFAddr | No64, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), II(ImplicitOps | Stack, em_popf, popf), I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), I2bv(SrcSI | DstDI | Mov | String, em_mov), F2bv(SrcSI | DstDI | String | NoWrite, em_cmp), /* 0xA8 - 0xAF */ F2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp), /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), I(ImplicitOps | Stack, em_ret), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, 
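/* 0xC4 - 0xC5: les/lds load a far pointer plus segment (invalid in 64-bit mode) */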
em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm), I(ImplicitOps | Stack, em_ret_far), D(ImplicitOps), DI(SrcImmByte, intn), D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), I(DstAcc | SrcImmUByte | No64, em_aam), I(DstAcc | SrcImmUByte | No64, em_aad), F(DstAcc | ByteOp | No64, em_salc), I(DstAcc | SrcXLat | ByteOp, em_mov), /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ X3(I(SrcImmByte, em_loop)), I(SrcImmByte, em_jcxz), I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), /* 0xE8 - 0xEF */ I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps), I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps), I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), /* 0xF0 - 0xF7 */ N, DI(ImplicitOps, icebp), N, N, DI(ImplicitOps | Priv, hlt), D(ImplicitOps), G(ByteOp, group3), G(0, group3), /* 0xF8 - 0xFF */ D(ImplicitOps), D(ImplicitOps), I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), }; static const struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, N, I(ImplicitOps | EmulateOnUD, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM), N, N, /* 0x10 - 0x1F */ N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, check_cr_write), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, check_dr_write), N, N, N, N, GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b), N, N, N, N, /* 0x30 - 0x3F */ II(ImplicitOps | Priv, em_wrmsr, wrmsr), IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), II(ImplicitOps | Priv, em_rdmsr, rdmsr), IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), I(ImplicitOps | EmulateOnUD, em_sysenter), I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ X16(D(DstReg | SrcMem | ModRM)), /* 0x50 - 0x5F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0x60 - 0x6F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x70 - 0x7F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x80 - 0x8F */ X16(D(SrcImm)), /* 0x90 - 0x9F */ X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), II(ImplicitOps, em_cpuid, cpuid), F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, /* 0xA8 - 0xAF */ I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), DI(ImplicitOps, rsm), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, 
em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), D(ModRM), F(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xB8 - 0xBF */ N, N, G(BitOp, group8), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xC7 */ F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), N, D(DstMem | SrcReg | ModRM | Mov), N, N, N, GD(0, &group9), /* 0xC8 - 0xCF */ X8(I(DstReg, em_bswap)), /* 0xD0 - 0xDF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0xE0 - 0xEF */ N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7), N, N, N, N, N, N, N, N, /* 0xF0 - 0xFF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N }; static const struct gprefix three_byte_0f_38_f0 = { I(DstReg | SrcMem | Mov, em_movbe), N, N, N }; static const struct gprefix three_byte_0f_38_f1 = { I(DstMem | SrcReg | Mov, em_movbe), N, N, N }; /* * Insns below are selected by the prefix which indexed by the third opcode * byte. */ static const struct opcode opcode_map_0f_38[256] = { /* 0x00 - 0x7f */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0x80 - 0xef */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0xf0 - 0xf1 */ GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0), GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1), /* 0xf2 - 0xff */ N, N, X4(N), X8(N) }; #undef D #undef N #undef G #undef GD #undef I #undef GP #undef EXT #undef D2bv #undef D2bvIP #undef I2bv #undef I2bvIP #undef I6ALU static unsigned imm_size(struct x86_emulate_ctxt *ctxt) { unsigned size; size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; if (size == 8) size = 4; return size; } static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned size, bool sign_extension) { int rc = X86EMUL_CONTINUE; op->type = OP_IMM; op->bytes = size; op->addr.mem.ea = ctxt->_eip; /* NB. Immediates are sign-extended as necessary. */ switch (op->bytes) { case 1: op->val = insn_fetch(s8, ctxt); break; case 2: op->val = insn_fetch(s16, ctxt); break; case 4: op->val = insn_fetch(s32, ctxt); break; case 8: op->val = insn_fetch(s64, ctxt); break; } if (!sign_extension) { switch (op->bytes) { case 1: op->val &= 0xff; break; case 2: op->val &= 0xffff; break; case 4: op->val &= 0xffffffff; break; } } done: return rc; } static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned d) { int rc = X86EMUL_CONTINUE; switch (d) { case OpReg: decode_register_operand(ctxt, op); break; case OpImmUByte: rc = decode_imm(ctxt, op, 1, false); break; case OpMem: ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; mem_common: *op = ctxt->memop; ctxt->memopp = op; if (ctxt->d & BitOp) fetch_bit_operand(ctxt); op->orig_val = op->val; break; case OpMem64: ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; goto mem_common; case OpAcc: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccLo: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 
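/* byte-sized mul/div still reads/writes all 16 bits of AX */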
2 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccHi: if (ctxt->d & ByteOp) { op->type = OP_NONE; break; } op->type = OP_REG; op->bytes = ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); op->orig_val = op->val; break; case OpDI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI)); op->addr.mem.seg = VCPU_SREG_ES; op->val = 0; op->count = 1; break; case OpDX: op->type = OP_REG; op->bytes = 2; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); break; case OpCL: op->bytes = 1; op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; break; case OpImmByte: rc = decode_imm(ctxt, op, 1, true); break; case OpOne: op->bytes = 1; op->val = 1; break; case OpImm: rc = decode_imm(ctxt, op, imm_size(ctxt), true); break; case OpImm64: rc = decode_imm(ctxt, op, ctxt->op_bytes, true); break; case OpMem8: ctxt->memop.bytes = 1; if (ctxt->memop.type == OP_REG) { ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, true); fetch_register_operand(&ctxt->memop); } goto mem_common; case OpMem16: ctxt->memop.bytes = 2; goto mem_common; case OpMem32: ctxt->memop.bytes = 4; goto mem_common; case OpImmU16: rc = decode_imm(ctxt, op, 2, false); break; case OpImmU: rc = decode_imm(ctxt, op, imm_size(ctxt), false); break; case OpSI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI)); op->addr.mem.seg = ctxt->seg_override; op->val = 0; op->count = 1; break; case OpXLat: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, VCPU_REGS_RBX) + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); op->addr.mem.seg = ctxt->seg_override; op->val = 0; break; case OpImmFAddr: op->type = OP_IMM; op->addr.mem.ea = ctxt->_eip; op->bytes = ctxt->op_bytes + 2; insn_fetch_arr(op->valptr, op->bytes, ctxt); break; case OpMemFAddr: ctxt->memop.bytes = ctxt->op_bytes + 2; goto mem_common; case OpES: op->val = VCPU_SREG_ES; break; case OpCS: op->val = VCPU_SREG_CS; break; case OpSS: op->val = VCPU_SREG_SS; break; case OpDS: op->val = VCPU_SREG_DS; break; case OpFS: op->val = VCPU_SREG_FS; break; case OpGS: op->val = VCPU_SREG_GS; break; case OpImplicit: /* Special instructions do their own operand decoding. */ default: op->type = OP_NONE; /* Disable writeback. 
*/ break; } done: return rc; } int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) return rc; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ case 0x2e: /* CS override */ case 0x36: /* SS override */ case 0x3e: /* DS override */ has_seg_override = true; ctxt->seg_override = (ctxt->b >> 3) & 3; break; case 0x64: /* FS override */ case 0x65: /* GS override */ has_seg_override = true; ctxt->seg_override = ctxt->b & 7; break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } /* Any legacy prefix after a REX prefix nullifies its effect. */ ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ opcode = opcode_table[ctxt->b]; /* Two-byte opcode? */ if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; /* 0F_38 opcode map */ if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); /* vex-prefix instructions are not implemented */ if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 
0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; else opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } /* Unrecognised? */ if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) { /* * These are copied unconditionally here, and checked unconditionally * in x86_emulate_insn. */ ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) ctxt->op_bytes = 8; if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } /* ModRM and SIB bytes. */ if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; /* * Decode and fetch the source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* * Decode and fetch the second source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* Decode and fetch the destination operand: register or memory. */ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); done: if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea += ctxt->_eip; return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; } bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) { return ctxt->d & PageTable; } static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) { /* The second termination condition only applies for REPE * and REPNE. 
Test whether the repeat string operation prefix is * REPE/REPZ or REPNE/REPNZ and, if so, check the * corresponding termination condition according to: * - if REPE/REPZ and ZF = 0 then done * - if REPNE/REPNZ and ZF = 1 then done */ if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || (ctxt->b == 0xae) || (ctxt->b == 0xaf)) && (((ctxt->rep_prefix == REPE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == 0)) || ((ctxt->rep_prefix == REPNE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) return true; return false; } static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) { bool fault = false; ctxt->ops->get_fpu(ctxt); asm volatile("1: fwait \n\t" "2: \n\t" ".pushsection .fixup,\"ax\" \n\t" "3: \n\t" "movb $1, %[fault] \n\t" "jmp 2b \n\t" ".popsection \n\t" _ASM_EXTABLE(1b, 3b) : [fault]"+qm"(fault)); ctxt->ops->put_fpu(ctxt); if (unlikely(fault)) return emulate_exception(ctxt, MF_VECTOR, 0, false); return X86EMUL_CONTINUE; } static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { if (op->type == OP_MM) read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); } static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) { ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), [fastop]"+S"(fop) : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); if (!fop) /* exception is returned in fop variable */ return emulate_de(ctxt); return X86EMUL_CONTINUE; } void init_decode_cache(struct x86_emulate_ctxt *ctxt) { memset(&ctxt->rip_relative, 0, (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); ctxt->io_read.pos = 0; ctxt->io_read.end = 0; ctxt->mem_read.end = 0; } int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; ctxt->mem_read.pos = 0; /* LOCK prefix is allowed only with some instructions */ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || (ctxt->d & Undefined)) { rc = emulate_ud(ctxt); goto done; } if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } if (ctxt->d & Mmx) { rc = flush_pending_x87_faults(ctxt); if (rc != X86EMUL_CONTINUE) goto done; /* * Now that we know the fpu is exception safe, we can fetch * operands from it. 
*/ fetch_possible_mmx_operand(ctxt, &ctxt->src); fetch_possible_mmx_operand(ctxt, &ctxt->src2); if (!(ctxt->d & Mov)) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } /* Privileged instruction can be executed only in CPL=0 */ if ((ctxt->d & Priv) && ops->cpl(ctxt)) { if (ctxt->d & PrivUD) rc = emulate_ud(ctxt); else rc = emulate_gp(ctxt, 0); goto done; } /* Instruction can only be executed in protected mode */ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { rc = emulate_ud(ctxt); goto done; } /* Do instruction specific permission checks */ if (ctxt->d & CheckPerm) { rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) { /* All REP prefixes have the same first termination condition */ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { ctxt->eip = ctxt->_eip; ctxt->eflags &= ~EFLG_RF; goto done; } } } if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; ctxt->src.orig_val64 = ctxt->src.val64; } if (ctxt->src2.type == OP_MEM) { rc = segmented_read(ctxt, ctxt->src2.addr.mem, &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ rc = segmented_read(ctxt, ctxt->dst.addr.mem, &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) goto done; } ctxt->dst.orig_val = ctxt->dst.val; special_insn: if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) ctxt->eflags |= EFLG_RF; else ctxt->eflags &= ~EFLG_RF; if (ctxt->execute) { if (ctxt->d & Fastop) { void (*fop)(struct fastop *) = (void *)ctxt->execute; rc = fastop(ctxt, fop); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } if (ctxt->opcode_len == 2) goto twobyte_insn; else if (ctxt->opcode_len == 3) goto threebyte_insn; switch (ctxt->b) { case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) goto cannot_emulate; ctxt->dst.val = (s32) ctxt->src.val; break; case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; break; case 0x90 ... 
0x97: /* nop / xchg reg, rax */ if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt->dst.type = OP_NONE; else rc = em_xchg(ctxt); break; case 0x98: /* cbw/cwde/cdqe */ switch (ctxt->op_bytes) { case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; case 0xcc: /* int3 */ rc = emulate_int(ctxt, 3); break; case 0xcd: /* int n */ rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ if (ctxt->eflags & EFLG_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ ctxt->ops->halt(ctxt); break; case 0xf5: /* cmc */ /* complement carry flag from eflags reg */ ctxt->eflags ^= EFLG_CF; break; case 0xf8: /* clc */ ctxt->eflags &= ~EFLG_CF; break; case 0xf9: /* stc */ ctxt->eflags |= EFLG_CF; break; case 0xfc: /* cld */ ctxt->eflags &= ~EFLG_DF; break; case 0xfd: /* std */ ctxt->eflags |= EFLG_DF; break; default: goto cannot_emulate; } if (rc != X86EMUL_CONTINUE) goto done; writeback: if (ctxt->d & SrcWrite) { BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); rc = writeback(ctxt, &ctxt->src); if (rc != X86EMUL_CONTINUE) goto done; } if (!(ctxt->d & NoWrite)) { rc = writeback(ctxt, &ctxt->dst); if (rc != X86EMUL_CONTINUE) goto done; } /* * restore dst type in case the decoding will be reused * (happens for string instructions) */ ctxt->dst.type = saved_dst_type; if ((ctxt->d & SrcMask) == SrcSI) string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); if (ctxt->rep_prefix && (ctxt->d & String)) { unsigned int count; struct read_cache *r = &ctxt->io_read; if ((ctxt->d & SrcMask) == SrcSI) count = ctxt->src.count; else count = ctxt->dst.count; register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -count); if (!string_insn_completed(ctxt)) { /* * Re-enter guest when pio read ahead buffer is empty * or, if it is not used, after every 1024 iterations. */ if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && (r->end == 0 || r->end != r->pos)) { /* * Reset read cache. Usually happens before * decode, but since instruction is restarted * we have to do it here. */ ctxt->mem_read.end = 0; writeback_registers(ctxt); return EMULATION_RESTART; } goto done; /* skip rip writeback */ } ctxt->eflags &= ~EFLG_RF; } ctxt->eip = ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { WARN_ON(ctxt->exception.vector > 0x1f); ctxt->have_exception = true; } if (rc == X86EMUL_INTERCEPTED) return EMULATION_INTERCEPTED; if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: switch (ctxt->b) { case 0x09: /* wbinvd */ (ctxt->ops->wbinvd)(ctxt); break; case 0x08: /* invd */ case 0x0d: /* GrpP (prefetch) */ case 0x18: /* Grp16 (prefetch/nop) */ case 0x1f: /* nop */ break; case 0x20: /* mov cr, reg */ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); break; case 0x40 ... 0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; else if (ctxt->mode != X86EMUL_MODE_PROT64 || ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 
0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xae: /* clflush */ break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: /* movsx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; case 0xc3: /* movnti */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val : (u32) ctxt->src.val; break; default: goto cannot_emulate; } threebyte_insn: if (rc != X86EMUL_CONTINUE) goto done; goto writeback; cannot_emulate: return EMULATION_FAILED; } void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) { invalidate_registers(ctxt); } void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) { writeback_registers(ctxt); }
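/*
 * Editor's note: the tables above dispatch through ModRM "group" entries
 * (Group, GroupDual, RMExt, Prefix, Escape) before an execute hook is
 * chosen. Below is a small, self-contained userspace sketch of the core
 * idea -- the ModRM reg field (bits 5:3) indexes a per-opcode group table,
 * just as x86_decode_insn() does with goffset. All demo_* names are
 * hypothetical and exist only for this illustration; they are not part of
 * the emulator above.
 */
#include <stdio.h>

struct demo_opcode {
	const char *mnemonic;
};

/* Mirrors group5 (opcode 0xFF): the reg field selects the instruction. */
static const struct demo_opcode demo_group5[8] = {
	{ "inc" }, { "dec" }, { "call" }, { "call far" },
	{ "jmp" }, { "jmp far" }, { "push" }, { "(bad)" },
};

static const struct demo_opcode *demo_decode_group(unsigned char modrm)
{
	unsigned int goffset = (modrm >> 3) & 7;	/* as in the decoder */

	return &demo_group5[goffset];
}

int main(void)
{
	/* ff f0 = ModRM mod=11 reg=110 rm=000, i.e. "push rax" */
	printf("ff /%u -> %s\n", (0xf0 >> 3) & 7,
	       demo_decode_group(0xf0)->mnemonic);
	return 0;
}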
./CrossVul/dataset_final_sorted/CWE-264/c/good_2165_0
crossvul-cpp_data_bad_2287_0
/* * linux/fs/block_dev.c * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE */ #include <linux/init.h> #include <linux/mm.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/major.h> #include <linux/device_cgroup.h> #include <linux/highmem.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/blkpg.h> #include <linux/magic.h> #include <linux/buffer_head.h> #include <linux/swap.h> #include <linux/pagevec.h> #include <linux/writeback.h> #include <linux/mpage.h> #include <linux/mount.h> #include <linux/uio.h> #include <linux/namei.h> #include <linux/log2.h> #include <linux/cleancache.h> #include <linux/aio.h> #include <asm/uaccess.h> #include "internal.h" struct bdev_inode { struct block_device bdev; struct inode vfs_inode; }; static const struct address_space_operations def_blk_aops; static inline struct bdev_inode *BDEV_I(struct inode *inode) { return container_of(inode, struct bdev_inode, vfs_inode); } inline struct block_device *I_BDEV(struct inode *inode) { return &BDEV_I(inode)->bdev; } EXPORT_SYMBOL(I_BDEV); /* * Move the inode from its current bdi to a new bdi. If the inode is dirty we * need to move it onto the dirty list of @dst so that the inode is always on * the right list. */ static void bdev_inode_switch_bdi(struct inode *inode, struct backing_dev_info *dst) { struct backing_dev_info *old = inode->i_data.backing_dev_info; bool wakeup_bdi = false; if (unlikely(dst == old)) /* deadlock avoidance */ return; bdi_lock_two(&old->wb, &dst->wb); spin_lock(&inode->i_lock); inode->i_data.backing_dev_info = dst; if (inode->i_state & I_DIRTY) { if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb)) wakeup_bdi = true; list_move(&inode->i_wb_list, &dst->wb.b_dirty); } spin_unlock(&inode->i_lock); spin_unlock(&old->wb.list_lock); spin_unlock(&dst->wb.list_lock); if (wakeup_bdi) bdi_wakeup_thread_delayed(dst); } /* Kill _all_ buffers and pagecache, dirty or not. */ void kill_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; if (mapping->nrpages == 0 && mapping->nrshadows == 0) return; invalidate_bh_lrus(); truncate_inode_pages(mapping, 0); } EXPORT_SYMBOL(kill_bdev); /* Invalidate clean unused buffers and pagecache. */ void invalidate_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; if (mapping->nrpages == 0) return; invalidate_bh_lrus(); lru_add_drain_all(); /* make sure all lru add caches are flushed */ invalidate_mapping_pages(mapping, 0, -1); /* 99% of the time, we don't need to flush the cleancache on the bdev. 
* But, for the strange corners, let's be cautious */ cleancache_invalidate_inode(mapping); } EXPORT_SYMBOL(invalidate_bdev); int set_blocksize(struct block_device *bdev, int size) { /* Size must be a power of two, and between 512 and PAGE_SIZE */ if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) return -EINVAL; /* Size cannot be smaller than the size supported by the device */ if (size < bdev_logical_block_size(bdev)) return -EINVAL; /* Don't change the size if it is the same as the current one */ if (bdev->bd_block_size != size) { sync_blockdev(bdev); bdev->bd_block_size = size; bdev->bd_inode->i_blkbits = blksize_bits(size); kill_bdev(bdev); } return 0; } EXPORT_SYMBOL(set_blocksize); int sb_set_blocksize(struct super_block *sb, int size) { if (set_blocksize(sb->s_bdev, size)) return 0; /* If we get here, we know size is a power of two * and its value is between 512 and PAGE_SIZE */ sb->s_blocksize = size; sb->s_blocksize_bits = blksize_bits(size); return sb->s_blocksize; } EXPORT_SYMBOL(sb_set_blocksize); int sb_min_blocksize(struct super_block *sb, int size) { int minsize = bdev_logical_block_size(sb->s_bdev); if (size < minsize) size = minsize; return sb_set_blocksize(sb, size); } EXPORT_SYMBOL(sb_min_blocksize); static int blkdev_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create) { bh->b_bdev = I_BDEV(inode); bh->b_blocknr = iblock; set_buffer_mapped(bh); return 0; } static ssize_t blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter, offset, blkdev_get_block, NULL, NULL, 0); } int __sync_blockdev(struct block_device *bdev, int wait) { if (!bdev) return 0; if (!wait) return filemap_flush(bdev->bd_inode->i_mapping); return filemap_write_and_wait(bdev->bd_inode->i_mapping); } /* * Write out and wait upon all the dirty data associated with a block * device via its mapping. Does not take the superblock lock. */ int sync_blockdev(struct block_device *bdev) { return __sync_blockdev(bdev, 1); } EXPORT_SYMBOL(sync_blockdev); /* * Write out and wait upon all dirty data associated with this * device. Filesystem data as well as the underlying block * device. Takes the superblock lock. */ int fsync_bdev(struct block_device *bdev) { struct super_block *sb = get_super(bdev); if (sb) { int res = sync_filesystem(sb); drop_super(sb); return res; } return sync_blockdev(bdev); } EXPORT_SYMBOL(fsync_bdev); /** * freeze_bdev -- lock a filesystem and force it into a consistent state * @bdev: blockdevice to lock * * If a superblock is found on this device, we take the s_umount semaphore * on it to make sure nobody unmounts until the snapshot creation is done. * The reference counter (bd_fsfreeze_count) guarantees that only the last * unfreeze process can actually unfreeze the frozen filesystem when multiple * freeze requests arrive simultaneously. It counts up in freeze_bdev() and * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() will actually * unfreeze. */ struct super_block *freeze_bdev(struct block_device *bdev) { struct super_block *sb; int error = 0; mutex_lock(&bdev->bd_fsfreeze_mutex); if (++bdev->bd_fsfreeze_count > 1) { /* * We don't even need to grab a reference - the first call * to freeze_bdev grabs an active reference and only the last * thaw_bdev drops it. 
*/ sb = get_super(bdev); drop_super(sb); mutex_unlock(&bdev->bd_fsfreeze_mutex); return sb; } sb = get_active_super(bdev); if (!sb) goto out; error = freeze_super(sb); if (error) { deactivate_super(sb); bdev->bd_fsfreeze_count--; mutex_unlock(&bdev->bd_fsfreeze_mutex); return ERR_PTR(error); } deactivate_super(sb); out: sync_blockdev(bdev); mutex_unlock(&bdev->bd_fsfreeze_mutex); return sb; /* thaw_bdev releases s->s_umount */ } EXPORT_SYMBOL(freeze_bdev); /** * thaw_bdev -- unlock filesystem * @bdev: blockdevice to unlock * @sb: associated superblock * * Unlocks the filesystem and marks it writeable again after freeze_bdev(). */ int thaw_bdev(struct block_device *bdev, struct super_block *sb) { int error = -EINVAL; mutex_lock(&bdev->bd_fsfreeze_mutex); if (!bdev->bd_fsfreeze_count) goto out; error = 0; if (--bdev->bd_fsfreeze_count > 0) goto out; if (!sb) goto out; error = thaw_super(sb); if (error) { bdev->bd_fsfreeze_count++; mutex_unlock(&bdev->bd_fsfreeze_mutex); return error; } out: mutex_unlock(&bdev->bd_fsfreeze_mutex); return 0; } EXPORT_SYMBOL(thaw_bdev); static int blkdev_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, blkdev_get_block, wbc); } static int blkdev_readpage(struct file * file, struct page * page) { return block_read_full_page(page, blkdev_get_block); } static int blkdev_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { return block_write_begin(mapping, pos, len, flags, pagep, blkdev_get_block); } static int blkdev_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int ret; ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); unlock_page(page); page_cache_release(page); return ret; } /* * private llseek: * for a block special file file_inode(file)->i_size is zero * so we compute the size by hand (just as in block_read/write above) */ static loff_t block_llseek(struct file *file, loff_t offset, int whence) { struct inode *bd_inode = file->f_mapping->host; loff_t retval; mutex_lock(&bd_inode->i_mutex); retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode)); mutex_unlock(&bd_inode->i_mutex); return retval; } int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *bd_inode = filp->f_mapping->host; struct block_device *bdev = I_BDEV(bd_inode); int error; error = filemap_write_and_wait_range(filp->f_mapping, start, end); if (error) return error; /* * There is no need to serialise calls to blkdev_issue_flush with * i_mutex and doing so causes performance issues with concurrent * O_SYNC writers to a block device. 
*/ error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL); if (error == -EOPNOTSUPP) error = 0; return error; } EXPORT_SYMBOL(blkdev_fsync); /* * pseudo-fs */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); static struct kmem_cache * bdev_cachep __read_mostly; static struct inode *bdev_alloc_inode(struct super_block *sb) { struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void bdev_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); struct bdev_inode *bdi = BDEV_I(inode); kmem_cache_free(bdev_cachep, bdi); } static void bdev_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, bdev_i_callback); } static void init_once(void *foo) { struct bdev_inode *ei = (struct bdev_inode *) foo; struct block_device *bdev = &ei->bdev; memset(bdev, 0, sizeof(*bdev)); mutex_init(&bdev->bd_mutex); INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); #ifdef CONFIG_SYSFS INIT_LIST_HEAD(&bdev->bd_holder_disks); #endif inode_init_once(&ei->vfs_inode); /* Initialize mutex for freeze. */ mutex_init(&bdev->bd_fsfreeze_mutex); } static inline void __bd_forget(struct inode *inode) { list_del_init(&inode->i_devices); inode->i_bdev = NULL; inode->i_mapping = &inode->i_data; } static void bdev_evict_inode(struct inode *inode) { struct block_device *bdev = &BDEV_I(inode)->bdev; struct list_head *p; truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); /* is it needed here? */ clear_inode(inode); spin_lock(&bdev_lock); while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) { __bd_forget(list_entry(p, struct inode, i_devices)); } list_del_init(&bdev->bd_list); spin_unlock(&bdev_lock); } static const struct super_operations bdev_sops = { .statfs = simple_statfs, .alloc_inode = bdev_alloc_inode, .destroy_inode = bdev_destroy_inode, .drop_inode = generic_delete_inode, .evict_inode = bdev_evict_inode, }; static struct dentry *bd_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC); } static struct file_system_type bd_type = { .name = "bdev", .mount = bd_mount, .kill_sb = kill_anon_super, }; static struct super_block *blockdev_superblock __read_mostly; void __init bdev_cache_init(void) { int err; static struct vfsmount *bd_mnt; bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_PANIC), init_once); err = register_filesystem(&bd_type); if (err) panic("Cannot register bdev pseudo-fs"); bd_mnt = kern_mount(&bd_type); if (IS_ERR(bd_mnt)) panic("Cannot create bdev pseudo-fs"); blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ } /* * Most likely a _very_ bad one - but then it's hardly critical for small * /dev and can be fixed when somebody needs a really large one. * Keep in mind that it will be fed through the icache hash function too. 
*/ static inline unsigned long hash(dev_t dev) { return MAJOR(dev)+MINOR(dev); } static int bdev_test(struct inode *inode, void *data) { return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data; } static int bdev_set(struct inode *inode, void *data) { BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data; return 0; } static LIST_HEAD(all_bdevs); struct block_device *bdget(dev_t dev) { struct block_device *bdev; struct inode *inode; inode = iget5_locked(blockdev_superblock, hash(dev), bdev_test, bdev_set, &dev); if (!inode) return NULL; bdev = &BDEV_I(inode)->bdev; if (inode->i_state & I_NEW) { bdev->bd_contains = NULL; bdev->bd_super = NULL; bdev->bd_inode = inode; bdev->bd_block_size = (1 << inode->i_blkbits); bdev->bd_part_count = 0; bdev->bd_invalidated = 0; inode->i_mode = S_IFBLK; inode->i_rdev = dev; inode->i_bdev = bdev; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); inode->i_data.backing_dev_info = &default_backing_dev_info; spin_lock(&bdev_lock); list_add(&bdev->bd_list, &all_bdevs); spin_unlock(&bdev_lock); unlock_new_inode(inode); } return bdev; } EXPORT_SYMBOL(bdget); /** * bdgrab -- Grab a reference to an already referenced block device * @bdev: Block device to grab a reference to. */ struct block_device *bdgrab(struct block_device *bdev) { ihold(bdev->bd_inode); return bdev; } EXPORT_SYMBOL(bdgrab); long nr_blockdev_pages(void) { struct block_device *bdev; long ret = 0; spin_lock(&bdev_lock); list_for_each_entry(bdev, &all_bdevs, bd_list) { ret += bdev->bd_inode->i_mapping->nrpages; } spin_unlock(&bdev_lock); return ret; } void bdput(struct block_device *bdev) { iput(bdev->bd_inode); } EXPORT_SYMBOL(bdput); static struct block_device *bd_acquire(struct inode *inode) { struct block_device *bdev; spin_lock(&bdev_lock); bdev = inode->i_bdev; if (bdev) { ihold(bdev->bd_inode); spin_unlock(&bdev_lock); return bdev; } spin_unlock(&bdev_lock); bdev = bdget(inode->i_rdev); if (bdev) { spin_lock(&bdev_lock); if (!inode->i_bdev) { /* * We take an additional reference to bd_inode, * and it's released in clear_inode() of inode. * So, we can access it via ->i_mapping always * without igrab(). */ ihold(bdev->bd_inode); inode->i_bdev = bdev; inode->i_mapping = bdev->bd_inode->i_mapping; list_add(&inode->i_devices, &bdev->bd_inodes); } spin_unlock(&bdev_lock); } return bdev; } int sb_is_blkdev_sb(struct super_block *sb) { return sb == blockdev_superblock; } /* Call when you free inode */ void bd_forget(struct inode *inode) { struct block_device *bdev = NULL; spin_lock(&bdev_lock); if (!sb_is_blkdev_sb(inode->i_sb)) bdev = inode->i_bdev; __bd_forget(inode); spin_unlock(&bdev_lock); if (bdev) iput(bdev->bd_inode); } /** * bd_may_claim - test whether a block device can be claimed * @bdev: block device of interest * @whole: whole block device containing @bdev, may equal @bdev * @holder: holder trying to claim @bdev * * Test whether @bdev can be claimed by @holder. * * CONTEXT: * spin_lock(&bdev_lock). * * RETURNS: * %true if @bdev can be claimed, %false otherwise. 
*/ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, void *holder) { if (bdev->bd_holder == holder) return true; /* already a holder */ else if (bdev->bd_holder != NULL) return false; /* held by someone else */ else if (bdev->bd_contains == bdev) return true; /* is a whole device which isn't held */ else if (whole->bd_holder == bd_may_claim) return true; /* is a partition of a device that is being partitioned */ else if (whole->bd_holder != NULL) return false; /* is a partition of a held device */ else return true; /* is a partition of an un-held device */ } /** * bd_prepare_to_claim - prepare to claim a block device * @bdev: block device of interest * @whole: the whole device containing @bdev, may equal @bdev * @holder: holder trying to claim @bdev * * Prepare to claim @bdev. This function fails if @bdev is already * claimed by another holder and waits if another claiming is in * progress. This function doesn't actually claim. On successful * return, the caller has ownership of bd_claiming and bd_holder[s]. * * CONTEXT: * spin_lock(&bdev_lock). Might release bdev_lock, sleep and regrab * it multiple times. * * RETURNS: * 0 if @bdev can be claimed, -EBUSY otherwise. */ static int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole, void *holder) { retry: /* if someone else claimed, fail */ if (!bd_may_claim(bdev, whole, holder)) return -EBUSY; /* if claiming is already in progress, wait for it to finish */ if (whole->bd_claiming) { wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); DEFINE_WAIT(wait); prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); spin_unlock(&bdev_lock); schedule(); finish_wait(wq, &wait); spin_lock(&bdev_lock); goto retry; } /* yay, all mine */ return 0; } /** * bd_start_claiming - start claiming a block device * @bdev: block device of interest * @holder: holder trying to claim @bdev * * @bdev is about to be opened exclusively. Check @bdev can be opened * exclusively and mark that an exclusive open is in progress. Each * successful call to this function must be matched with a call to * either bd_finish_claiming() or bd_abort_claiming() (which do not * fail). * * This function is used to gain exclusive access to the block device * without actually causing other exclusive open attempts to fail. It * should be used when the open sequence itself requires exclusive * access but may subsequently fail. * * CONTEXT: * Might sleep. * * RETURNS: * Pointer to the block device containing @bdev on success, ERR_PTR() * value on failure. */ static struct block_device *bd_start_claiming(struct block_device *bdev, void *holder) { struct gendisk *disk; struct block_device *whole; int partno, err; might_sleep(); /* * @bdev might not have been initialized properly yet, look up * and grab the outer block device the hard way. */ disk = get_gendisk(bdev->bd_dev, &partno); if (!disk) return ERR_PTR(-ENXIO); /* * Normally, @bdev should equal what's returned from bdget_disk() * if partno is 0; however, some drivers (floppy) use multiple * bdev's for the same physical device and @bdev may be one of the * aliases. Keep @bdev if partno is 0. This means claimer * tracking is broken for those devices but it has always been that * way. 
*/ if (partno) whole = bdget_disk(disk, 0); else whole = bdgrab(bdev); module_put(disk->fops->owner); put_disk(disk); if (!whole) return ERR_PTR(-ENOMEM); /* prepare to claim, if successful, mark claiming in progress */ spin_lock(&bdev_lock); err = bd_prepare_to_claim(bdev, whole, holder); if (err == 0) { whole->bd_claiming = holder; spin_unlock(&bdev_lock); return whole; } else { spin_unlock(&bdev_lock); bdput(whole); return ERR_PTR(err); } } #ifdef CONFIG_SYSFS struct bd_holder_disk { struct list_head list; struct gendisk *disk; int refcnt; }; static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev, struct gendisk *disk) { struct bd_holder_disk *holder; list_for_each_entry(holder, &bdev->bd_holder_disks, list) if (holder->disk == disk) return holder; return NULL; } static int add_symlink(struct kobject *from, struct kobject *to) { return sysfs_create_link(from, to, kobject_name(to)); } static void del_symlink(struct kobject *from, struct kobject *to) { sysfs_remove_link(from, kobject_name(to)); } /** * bd_link_disk_holder - create symlinks between holding disk and slave bdev * @bdev: the claimed slave bdev * @disk: the holding disk * * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT. * * This function creates the following sysfs symlinks. * * - from "slaves" directory of the holder @disk to the claimed @bdev * - from "holders" directory of the @bdev to the holder @disk * * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is * passed to bd_link_disk_holder(), then: * * /sys/block/dm-0/slaves/sda --> /sys/block/sda * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 * * The caller must have claimed @bdev before calling this function and * ensure that both @bdev and @disk are valid during the creation and * lifetime of these symlinks. * * CONTEXT: * Might sleep. * * RETURNS: * 0 on success, -errno on failure. */ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) { struct bd_holder_disk *holder; int ret = 0; mutex_lock(&bdev->bd_mutex); WARN_ON_ONCE(!bdev->bd_holder); /* FIXME: remove the following once add_disk() handles errors */ if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) goto out_unlock; holder = bd_find_holder_disk(bdev, disk); if (holder) { holder->refcnt++; goto out_unlock; } holder = kzalloc(sizeof(*holder), GFP_KERNEL); if (!holder) { ret = -ENOMEM; goto out_unlock; } INIT_LIST_HEAD(&holder->list); holder->disk = disk; holder->refcnt = 1; ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); if (ret) goto out_free; ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); if (ret) goto out_del; /* * bdev could be deleted beneath us which would implicitly destroy * the holder directory. Hold on to it. */ kobject_get(bdev->bd_part->holder_dir); list_add(&holder->list, &bdev->bd_holder_disks); goto out_unlock; out_del: del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); out_free: kfree(holder); out_unlock: mutex_unlock(&bdev->bd_mutex); return ret; } EXPORT_SYMBOL_GPL(bd_link_disk_holder); /** * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder() * @bdev: the claimed slave bdev * @disk: the holding disk * * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT. * * CONTEXT: * Might sleep. 
*/ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) { struct bd_holder_disk *holder; mutex_lock(&bdev->bd_mutex); holder = bd_find_holder_disk(bdev, disk); if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) { del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); kobject_put(bdev->bd_part->holder_dir); list_del_init(&holder->list); kfree(holder); } mutex_unlock(&bdev->bd_mutex); } EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); #endif /** * flush_disk - invalidates all buffer-cache entries on a disk * * @bdev: struct block device to be flushed * @kill_dirty: flag to guide handling of dirty inodes * * Invalidates all buffer-cache entries on a disk. It should be called * when a disk has been changed -- either by a media change or online * resize. */ static void flush_disk(struct block_device *bdev, bool kill_dirty) { if (__invalidate_device(bdev, kill_dirty)) { char name[BDEVNAME_SIZE] = ""; if (bdev->bd_disk) disk_name(bdev->bd_disk, 0, name); printk(KERN_WARNING "VFS: busy inodes on changed media or " "resized disk %s\n", name); } if (!bdev->bd_disk) return; if (disk_part_scan_enabled(bdev->bd_disk)) bdev->bd_invalidated = 1; } /** * check_disk_size_change - checks for disk size change and adjusts bdev size. * @disk: struct gendisk to check * @bdev: struct bdev to adjust. * * This routine checks to see if the bdev size does not match the disk size * and adjusts it if it differs. */ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev) { loff_t disk_size, bdev_size; disk_size = (loff_t)get_capacity(disk) << 9; bdev_size = i_size_read(bdev->bd_inode); if (disk_size != bdev_size) { char name[BDEVNAME_SIZE]; disk_name(disk, 0, name); printk(KERN_INFO "%s: detected capacity change from %lld to %lld\n", name, bdev_size, disk_size); i_size_write(bdev->bd_inode, disk_size); flush_disk(bdev, false); } } EXPORT_SYMBOL(check_disk_size_change); /** * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back * @disk: struct gendisk to be revalidated * * This routine is a wrapper for lower-level driver's revalidate_disk * call-backs. It is used to do common pre and post operations needed * for all revalidate_disk operations. */ int revalidate_disk(struct gendisk *disk) { struct block_device *bdev; int ret = 0; if (disk->fops->revalidate_disk) ret = disk->fops->revalidate_disk(disk); bdev = bdget_disk(disk, 0); if (!bdev) return ret; mutex_lock(&bdev->bd_mutex); check_disk_size_change(disk, bdev); bdev->bd_invalidated = 0; mutex_unlock(&bdev->bd_mutex); bdput(bdev); return ret; } EXPORT_SYMBOL(revalidate_disk); /* * This routine checks whether a removable medium has been changed, * and invalidates all buffer-cache entries in that case. This * is a relatively slow routine, so we have to try to minimize using * it. Thus it is called only upon a 'mount' or 'open'. This * is the best way of combining speed and utility, I think. 
* People changing diskettes in the middle of an operation deserve * to lose :-) */ int check_disk_change(struct block_device *bdev) { struct gendisk *disk = bdev->bd_disk; const struct block_device_operations *bdops = disk->fops; unsigned int events; events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST); if (!(events & DISK_EVENT_MEDIA_CHANGE)) return 0; flush_disk(bdev, true); if (bdops->revalidate_disk) bdops->revalidate_disk(bdev->bd_disk); return 1; } EXPORT_SYMBOL(check_disk_change); void bd_set_size(struct block_device *bdev, loff_t size) { unsigned bsize = bdev_logical_block_size(bdev); mutex_lock(&bdev->bd_inode->i_mutex); i_size_write(bdev->bd_inode, size); mutex_unlock(&bdev->bd_inode->i_mutex); while (bsize < PAGE_CACHE_SIZE) { if (size & bsize) break; bsize <<= 1; } bdev->bd_block_size = bsize; bdev->bd_inode->i_blkbits = blksize_bits(bsize); } EXPORT_SYMBOL(bd_set_size); static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); /* * bd_mutex locking: * * mutex_lock(part->bd_mutex) * mutex_lock_nested(whole->bd_mutex, 1) */ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) { struct gendisk *disk; struct module *owner; int ret; int partno; int perm = 0; if (mode & FMODE_READ) perm |= MAY_READ; if (mode & FMODE_WRITE) perm |= MAY_WRITE; /* * hooks: /n/, see "layering violations". */ if (!for_part) { ret = devcgroup_inode_permission(bdev->bd_inode, perm); if (ret != 0) { bdput(bdev); return ret; } } restart: ret = -ENXIO; disk = get_gendisk(bdev->bd_dev, &partno); if (!disk) goto out; owner = disk->fops->owner; disk_block_events(disk); mutex_lock_nested(&bdev->bd_mutex, for_part); if (!bdev->bd_openers) { bdev->bd_disk = disk; bdev->bd_queue = disk->queue; bdev->bd_contains = bdev; if (!partno) { struct backing_dev_info *bdi; ret = -ENXIO; bdev->bd_part = disk_get_part(disk, partno); if (!bdev->bd_part) goto out_clear; ret = 0; if (disk->fops->open) { ret = disk->fops->open(bdev, mode); if (ret == -ERESTARTSYS) { /* Lost a race with 'disk' being * deleted, try again. * See md.c */ disk_put_part(bdev->bd_part); bdev->bd_part = NULL; bdev->bd_disk = NULL; bdev->bd_queue = NULL; mutex_unlock(&bdev->bd_mutex); disk_unblock_events(disk); put_disk(disk); module_put(owner); goto restart; } } if (!ret) { bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); bdi = blk_get_backing_dev_info(bdev); if (bdi == NULL) bdi = &default_backing_dev_info; bdev_inode_switch_bdi(bdev->bd_inode, bdi); } /* * If the device is invalidated, rescan partition * if open succeeded or failed with -ENOMEDIUM. * The latter is necessary to prevent ghost * partitions on a removed medium. 
*/ if (bdev->bd_invalidated) { if (!ret) rescan_partitions(disk, bdev); else if (ret == -ENOMEDIUM) invalidate_partitions(disk, bdev); } if (ret) goto out_clear; } else { struct block_device *whole; whole = bdget_disk(disk, 0); ret = -ENOMEM; if (!whole) goto out_clear; BUG_ON(for_part); ret = __blkdev_get(whole, mode, 1); if (ret) goto out_clear; bdev->bd_contains = whole; bdev_inode_switch_bdi(bdev->bd_inode, whole->bd_inode->i_data.backing_dev_info); bdev->bd_part = disk_get_part(disk, partno); if (!(disk->flags & GENHD_FL_UP) || !bdev->bd_part || !bdev->bd_part->nr_sects) { ret = -ENXIO; goto out_clear; } bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); } } else { if (bdev->bd_contains == bdev) { ret = 0; if (bdev->bd_disk->fops->open) ret = bdev->bd_disk->fops->open(bdev, mode); /* the same as first opener case, read comment there */ if (bdev->bd_invalidated) { if (!ret) rescan_partitions(bdev->bd_disk, bdev); else if (ret == -ENOMEDIUM) invalidate_partitions(bdev->bd_disk, bdev); } if (ret) goto out_unlock_bdev; } /* only one opener holds refs to the module and disk */ put_disk(disk); module_put(owner); } bdev->bd_openers++; if (for_part) bdev->bd_part_count++; mutex_unlock(&bdev->bd_mutex); disk_unblock_events(disk); return 0; out_clear: disk_put_part(bdev->bd_part); bdev->bd_disk = NULL; bdev->bd_part = NULL; bdev->bd_queue = NULL; bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info); if (bdev != bdev->bd_contains) __blkdev_put(bdev->bd_contains, mode, 1); bdev->bd_contains = NULL; out_unlock_bdev: mutex_unlock(&bdev->bd_mutex); disk_unblock_events(disk); put_disk(disk); module_put(owner); out: bdput(bdev); return ret; } /** * blkdev_get - open a block device * @bdev: block_device to open * @mode: FMODE_* mask * @holder: exclusive holder identifier * * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is * opened with exclusive access. Specifying %FMODE_EXCL with %NULL * @holder is invalid. Exclusive opens may nest for the same @holder. * * On success, the reference count of @bdev is unchanged. On failure, * @bdev is put. * * CONTEXT: * Might sleep. * * RETURNS: * 0 on success, -errno on failure. */ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) { struct block_device *whole = NULL; int res; WARN_ON_ONCE((mode & FMODE_EXCL) && !holder); if ((mode & FMODE_EXCL) && holder) { whole = bd_start_claiming(bdev, holder); if (IS_ERR(whole)) { bdput(bdev); return PTR_ERR(whole); } } res = __blkdev_get(bdev, mode, 0); if (whole) { struct gendisk *disk = whole->bd_disk; /* finish claiming */ mutex_lock(&bdev->bd_mutex); spin_lock(&bdev_lock); if (!res) { BUG_ON(!bd_may_claim(bdev, whole, holder)); /* * Note that for a whole device bd_holders * will be incremented twice, and bd_holder * will be set to bd_may_claim before being * set to holder */ whole->bd_holders++; whole->bd_holder = bd_may_claim; bdev->bd_holders++; bdev->bd_holder = holder; } /* tell others that we're done */ BUG_ON(whole->bd_claiming != holder); whole->bd_claiming = NULL; wake_up_bit(&whole->bd_claiming, 0); spin_unlock(&bdev_lock); /* * Block event polling for write claims if requested. Any * write holder makes the write_holder state stick until * all are released. This is good enough and tracking * individual writeable references is too fragile given the * way @mode is used in blkdev_get/put(). 
*/ if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder && (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { bdev->bd_write_holder = true; disk_block_events(disk); } mutex_unlock(&bdev->bd_mutex); bdput(whole); } return res; } EXPORT_SYMBOL(blkdev_get); /** * blkdev_get_by_path - open a block device by name * @path: path to the block device to open * @mode: FMODE_* mask * @holder: exclusive holder identifier * * Open the blockdevice described by the device file at @path. @mode * and @holder are identical to blkdev_get(). * * On success, the returned block_device has reference count of one. * * CONTEXT: * Might sleep. * * RETURNS: * Pointer to block_device on success, ERR_PTR(-errno) on failure. */ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, void *holder) { struct block_device *bdev; int err; bdev = lookup_bdev(path); if (IS_ERR(bdev)) return bdev; err = blkdev_get(bdev, mode, holder); if (err) return ERR_PTR(err); if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { blkdev_put(bdev, mode); return ERR_PTR(-EACCES); } return bdev; } EXPORT_SYMBOL(blkdev_get_by_path); /** * blkdev_get_by_dev - open a block device by device number * @dev: device number of block device to open * @mode: FMODE_* mask * @holder: exclusive holder identifier * * Open the blockdevice described by device number @dev. @mode and * @holder are identical to blkdev_get(). * * Use it ONLY if you really do not have anything better - i.e. when * you are behind a truly sucky interface and all you are given is a * device number. _Never_ to be used for internal purposes. If you * ever need it - reconsider your API. * * On success, the returned block_device has reference count of one. * * CONTEXT: * Might sleep. * * RETURNS: * Pointer to block_device on success, ERR_PTR(-errno) on failure. */ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) { struct block_device *bdev; int err; bdev = bdget(dev); if (!bdev) return ERR_PTR(-ENOMEM); err = blkdev_get(bdev, mode, holder); if (err) return ERR_PTR(err); return bdev; } EXPORT_SYMBOL(blkdev_get_by_dev); static int blkdev_open(struct inode * inode, struct file * filp) { struct block_device *bdev; /* * Preserve backwards compatibility and allow large file access * even if userspace doesn't ask for it explicitly. Some mkfs * binary needs it. We might want to drop this workaround * during an unstable branch. 
*/ filp->f_flags |= O_LARGEFILE; if (filp->f_flags & O_NDELAY) filp->f_mode |= FMODE_NDELAY; if (filp->f_flags & O_EXCL) filp->f_mode |= FMODE_EXCL; if ((filp->f_flags & O_ACCMODE) == 3) filp->f_mode |= FMODE_WRITE_IOCTL; bdev = bd_acquire(inode); if (bdev == NULL) return -ENOMEM; filp->f_mapping = bdev->bd_inode->i_mapping; return blkdev_get(bdev, filp->f_mode, filp); } static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) { struct gendisk *disk = bdev->bd_disk; struct block_device *victim = NULL; mutex_lock_nested(&bdev->bd_mutex, for_part); if (for_part) bdev->bd_part_count--; if (!--bdev->bd_openers) { WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); /* ->release can cause the old bdi to disappear, * so must switch it out first */ bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info); } if (bdev->bd_contains == bdev) { if (disk->fops->release) disk->fops->release(disk, mode); } if (!bdev->bd_openers) { struct module *owner = disk->fops->owner; disk_put_part(bdev->bd_part); bdev->bd_part = NULL; bdev->bd_disk = NULL; if (bdev != bdev->bd_contains) victim = bdev->bd_contains; bdev->bd_contains = NULL; put_disk(disk); module_put(owner); } mutex_unlock(&bdev->bd_mutex); bdput(bdev); if (victim) __blkdev_put(victim, mode, 1); } void blkdev_put(struct block_device *bdev, fmode_t mode) { mutex_lock(&bdev->bd_mutex); if (mode & FMODE_EXCL) { bool bdev_free; /* * Release a claim on the device. The holder fields * are protected with bdev_lock. bd_mutex is to * synchronize disk_holder unlinking. */ spin_lock(&bdev_lock); WARN_ON_ONCE(--bdev->bd_holders < 0); WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); /* bd_contains might point to self, check in a separate step */ if ((bdev_free = !bdev->bd_holders)) bdev->bd_holder = NULL; if (!bdev->bd_contains->bd_holders) bdev->bd_contains->bd_holder = NULL; spin_unlock(&bdev_lock); /* * If this was the last claim, remove holder link and * unblock evpoll if it was a write holder. */ if (bdev_free && bdev->bd_write_holder) { disk_unblock_events(bdev->bd_disk); bdev->bd_write_holder = false; } } /* * Trigger event checking and tell drivers to flush MEDIA_CHANGE * event. This is to ensure detection of media removal commanded * from userland - e.g. eject(1). */ disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE); mutex_unlock(&bdev->bd_mutex); __blkdev_put(bdev, mode, 0); } EXPORT_SYMBOL(blkdev_put); static int blkdev_close(struct inode * inode, struct file * filp) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); blkdev_put(bdev, filp->f_mode); return 0; } static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bdev = I_BDEV(file->f_mapping->host); fmode_t mode = file->f_mode; /* * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have * to update it before every ioctl. */ if (file->f_flags & O_NDELAY) mode |= FMODE_NDELAY; else mode &= ~FMODE_NDELAY; return blkdev_ioctl(bdev, mode, cmd, arg); } /* * Write data to the block device. Only intended for the block device itself * and the raw driver which basically is a fake block device. * * Does not take i_mutex for the write and thus is not for general purpose * use. 
*/ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct blk_plug plug; ssize_t ret; blk_start_plug(&plug); ret = __generic_file_write_iter(iocb, from); if (ret > 0) { ssize_t err; err = generic_write_sync(file, iocb->ki_pos - ret, ret); if (err < 0) ret = err; } blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL_GPL(blkdev_write_iter); static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct inode *bd_inode = file->f_mapping->host; loff_t size = i_size_read(bd_inode); loff_t pos = iocb->ki_pos; if (pos >= size) return 0; size -= pos; iov_iter_truncate(to, size); return generic_file_read_iter(iocb, to); } /* * Try to release a page associated with a block device when the system * is under memory pressure. */ static int blkdev_releasepage(struct page *page, gfp_t wait) { struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super; if (super && super->s_op->bdev_try_to_free_page) return super->s_op->bdev_try_to_free_page(super, page, wait); return try_to_free_buffers(page); } static const struct address_space_operations def_blk_aops = { .readpage = blkdev_readpage, .writepage = blkdev_writepage, .write_begin = blkdev_write_begin, .write_end = blkdev_write_end, .writepages = generic_writepages, .releasepage = blkdev_releasepage, .direct_IO = blkdev_direct_IO, .is_dirty_writeback = buffer_check_dirty_writeback, }; const struct file_operations def_blk_fops = { .open = blkdev_open, .release = blkdev_close, .llseek = block_llseek, .read = new_sync_read, .write = new_sync_write, .read_iter = blkdev_read_iter, .write_iter = blkdev_write_iter, .mmap = generic_file_mmap, .fsync = blkdev_fsync, .unlocked_ioctl = block_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_blkdev_ioctl, #endif .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, }; int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg) { int res; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); res = blkdev_ioctl(bdev, 0, cmd, arg); set_fs(old_fs); return res; } EXPORT_SYMBOL(ioctl_by_bdev); /** * lookup_bdev - lookup a struct block_device by name * @pathname: special file representing the block device * * Get a reference to the blockdevice at @pathname in the current * namespace if possible and return it. Return ERR_PTR(error) * otherwise. */ struct block_device *lookup_bdev(const char *pathname) { struct block_device *bdev; struct inode *inode; struct path path; int error; if (!pathname || !*pathname) return ERR_PTR(-EINVAL); error = kern_path(pathname, LOOKUP_FOLLOW, &path); if (error) return ERR_PTR(error); inode = path.dentry->d_inode; error = -ENOTBLK; if (!S_ISBLK(inode->i_mode)) goto fail; error = -EACCES; if (path.mnt->mnt_flags & MNT_NODEV) goto fail; error = -ENOMEM; bdev = bd_acquire(inode); if (!bdev) goto fail; out: path_put(&path); return bdev; fail: bdev = ERR_PTR(error); goto out; } EXPORT_SYMBOL(lookup_bdev); int __invalidate_device(struct block_device *bdev, bool kill_dirty) { struct super_block *sb = get_super(bdev); int res = 0; if (sb) { /* * no need to lock the super, get_super holds the * read mutex so the filesystem cannot go away * under us (->put_super runs with the write lock * held). 
*/ shrink_dcache_sb(sb); res = invalidate_inodes(sb, kill_dirty); drop_super(sb); } invalidate_bdev(bdev); return res; } EXPORT_SYMBOL(__invalidate_device); void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) { struct inode *inode, *old_inode = NULL; spin_lock(&inode_sb_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { struct address_space *mapping = inode->i_mapping; spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || mapping->nrpages == 0) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&inode_sb_list_lock); /* * We hold a reference to 'inode' so it couldn't have been * removed from s_inodes list while we dropped the * inode_sb_list_lock. We cannot iput the inode now as we can * be holding the last reference and we cannot iput it under * inode_sb_list_lock. So we keep the reference and iput it * later. */ iput(old_inode); old_inode = inode; func(I_BDEV(inode), arg); spin_lock(&inode_sb_list_lock); } spin_unlock(&inode_sb_list_lock); iput(old_inode); }
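/*
 * Illustration only (not part of the file above): a minimal userspace
 * sketch of the exclusive-claim semantics that blkdev_put() tears down.
 * Opening a block device with O_EXCL takes a holder claim (FMODE_EXCL),
 * so a second O_EXCL open of the same device fails until the first
 * descriptor is closed.  "/dev/loop0" is just an assumed example device
 * node; build and run as an ordinary (root) userspace program.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/loop0";		/* assumption: unused device */
	int a, b;

	a = open(dev, O_RDONLY | O_EXCL);	/* takes the holder claim */
	if (a < 0) {
		perror("first O_EXCL open");
		return 1;
	}
	b = open(dev, O_RDONLY | O_EXCL);	/* competing claim */
	if (b < 0)
		printf("second open failed as expected: %s\n",
		       strerror(errno));	/* typically EBUSY */
	else
		close(b);
	close(a);	/* last claim dropped: bd_holder is cleared */
	return 0;
}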
/* ./CrossVul/dataset_final_sorted/CWE-264/c/bad_2287_0 */
/* crossvul-cpp_data_bad_2163_1 */
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include "irq.h" #include "mmu.h" #include "cpuid.h" #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include <linux/mod_devicetable.h> #include <linux/ftrace_event.h> #include <linux/slab.h> #include <linux/tboot.h> #include <linux/hrtimer.h> #include "kvm_cache_regs.h" #include "x86.h" #include <asm/io.h> #include <asm/desc.h> #include <asm/vmx.h> #include <asm/virtext.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/xcr.h> #include <asm/perf_event.h> #include <asm/debugreg.h> #include <asm/kexec.h> #include "trace.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) #define __ex_clear(x, reg) \ ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static const struct x86_cpu_id vmx_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_VMX), {} }; MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); static bool __read_mostly enable_vpid = 1; module_param_named(vpid, enable_vpid, bool, 0444); static bool __read_mostly flexpriority_enabled = 1; module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); static bool __read_mostly enable_ept = 1; module_param_named(ept, enable_ept, bool, S_IRUGO); static bool __read_mostly enable_unrestricted_guest = 1; module_param_named(unrestricted_guest, enable_unrestricted_guest, bool, S_IRUGO); static bool __read_mostly enable_ept_ad_bits = 1; module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); static bool __read_mostly emulate_invalid_guest_state = true; module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly vmm_exclusive = 1; module_param(vmm_exclusive, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); static bool __read_mostly enable_apicv = 1; module_param(enable_apicv, bool, S_IRUGO); static bool __read_mostly enable_shadow_vmcs = 1; module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); /* * If nested=1, nested virtualization is supported, i.e., guests may use * VMX and be a hypervisor for its own guests. If nested=0, guests may not * use VMX instructions. 
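 * (Exposed as a module parameter, e.g. "modprobe kvm_intel nested=1".)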
*/ static bool __read_mostly nested = 0; module_param(nested, bool, S_IRUGO); #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT) #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 /* * These 2 parameters are used to config the controls for Pause-Loop Exiting: * ple_gap: upper bound on the amount of time between two successive * executions of PAUSE in a loop. Also indicate if ple enabled. * According to test, this time is usually smaller than 128 cycles. * ple_window: upper bound on the amount of time a guest is allowed to execute * in a PAUSE loop. Tests indicate that most spinlocks are held for * less than 2^12 cycles * Time is measured based on a counter that runs at the same rate as the TSC, * refer SDM volume 3b section 21.6.13 & 22.1.3. */ #define KVM_VMX_DEFAULT_PLE_GAP 128 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \ INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP; module_param(ple_gap, int, S_IRUGO); static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; module_param(ple_window, int, S_IRUGO); /* Default doubles per-vcpu window every exit. */ static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW; module_param(ple_window_grow, int, S_IRUGO); /* Default resets per-vcpu window every exit to ple_window. */ static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK; module_param(ple_window_shrink, int, S_IRUGO); /* Default is to compute the maximum so we can never overflow. */ static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; #define NR_AUTOLOAD_MSRS 8 #define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; u32 abort; char data[0]; }; /* * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs * loaded on this CPU (so we can clear them if the CPU goes down). */ struct loaded_vmcs { struct vmcs *vmcs; int cpu; int launched; struct list_head loaded_vmcss_on_cpu_link; }; struct shared_msr_entry { unsigned index; u64 data; u64 mask; }; /* * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a * single nested guest (L2), hence the name vmcs12. Any VMX implementation has * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is * stored in guest memory specified by VMPTRLD, but is opaque to the guest, * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. * More than one of these structures may exist, if L1 runs multiple L2 guests. * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the * underlying hardware which will be used to run L2. * This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). 
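 * (__packed forbids compiler-inserted padding, so every build produces the
 * same byte layout.)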
* If there are changes in this struct, VMCS12_REVISION must be changed. */ typedef u64 natural_width; struct __packed vmcs12 { /* According to the Intel spec, a VMCS region must start with the * following two fields. Then follow implementation-specific data. */ u32 revision_id; u32 abort; u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ u32 padding[7]; /* room for future expansion */ u64 io_bitmap_a; u64 io_bitmap_b; u64 msr_bitmap; u64 vm_exit_msr_store_addr; u64 vm_exit_msr_load_addr; u64 vm_entry_msr_load_addr; u64 tsc_offset; u64 virtual_apic_page_addr; u64 apic_access_addr; u64 ept_pointer; u64 guest_physical_address; u64 vmcs_link_pointer; u64 guest_ia32_debugctl; u64 guest_ia32_pat; u64 guest_ia32_efer; u64 guest_ia32_perf_global_ctrl; u64 guest_pdptr0; u64 guest_pdptr1; u64 guest_pdptr2; u64 guest_pdptr3; u64 guest_bndcfgs; u64 host_ia32_pat; u64 host_ia32_efer; u64 host_ia32_perf_global_ctrl; u64 padding64[8]; /* room for future expansion */ /* * To allow migration of L1 (complete with its L2 guests) between * machines of different natural widths (32 or 64 bit), we cannot have * unsigned long fields with no explict size. We use u64 (aliased * natural_width) instead. Luckily, x86 is little-endian. */ natural_width cr0_guest_host_mask; natural_width cr4_guest_host_mask; natural_width cr0_read_shadow; natural_width cr4_read_shadow; natural_width cr3_target_value0; natural_width cr3_target_value1; natural_width cr3_target_value2; natural_width cr3_target_value3; natural_width exit_qualification; natural_width guest_linear_address; natural_width guest_cr0; natural_width guest_cr3; natural_width guest_cr4; natural_width guest_es_base; natural_width guest_cs_base; natural_width guest_ss_base; natural_width guest_ds_base; natural_width guest_fs_base; natural_width guest_gs_base; natural_width guest_ldtr_base; natural_width guest_tr_base; natural_width guest_gdtr_base; natural_width guest_idtr_base; natural_width guest_dr7; natural_width guest_rsp; natural_width guest_rip; natural_width guest_rflags; natural_width guest_pending_dbg_exceptions; natural_width guest_sysenter_esp; natural_width guest_sysenter_eip; natural_width host_cr0; natural_width host_cr3; natural_width host_cr4; natural_width host_fs_base; natural_width host_gs_base; natural_width host_tr_base; natural_width host_gdtr_base; natural_width host_idtr_base; natural_width host_ia32_sysenter_esp; natural_width host_ia32_sysenter_eip; natural_width host_rsp; natural_width host_rip; natural_width paddingl[8]; /* room for future expansion */ u32 pin_based_vm_exec_control; u32 cpu_based_vm_exec_control; u32 exception_bitmap; u32 page_fault_error_code_mask; u32 page_fault_error_code_match; u32 cr3_target_count; u32 vm_exit_controls; u32 vm_exit_msr_store_count; u32 vm_exit_msr_load_count; u32 vm_entry_controls; u32 vm_entry_msr_load_count; u32 vm_entry_intr_info_field; u32 vm_entry_exception_error_code; u32 vm_entry_instruction_len; u32 tpr_threshold; u32 secondary_vm_exec_control; u32 vm_instruction_error; u32 vm_exit_reason; u32 vm_exit_intr_info; u32 vm_exit_intr_error_code; u32 idt_vectoring_info_field; u32 idt_vectoring_error_code; u32 vm_exit_instruction_len; u32 vmx_instruction_info; u32 guest_es_limit; u32 guest_cs_limit; u32 guest_ss_limit; u32 guest_ds_limit; u32 guest_fs_limit; u32 guest_gs_limit; u32 guest_ldtr_limit; u32 guest_tr_limit; u32 guest_gdtr_limit; u32 guest_idtr_limit; u32 guest_es_ar_bytes; u32 guest_cs_ar_bytes; u32 guest_ss_ar_bytes; u32 guest_ds_ar_bytes; u32 guest_fs_ar_bytes; u32 
guest_gs_ar_bytes; u32 guest_ldtr_ar_bytes; u32 guest_tr_ar_bytes; u32 guest_interruptibility_info; u32 guest_activity_state; u32 guest_sysenter_cs; u32 host_ia32_sysenter_cs; u32 vmx_preemption_timer_value; u32 padding32[7]; /* room for future expansion */ u16 virtual_processor_id; u16 guest_es_selector; u16 guest_cs_selector; u16 guest_ss_selector; u16 guest_ds_selector; u16 guest_fs_selector; u16 guest_gs_selector; u16 guest_ldtr_selector; u16 guest_tr_selector; u16 host_es_selector; u16 host_cs_selector; u16 host_ss_selector; u16 host_ds_selector; u16 host_fs_selector; u16 host_gs_selector; u16 host_tr_selector; }; /* * VMCS12_REVISION is an arbitrary id that should be changed if the content or * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. */ #define VMCS12_REVISION 0x11e57ed0 /* * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region * and any VMCS region. Although only sizeof(struct vmcs12) are used by the * current implementation, 4K are reserved to avoid future complications. */ #define VMCS12_SIZE 0x1000 /* Used to remember the last vmcs02 used for some recently used vmcs12s */ struct vmcs02_list { struct list_head list; gpa_t vmptr; struct loaded_vmcs vmcs02; }; /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. */ struct nested_vmx { /* Has the level1 guest done vmxon? */ bool vmxon; gpa_t vmxon_ptr; /* The guest-physical address of the current VMCS L1 keeps for L2 */ gpa_t current_vmptr; /* The host-usable pointer to the above */ struct page *current_vmcs12_page; struct vmcs12 *current_vmcs12; struct vmcs *current_shadow_vmcs; /* * Indicates if the shadow vmcs must be updated with the * data hold by vmcs12 */ bool sync_shadow_vmcs; /* vmcs02_list cache of VMCSs recently used to run L2 guests */ struct list_head vmcs02_pool; int vmcs02_num; u64 vmcs01_tsc_offset; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; /* * Guest pages referred to in vmcs02 with host-physical pointers, so * we must keep them pinned while L2 runs. 
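 * (They are pinned via nested_get_page() and released with
 * nested_release_page() once L2 stops using them.)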
*/ struct page *apic_access_page; struct page *virtual_apic_page; u64 msr_ia32_feature_control; struct hrtimer preemption_timer; bool preemption_timer_expired; /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ u64 vmcs01_debugctl; }; #define POSTED_INTR_ON 0 /* Posted-Interrupt Descriptor */ struct pi_desc { u32 pir[8]; /* Posted interrupt requested */ u32 control; /* bit 0 of control is outstanding notification bit */ u32 rsvd[7]; } __aligned(64); static bool pi_test_and_set_on(struct pi_desc *pi_desc) { return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static bool pi_test_and_clear_on(struct pi_desc *pi_desc) { return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) { return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; bool nmi_known_unmasked; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; struct shared_msr_entry *guest_msrs; int nmsrs; int save_nmsrs; unsigned long host_idt_base; #ifdef CONFIG_X86_64 u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. For a nested * guest (L2), it points to a different VMCS. */ struct loaded_vmcs vmcs01; struct loaded_vmcs *loaded_vmcs; bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { unsigned nr; struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; } msr_autoload; struct { int loaded; u16 fs_sel, gs_sel, ldt_sel; #ifdef CONFIG_X86_64 u16 ds_sel, es_sel; #endif int gs_ldt_reload_needed; int fs_reload_needed; u64 msr_host_bndcfgs; unsigned long vmcs_host_cr4; /* May not match real cr4 */ } host_state; struct { int vm86_active; ulong save_rflags; struct kvm_segment segs[8]; } rmode; struct { u32 bitmask; /* 4 bits per segment (1 bit per field) */ struct kvm_save_segment { u16 selector; unsigned long base; u32 limit; u32 ar; } seg[8]; } segment_cache; int vpid; bool emulation_required; /* Support for vnmi-less CPUs */ int soft_vnmi_blocked; ktime_t entry_time; s64 vnmi_blocked_time; u32 exit_reason; bool rdtscp_enabled; /* Posted interrupt descriptor */ struct pi_desc pi_desc; /* Support for a guest hypervisor (nested VMX) */ struct nested_vmx nested; /* Dynamic PLE window. */ int ple_window; bool ple_window_dirty; }; enum segment_cache_field { SEG_FIELD_SEL = 0, SEG_FIELD_BASE = 1, SEG_FIELD_LIMIT = 2, SEG_FIELD_AR = 3, SEG_FIELD_NR = 4 }; static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_vmx, vcpu); } #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) #define FIELD(number, name) [number] = VMCS12_OFFSET(name) #define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \ [number##_HIGH] = VMCS12_OFFSET(name)+4 static unsigned long shadow_read_only_fields[] = { /* * We do NOT shadow fields that are modified when L0 * traps and emulates any vmx instruction (e.g. VMPTRLD, * VMXON...) executed by L1. * For example, VM_INSTRUCTION_ERROR is read * by L1 if a vmx instruction fails (part of the error path). * Note the code assumes this logic. If for some reason * we start shadowing these fields then we need to * force a shadow sync when L0 emulates vmx instructions * (e.g. 
force a sync if VM_INSTRUCTION_ERROR is modified * by nested_vmx_failValid) */ VM_EXIT_REASON, VM_EXIT_INTR_INFO, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_INFO_FIELD, IDT_VECTORING_ERROR_CODE, VM_EXIT_INTR_ERROR_CODE, EXIT_QUALIFICATION, GUEST_LINEAR_ADDRESS, GUEST_PHYSICAL_ADDRESS }; static int max_shadow_read_only_fields = ARRAY_SIZE(shadow_read_only_fields); static unsigned long shadow_read_write_fields[] = { TPR_THRESHOLD, GUEST_RIP, GUEST_RSP, GUEST_CR0, GUEST_CR3, GUEST_CR4, GUEST_INTERRUPTIBILITY_INFO, GUEST_RFLAGS, GUEST_CS_SELECTOR, GUEST_CS_AR_BYTES, GUEST_CS_LIMIT, GUEST_CS_BASE, GUEST_ES_BASE, GUEST_BNDCFGS, CR0_GUEST_HOST_MASK, CR0_READ_SHADOW, CR4_READ_SHADOW, TSC_OFFSET, EXCEPTION_BITMAP, CPU_BASED_VM_EXEC_CONTROL, VM_ENTRY_EXCEPTION_ERROR_CODE, VM_ENTRY_INTR_INFO_FIELD, VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE, HOST_FS_BASE, HOST_GS_BASE, HOST_FS_SELECTOR, HOST_GS_SELECTOR }; static int max_shadow_read_write_fields = ARRAY_SIZE(shadow_read_write_fields); static const unsigned short vmcs_field_to_offset_table[] = { FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id), FIELD(GUEST_ES_SELECTOR, guest_es_selector), FIELD(GUEST_CS_SELECTOR, guest_cs_selector), FIELD(GUEST_SS_SELECTOR, guest_ss_selector), FIELD(GUEST_DS_SELECTOR, guest_ds_selector), FIELD(GUEST_FS_SELECTOR, guest_fs_selector), FIELD(GUEST_GS_SELECTOR, guest_gs_selector), FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector), FIELD(GUEST_TR_SELECTOR, guest_tr_selector), FIELD(HOST_ES_SELECTOR, host_es_selector), FIELD(HOST_CS_SELECTOR, host_cs_selector), FIELD(HOST_SS_SELECTOR, host_ss_selector), FIELD(HOST_DS_SELECTOR, host_ds_selector), FIELD(HOST_FS_SELECTOR, host_fs_selector), FIELD(HOST_GS_SELECTOR, host_gs_selector), FIELD(HOST_TR_SELECTOR, host_tr_selector), FIELD64(IO_BITMAP_A, io_bitmap_a), FIELD64(IO_BITMAP_B, io_bitmap_b), FIELD64(MSR_BITMAP, msr_bitmap), FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr), FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr), FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr), FIELD64(TSC_OFFSET, tsc_offset), FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), FIELD64(APIC_ACCESS_ADDR, apic_access_addr), FIELD64(EPT_POINTER, ept_pointer), FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), FIELD64(GUEST_IA32_PAT, guest_ia32_pat), FIELD64(GUEST_IA32_EFER, guest_ia32_efer), FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl), FIELD64(GUEST_PDPTR0, guest_pdptr0), FIELD64(GUEST_PDPTR1, guest_pdptr1), FIELD64(GUEST_PDPTR2, guest_pdptr2), FIELD64(GUEST_PDPTR3, guest_pdptr3), FIELD64(GUEST_BNDCFGS, guest_bndcfgs), FIELD64(HOST_IA32_PAT, host_ia32_pat), FIELD64(HOST_IA32_EFER, host_ia32_efer), FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl), FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control), FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control), FIELD(EXCEPTION_BITMAP, exception_bitmap), FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask), FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match), FIELD(CR3_TARGET_COUNT, cr3_target_count), FIELD(VM_EXIT_CONTROLS, vm_exit_controls), FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count), FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count), FIELD(VM_ENTRY_CONTROLS, vm_entry_controls), FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count), FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field), FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, 
vm_entry_exception_error_code), FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len), FIELD(TPR_THRESHOLD, tpr_threshold), FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control), FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error), FIELD(VM_EXIT_REASON, vm_exit_reason), FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info), FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code), FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field), FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code), FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len), FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info), FIELD(GUEST_ES_LIMIT, guest_es_limit), FIELD(GUEST_CS_LIMIT, guest_cs_limit), FIELD(GUEST_SS_LIMIT, guest_ss_limit), FIELD(GUEST_DS_LIMIT, guest_ds_limit), FIELD(GUEST_FS_LIMIT, guest_fs_limit), FIELD(GUEST_GS_LIMIT, guest_gs_limit), FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit), FIELD(GUEST_TR_LIMIT, guest_tr_limit), FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit), FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit), FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes), FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes), FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes), FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes), FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes), FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes), FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes), FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes), FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info), FIELD(GUEST_ACTIVITY_STATE, guest_activity_state), FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs), FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs), FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value), FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask), FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask), FIELD(CR0_READ_SHADOW, cr0_read_shadow), FIELD(CR4_READ_SHADOW, cr4_read_shadow), FIELD(CR3_TARGET_VALUE0, cr3_target_value0), FIELD(CR3_TARGET_VALUE1, cr3_target_value1), FIELD(CR3_TARGET_VALUE2, cr3_target_value2), FIELD(CR3_TARGET_VALUE3, cr3_target_value3), FIELD(EXIT_QUALIFICATION, exit_qualification), FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address), FIELD(GUEST_CR0, guest_cr0), FIELD(GUEST_CR3, guest_cr3), FIELD(GUEST_CR4, guest_cr4), FIELD(GUEST_ES_BASE, guest_es_base), FIELD(GUEST_CS_BASE, guest_cs_base), FIELD(GUEST_SS_BASE, guest_ss_base), FIELD(GUEST_DS_BASE, guest_ds_base), FIELD(GUEST_FS_BASE, guest_fs_base), FIELD(GUEST_GS_BASE, guest_gs_base), FIELD(GUEST_LDTR_BASE, guest_ldtr_base), FIELD(GUEST_TR_BASE, guest_tr_base), FIELD(GUEST_GDTR_BASE, guest_gdtr_base), FIELD(GUEST_IDTR_BASE, guest_idtr_base), FIELD(GUEST_DR7, guest_dr7), FIELD(GUEST_RSP, guest_rsp), FIELD(GUEST_RIP, guest_rip), FIELD(GUEST_RFLAGS, guest_rflags), FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions), FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp), FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip), FIELD(HOST_CR0, host_cr0), FIELD(HOST_CR3, host_cr3), FIELD(HOST_CR4, host_cr4), FIELD(HOST_FS_BASE, host_fs_base), FIELD(HOST_GS_BASE, host_gs_base), FIELD(HOST_TR_BASE, host_tr_base), FIELD(HOST_GDTR_BASE, host_gdtr_base), FIELD(HOST_IDTR_BASE, host_idtr_base), FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp), FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip), FIELD(HOST_RSP, host_rsp), FIELD(HOST_RIP, host_rip), }; static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table); static inline short vmcs_field_to_offset(unsigned long field) { if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0) return -1; return vmcs_field_to_offset_table[field]; } 
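/*
 * Illustration only (not from this file): the FIELD()/FIELD64() tables and
 * vmcs_field_to_offset() above are an instance of a generic pattern - map
 * symbolic field IDs to byte offsets with offsetof() and access struct
 * members through the table instead of a big switch.  A self-contained
 * userspace sketch of the same idea, with invented names:
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct demo_state {
	unsigned int		revision;
	unsigned long long	tsc_offset;
	unsigned short		selector;
};

enum demo_field { F_REVISION, F_TSC_OFFSET, F_SELECTOR, F_MAX };

static const size_t demo_offset_table[F_MAX] = {
	[F_REVISION]	= offsetof(struct demo_state, revision),
	[F_TSC_OFFSET]	= offsetof(struct demo_state, tsc_offset),
	[F_SELECTOR]	= offsetof(struct demo_state, selector),
};

/* Bounds-checked lookup, mirroring vmcs_field_to_offset() above. */
static long demo_field_to_offset(unsigned int field)
{
	if (field >= F_MAX)
		return -1;
	return (long)demo_offset_table[field];
}

int main(void)
{
	struct demo_state s = { 0x11e57ed0, 42, 8 };
	unsigned long long val;
	long off = demo_field_to_offset(F_TSC_OFFSET);

	if (off >= 0) {
		memcpy(&val, (char *)&s + off, sizeof(val));
		printf("tsc_offset = %llu\n", val);	/* prints 42 */
	}
	return 0;
}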
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) { return to_vmx(vcpu)->nested.current_vmcs12; } static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) { struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT); if (is_error_page(page)) return NULL; return page; } static void nested_release_page(struct page *page) { kvm_release_page_dirty(page); } static void nested_release_page_clean(struct page *page) { kvm_release_page_clean(page); } static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); static u64 construct_eptp(unsigned long root_hpa); static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); static bool vmx_mpx_supported(void); static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static bool guest_state_valid(struct kvm_vcpu *vcpu); static u32 vmx_segment_access_rights(struct kvm_segment *var); static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu); static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); static int alloc_identity_pagetable(struct kvm *kvm); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); /* * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it. */ static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu); static DEFINE_PER_CPU(struct desc_ptr, host_gdt); static unsigned long *vmx_io_bitmap_a; static unsigned long *vmx_io_bitmap_b; static unsigned long *vmx_msr_bitmap_legacy; static unsigned long *vmx_msr_bitmap_longmode; static unsigned long *vmx_msr_bitmap_legacy_x2apic; static unsigned long *vmx_msr_bitmap_longmode_x2apic; static unsigned long *vmx_vmread_bitmap; static unsigned long *vmx_vmwrite_bitmap; static bool cpu_has_load_ia32_efer; static bool cpu_has_load_perf_global_ctrl; static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); static DEFINE_SPINLOCK(vmx_vpid_lock); static struct vmcs_config { int size; int order; u32 revision_id; u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; } vmcs_config; static struct vmx_capability { u32 ept; u32 vpid; } vmx_capability; #define VMX_SEGMENT_FIELD(seg) \ [VCPU_SREG_##seg] = { \ .selector = GUEST_##seg##_SELECTOR, \ .base = GUEST_##seg##_BASE, \ .limit = GUEST_##seg##_LIMIT, \ .ar_bytes = GUEST_##seg##_AR_BYTES, \ } static const struct kvm_vmx_segment_field { unsigned selector; unsigned base; unsigned limit; unsigned ar_bytes; } kvm_vmx_segment_fields[] = { VMX_SEGMENT_FIELD(CS), VMX_SEGMENT_FIELD(DS), VMX_SEGMENT_FIELD(ES), VMX_SEGMENT_FIELD(FS), VMX_SEGMENT_FIELD(GS), VMX_SEGMENT_FIELD(SS), VMX_SEGMENT_FIELD(TR), VMX_SEGMENT_FIELD(LDTR), }; static u64 host_efer; static void ept_save_pdptrs(struct kvm_vcpu *vcpu); /* * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it * away by decrementing the array size. 
*/ static const u32 vmx_msr_index[] = { #ifdef CONFIG_X86_64 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, #endif MSR_EFER, MSR_TSC_AUX, MSR_STAR, }; static inline bool is_page_fault(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); } static inline bool is_no_device(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); } static inline bool is_invalid_opcode(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); } static inline bool is_external_interrupt(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); } static inline bool is_machine_check(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; } static inline bool cpu_has_vmx_tpr_shadow(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; } static inline bool vm_need_tpr_shadow(struct kvm *kvm) { return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)); } static inline bool cpu_has_secondary_exec_ctrls(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; } static inline bool cpu_has_vmx_virtualize_apic_accesses(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } static inline bool cpu_has_vmx_virtualize_x2apic_mode(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } static inline bool cpu_has_vmx_apic_register_virt(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_APIC_REGISTER_VIRT; } static inline bool cpu_has_vmx_virtual_intr_delivery(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; } static inline bool cpu_has_vmx_posted_intr(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; } static inline bool cpu_has_vmx_apicv(void) { return cpu_has_vmx_apic_register_virt() && cpu_has_vmx_virtual_intr_delivery() && cpu_has_vmx_posted_intr(); } static inline bool cpu_has_vmx_flexpriority(void) { return cpu_has_vmx_tpr_shadow() && cpu_has_vmx_virtualize_apic_accesses(); } static inline bool cpu_has_vmx_ept_execute_only(void) { return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT; } static inline bool cpu_has_vmx_eptp_uncacheable(void) { return vmx_capability.ept & VMX_EPTP_UC_BIT; } static inline bool cpu_has_vmx_eptp_writeback(void) { return vmx_capability.ept & VMX_EPTP_WB_BIT; } static inline bool cpu_has_vmx_ept_2m_page(void) { return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_1g_page(void) { return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_4levels(void) { return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; } static inline bool cpu_has_vmx_ept_ad_bits(void) { return vmx_capability.ept & VMX_EPT_AD_BIT; } static inline bool cpu_has_vmx_invept_context(void) { return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT; } static inline bool cpu_has_vmx_invept_global(void) { 
return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; } static inline bool cpu_has_vmx_invvpid_single(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; } static inline bool cpu_has_vmx_invvpid_global(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; } static inline bool cpu_has_vmx_ept(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_EPT; } static inline bool cpu_has_vmx_unrestricted_guest(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_UNRESTRICTED_GUEST; } static inline bool cpu_has_vmx_ple(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PAUSE_LOOP_EXITING; } static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm) { return flexpriority_enabled && irqchip_in_kernel(kvm); } static inline bool cpu_has_vmx_vpid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_VPID; } static inline bool cpu_has_vmx_rdtscp(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_RDTSCP; } static inline bool cpu_has_vmx_invpcid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_INVPCID; } static inline bool cpu_has_virtual_nmis(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; } static inline bool cpu_has_vmx_wbinvd_exit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_WBINVD_EXITING; } static inline bool cpu_has_vmx_shadow_vmcs(void) { u64 vmx_msr; rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); /* check if the cpu supports writing r/o exit information fields */ if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) return false; return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_SHADOW_VMCS; } static inline bool report_flexpriority(void) { return flexpriority_enabled; } static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) { return vmcs12->cpu_based_vm_exec_control & bit; } static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) { return (vmcs12->cpu_based_vm_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && (vmcs12->secondary_vm_exec_control & bit); } static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; } static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER; } static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); } static inline bool is_exception(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); } static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification); static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification); static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { int i; for (i = 0; i < vmx->nmsrs; ++i) if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) return i; return -1; } static inline void __invvpid(int ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; u64 rsvd : 48; u64 gva; } operand = { vpid, 0, gva }; asm volatile (__ex(ASM_VMX_INVVPID) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:" : : "a"(&operand), "c"(ext) : "cc", "memory"); } static inline void __invept(int ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; } operand = {eptp, gpa}; asm volatile 
		(__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
	vmcs_clear(loaded_vmcs->vmcs);
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}

#ifdef CONFIG_KEXEC
/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on all cpus. All disabled by
 * default.
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * Updating loaded_vmcs->loaded_vmcss_on_cpu_link must happen
	 * before setting loaded_vmcs->cpu to -1, which is done in
	 * loaded_vmcs_init.  Otherwise, another cpu could see cpu == -1
	 * first and then add the vmcs to the percpu list before it is
	 * deleted.
*/ smp_wmb(); loaded_vmcs_init(loaded_vmcs); crash_enable_local_vmclear(cpu); } static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) { int cpu = loaded_vmcs->cpu; if (cpu != -1) smp_call_function_single(cpu, __loaded_vmcs_clear, loaded_vmcs, 1); } static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx) { if (vmx->vpid == 0) return; if (cpu_has_vmx_invvpid_single()) __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0); } static inline void vpid_sync_vcpu_global(void) { if (cpu_has_vmx_invvpid_global()) __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0); } static inline void vpid_sync_context(struct vcpu_vmx *vmx) { if (cpu_has_vmx_invvpid_single()) vpid_sync_vcpu_single(vmx); else vpid_sync_vcpu_global(); } static inline void ept_sync_global(void) { if (cpu_has_vmx_invept_global()) __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0); } static inline void ept_sync_context(u64 eptp) { if (enable_ept) { if (cpu_has_vmx_invept_context()) __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0); else ept_sync_global(); } } static __always_inline unsigned long vmcs_readl(unsigned long field) { unsigned long value; asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") : "=a"(value) : "d"(field) : "cc"); return value; } static __always_inline u16 vmcs_read16(unsigned long field) { return vmcs_readl(field); } static __always_inline u32 vmcs_read32(unsigned long field) { return vmcs_readl(field); } static __always_inline u64 vmcs_read64(unsigned long field) { #ifdef CONFIG_X86_64 return vmcs_readl(field); #else return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32); #endif } static noinline void vmwrite_error(unsigned long field, unsigned long value) { printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); dump_stack(); } static void vmcs_writel(unsigned long field, unsigned long value) { u8 error; asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" : "=q"(error) : "a"(value), "d"(field) : "cc"); if (unlikely(error)) vmwrite_error(field, value); } static void vmcs_write16(unsigned long field, u16 value) { vmcs_writel(field, value); } static void vmcs_write32(unsigned long field, u32 value) { vmcs_writel(field, value); } static void vmcs_write64(unsigned long field, u64 value) { vmcs_writel(field, value); #ifndef CONFIG_X86_64 asm volatile (""); vmcs_writel(field+1, value >> 32); #endif } static void vmcs_clear_bits(unsigned long field, u32 mask) { vmcs_writel(field, vmcs_readl(field) & ~mask); } static void vmcs_set_bits(unsigned long field, u32 mask) { vmcs_writel(field, vmcs_readl(field) | mask); } static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_ENTRY_CONTROLS, val); vmx->vm_entry_controls_shadow = val; } static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_entry_controls_shadow != val) vm_entry_controls_init(vmx, val); } static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_entry_controls_shadow; } static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); } static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); } static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_EXIT_CONTROLS, val); vmx->vm_exit_controls_shadow = val; } static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_exit_controls_shadow != val) 
vm_exit_controls_init(vmx, val); } static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_exit_controls_shadow; } static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); } static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); } static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) { vmx->segment_cache.bitmask = 0; } static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, unsigned field) { bool ret; u32 mask = 1 << (seg * SEG_FIELD_NR + field); if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); vmx->segment_cache.bitmask = 0; } ret = vmx->segment_cache.bitmask & mask; vmx->segment_cache.bitmask |= mask; return ret; } static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) { u16 *p = &vmx->segment_cache.seg[seg].selector; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); return *p; } static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) { ulong *p = &vmx->segment_cache.seg[seg].base; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); return *p; } static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].limit; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); return *p; } static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].ar; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); return *p; } static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | (1u << NM_VECTOR) | (1u << DB_VECTOR); if ((vcpu->guest_debug & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) eb |= 1u << BP_VECTOR; if (to_vmx(vcpu)->rmode.vm86_active) eb = ~0; if (enable_ept) eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ if (vcpu->fpu_active) eb &= ~(1u << NM_VECTOR); /* When we are running a nested L2 guest and L1 specified for it a * certain exception bitmap, we must trap the same exceptions and pass * them to L1. When running L2, we will only handle the exceptions * specified above if L1 did not want them. 
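 * (That is what the is_guest_mode() branch below does: it ORs
 * vmcs12->exception_bitmap into eb.)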
*/ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; vmcs_write32(EXCEPTION_BITMAP, eb); } static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { vm_entry_controls_clearbit(vmx, entry); vm_exit_controls_clearbit(vmx, exit); } static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); return; } break; } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == m->nr) return; --m->nr; m->guest[i] = m->guest[m->nr]; m->host[i] = m->host[m->nr]; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit, unsigned long guest_val_vmcs, unsigned long host_val_vmcs, u64 guest_val, u64 host_val) { vmcs_write64(guest_val_vmcs, guest_val); vmcs_write64(host_val_vmcs, host_val); vm_entry_controls_setbit(vmx, entry); vm_exit_controls_setbit(vmx, exit); } static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER, GUEST_IA32_EFER, HOST_IA32_EFER, guest_val, host_val); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, GUEST_IA32_PERF_GLOBAL_CTRL, HOST_IA32_PERF_GLOBAL_CTRL, guest_val, host_val); return; } break; } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == NR_AUTOLOAD_MSRS) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; } else if (i == m->nr) { ++m->nr; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } m->guest[i].index = msr; m->guest[i].value = guest_val; m->host[i].index = msr; m->host[i].value = host_val; } static void reload_tss(void) { /* * VT restores TR but not its size. Useless. 
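 * (ltr also refuses a TSS descriptor that is marked busy, which is why
 * the descriptor type is reset to "available" below before reloading.)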
*/ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); struct desc_struct *descs; descs = (void *)gdt->address; descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ load_TR_desc(); } static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { u64 guest_efer; u64 ignore_bits; guest_efer = vmx->vcpu.arch.efer; /* * NX is emulated; LMA and LME handled by hardware; SCE meaningless * outside long mode */ ignore_bits = EFER_NX | EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; vmx->guest_msrs[efer_offset].data = guest_efer; vmx->guest_msrs[efer_offset].mask = ~ignore_bits; clear_atomic_switch_msr(vmx, MSR_EFER); /* On ept, can't emulate nx, and must switch nx atomically */ if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) { guest_efer = vmx->vcpu.arch.efer; if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; } return true; } static unsigned long segment_base(u16 selector) { struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); struct desc_struct *d; unsigned long table_base; unsigned long v; if (!(selector & ~3)) return 0; table_base = gdt->address; if (selector & 4) { /* from ldt */ u16 ldt_selector = kvm_read_ldt(); if (!(ldt_selector & ~3)) return 0; table_base = segment_base(ldt_selector); } d = (struct desc_struct *)(table_base + (selector & ~7)); v = get_desc_base(d); #ifdef CONFIG_X86_64 if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11)) v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32; #endif return v; } static inline unsigned long kvm_read_tr_base(void) { u16 tr; asm("str %0" : "=g"(tr)); return segment_base(tr); } static void vmx_save_host_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int i; if (vmx->host_state.loaded) return; vmx->host_state.loaded = 1; /* * Set host fs and gs selectors. Unfortunately, 22.2.3 does not * allow segment selectors with cpl > 0 or ti == 1. 
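/* (Selectors with TI=1 or RPL>0 would fail the VM-entry host-state checks,
 * so such values are cleared here and reloaded manually on the way back
 * to the host.) */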
*/ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; savesegment(fs, vmx->host_state.fs_sel); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; } else { vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } savesegment(gs, vmx->host_state.gs_sel); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { vmcs_write16(HOST_GS_SELECTOR, 0); vmx->host_state.gs_ldt_reload_needed = 1; } #ifdef CONFIG_X86_64 savesegment(ds, vmx->host_state.ds_sel); savesegment(es, vmx->host_state.es_sel); #endif #ifdef CONFIG_X86_64 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); #else vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); #endif #ifdef CONFIG_X86_64 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); if (is_long_mode(&vmx->vcpu)) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (boot_cpu_has(X86_FEATURE_MPX)) rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); for (i = 0; i < vmx->save_nmsrs; ++i) kvm_set_shared_msr(vmx->guest_msrs[i].index, vmx->guest_msrs[i].data, vmx->guest_msrs[i].mask); } static void __vmx_load_host_state(struct vcpu_vmx *vmx) { if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); #ifdef CONFIG_X86_64 load_gs_index(vmx->host_state.gs_sel); #else loadsegment(gs, vmx->host_state.gs_sel); #endif } if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); #ifdef CONFIG_X86_64 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } #endif reload_tss(); #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif if (vmx->host_state.msr_host_bndcfgs) wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); /* * If the FPU is not active (through the host task or * the guest vcpu), then restore the cr0.TS bit. */ if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) stts(); load_gdt(this_cpu_ptr(&host_gdt)); } static void vmx_load_host_state(struct vcpu_vmx *vmx) { preempt_disable(); __vmx_load_host_state(vmx); preempt_enable(); } /* * Switches to specified vcpu, until a matching vcpu_put(), but assumes * vcpu mutex is already taken. */ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); if (!vmm_exclusive) kvm_cpu_vmxon(phys_addr); else if (vmx->loaded_vmcs->cpu != cpu) loaded_vmcs_clear(vmx->loaded_vmcs); if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; vmcs_load(vmx->loaded_vmcs->vmcs); } if (vmx->loaded_vmcs->cpu != cpu) { struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); unsigned long sysenter_esp; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); local_irq_disable(); crash_disable_local_vmclear(cpu); /* * Read loaded_vmcs->cpu should be before fetching * loaded_vmcs->loaded_vmcss_on_cpu_link. * See the comments in __loaded_vmcs_clear(). 
*/ smp_rmb(); list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, &per_cpu(loaded_vmcss_on_cpu, cpu)); crash_enable_local_vmclear(cpu); local_irq_enable(); /* * Linux uses per-cpu TSS and GDT, so set these when switching * processors. */ vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ vmx->loaded_vmcs->cpu = cpu; } } static void vmx_vcpu_put(struct kvm_vcpu *vcpu) { __vmx_load_host_state(to_vmx(vcpu)); if (!vmm_exclusive) { __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs); vcpu->cpu = -1; kvm_cpu_vmxoff(); } } static void vmx_fpu_activate(struct kvm_vcpu *vcpu) { ulong cr0; if (vcpu->fpu_active) return; vcpu->fpu_active = 1; cr0 = vmcs_readl(GUEST_CR0); cr0 &= ~(X86_CR0_TS | X86_CR0_MP); cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP); vmcs_writel(GUEST_CR0, cr0); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; if (is_guest_mode(vcpu)) vcpu->arch.cr0_guest_owned_bits &= ~get_vmcs12(vcpu)->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); /* * Return the cr0 value that a nested guest would read. This is a combination * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by * its hypervisor (cr0_read_shadow). */ static inline unsigned long nested_read_cr0(struct vmcs12 *fields) { return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) | (fields->cr0_read_shadow & fields->cr0_guest_host_mask); } static inline unsigned long nested_read_cr4(struct vmcs12 *fields) { return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) | (fields->cr4_read_shadow & fields->cr4_guest_host_mask); } static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) { /* Note that there is no vcpu->fpu_active = 0 here. The caller must * set this *before* calling this function. */ vmx_decache_cr0_guest_bits(vcpu); vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = 0; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); if (is_guest_mode(vcpu)) { /* * L1's specified read shadow might not contain the TS bit, * so now that we turned on shadowing of this bit, we need to * set this bit of the shadow. Like in nested_vmx_run we need * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet * up-to-date here because we just decached cr0.TS (and we'll * only update vmcs12->guest_cr0 on nested exit). 
*/ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) | (vcpu->arch.cr0 & X86_CR0_TS); vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); } else vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); } static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags, save_rflags; if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); rflags = vmcs_readl(GUEST_RFLAGS); if (to_vmx(vcpu)->rmode.vm86_active) { rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; save_rflags = to_vmx(vcpu)->rmode.save_rflags; rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; } to_vmx(vcpu)->rflags = rflags; } return to_vmx(vcpu)->rflags; } static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); to_vmx(vcpu)->rflags = rflags; if (to_vmx(vcpu)->rmode.vm86_active) { to_vmx(vcpu)->rmode.save_rflags = rflags; rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; } vmcs_writel(GUEST_RFLAGS, rflags); } static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) { u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); int ret = 0; if (interruptibility & GUEST_INTR_STATE_STI) ret |= KVM_X86_SHADOW_INT_STI; if (interruptibility & GUEST_INTR_STATE_MOV_SS) ret |= KVM_X86_SHADOW_INT_MOV_SS; return ret; } static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) { u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); u32 interruptibility = interruptibility_old; interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); if (mask & KVM_X86_SHADOW_INT_MOV_SS) interruptibility |= GUEST_INTR_STATE_MOV_SS; else if (mask & KVM_X86_SHADOW_INT_STI) interruptibility |= GUEST_INTR_STATE_STI; if ((interruptibility != interruptibility_old)) vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); } static void skip_emulated_instruction(struct kvm_vcpu *vcpu) { unsigned long rip; rip = kvm_rip_read(vcpu); rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_rip_write(vcpu, rip); /* skipping an emulated instruction also counts */ vmx_set_interrupt_shadow(vcpu, 0); } /* * KVM wants to inject page-faults which it got to the guest. This function * checks whether in a nested guest, we need to inject them to L1 or L2. 
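 * (If the vector is set in vmcs12->exception_bitmap, the fault is
 * reflected to L1 as a nested VM-exit; otherwise it is delivered
 * directly to L2.)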
*/ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; } static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code, bool reinject) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info = nr | INTR_INFO_VALID_MASK; if (!reinject && is_guest_mode(vcpu) && nested_vmx_check_exception(vcpu, nr)) return; if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; } if (vmx->rmode.vm86_active) { int inc_eip = 0; if (kvm_exception_is_soft(nr)) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } if (kvm_exception_is_soft(nr)) { vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); intr_info |= INTR_TYPE_SOFT_EXCEPTION; } else intr_info |= INTR_TYPE_HARD_EXCEPTION; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); } static bool vmx_rdtscp_supported(void) { return cpu_has_vmx_rdtscp(); } static bool vmx_invpcid_supported(void) { return cpu_has_vmx_invpcid() && enable_ept; } /* * Swap MSR entry in host/guest MSR entry array. */ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) { struct shared_msr_entry tmp; tmp = vmx->guest_msrs[to]; vmx->guest_msrs[to] = vmx->guest_msrs[from]; vmx->guest_msrs[from] = tmp; } static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) { unsigned long *msr_bitmap; if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic; else msr_bitmap = vmx_msr_bitmap_legacy_x2apic; } else { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode; else msr_bitmap = vmx_msr_bitmap_legacy; } vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); } /* * Set up the vmcs to automatically save and restore system * msrs. Don't touch the 64-bit msrs if the guest is in legacy * mode, as fiddling with msrs is very expensive. */ static void setup_msrs(struct vcpu_vmx *vmx) { int save_nmsrs, index; save_nmsrs = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) { index = __find_msr_index(vmx, MSR_SYSCALL_MASK); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_LSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_CSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_TSC_AUX); if (index >= 0 && vmx->rdtscp_enabled) move_msr_up(vmx, index, save_nmsrs++); /* * MSR_STAR is only needed on long mode guests, and only * if efer.sce is enabled. 
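 * (MSR_STAR holds the SYSCALL/SYSRET segment selectors, which are only
 * consulted when EFER.SCE enables those instructions.)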
*/ index = __find_msr_index(vmx, MSR_STAR); if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) move_msr_up(vmx, index, save_nmsrs++); } #endif index = __find_msr_index(vmx, MSR_EFER); if (index >= 0 && update_transition_efer(vmx, index)) move_msr_up(vmx, index, save_nmsrs++); vmx->save_nmsrs = save_nmsrs; if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(&vmx->vcpu); } /* * reads and returns guest's timestamp counter "register" * guest_tsc = host_tsc + tsc_offset -- 21.3 */ static u64 guest_read_tsc(void) { u64 host_tsc, tsc_offset; rdtscll(host_tsc); tsc_offset = vmcs_read64(TSC_OFFSET); return host_tsc + tsc_offset; } /* * Like guest_read_tsc, but always returns L1's notion of the timestamp * counter, even if a nested guest (L2) is currently running. */ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) { u64 tsc_offset; tsc_offset = is_guest_mode(vcpu) ? to_vmx(vcpu)->nested.vmcs01_tsc_offset : vmcs_read64(TSC_OFFSET); return host_tsc + tsc_offset; } /* * Engage any workarounds for mis-matched TSC rates. Currently limited to * software catchup for faster rates on slower CPUs. */ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) { if (!scale) return; if (user_tsc_khz > tsc_khz) { vcpu->arch.tsc_catchup = 1; vcpu->arch.tsc_always_catchup = 1; } else WARN(1, "user requested TSC rate below hardware speed\n"); } static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu) { return vmcs_read64(TSC_OFFSET); } /* * writes 'offset' into guest's timestamp counter offset register */ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { if (is_guest_mode(vcpu)) { /* * We're here if L1 chose not to trap WRMSR to TSC. According * to the spec, this should set L1's TSC; The offset that L1 * set for L2 remains unchanged, and still needs to be added * to the newly set TSC to get L2's TSC. */ struct vmcs12 *vmcs12; to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; /* recalculate vmcs02.TSC_OFFSET: */ vmcs12 = get_vmcs12(vcpu); vmcs_write64(TSC_OFFSET, offset + (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? vmcs12->tsc_offset : 0)); } else { trace_kvm_write_tsc_offset(vcpu->vcpu_id, vmcs_read64(TSC_OFFSET), offset); vmcs_write64(TSC_OFFSET, offset); } } static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host) { u64 offset = vmcs_read64(TSC_OFFSET); vmcs_write64(TSC_OFFSET, offset + adjustment); if (is_guest_mode(vcpu)) { /* Even when running L2, the adjustment needs to apply to L1 */ to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; } else trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset, offset + adjustment); } static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) { return target_tsc - native_read_tsc(); } static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31))); } /* * nested_vmx_allowed() checks whether a guest should be allowed to use VMX * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for * all guests if the "nested" module option is off, and can also be disabled * for a single guest by disabling its VMX cpuid bit. */ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) { return nested && guest_cpuid_has_vmx(vcpu); } /* * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be * returned for the various VMX controls MSRs when nested VMX is enabled. 
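* (These are the values an L1 hypervisor observes when it executes RDMSR on MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_PROCBASED_CTLS and friends; see vmx_get_vmx_msr() below.)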
* The same values should also be used to verify that vmcs12 control fields are * valid during nested entry from L1 to L2. * Each of these control msrs has a low and high 32-bit half: A low bit is on * if the corresponding bit in the (32-bit) control field *must* be on, and a * bit in the high half is on if the corresponding bit in the control field * may be on. See also vmx_control_verify(). * TODO: allow these variables to be modified (downgraded) by module options * or other means. */ static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high; static u32 nested_vmx_true_procbased_ctls_low; static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high; static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; static u32 nested_vmx_true_exit_ctls_low; static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; static u32 nested_vmx_true_entry_ctls_low; static u32 nested_vmx_misc_low, nested_vmx_misc_high; static u32 nested_vmx_ept_caps; static __init void nested_vmx_setup_ctls_msrs(void) { /* * Note that as a general rule, the high half of the MSRs (bits in * the control fields which may be 1) should be initialized by the * intersection of the underlying hardware's MSR (i.e., features which * can be supported) and the list of features we want to expose - * because they are known to be properly supported in our code. * Also, usually, the low half of the MSRs (bits which must be 1) can * be set to 0, meaning that L1 may turn off any of these bits. The * reason is that if one of these bits is necessary, it will appear * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control * fields of vmcs01 and vmcs02, will turn these bits off - and * nested_vmx_exit_handled() will not pass related exits to L1. * These rules have exceptions below. */ /* pin-based controls */ rdmsr(MSR_IA32_VMX_PINBASED_CTLS, nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high); nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | PIN_BASED_VMX_PREEMPTION_TIMER; /* exit controls */ rdmsr(MSR_IA32_VMX_EXIT_CTLS, nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_exit_ctls_high &= #ifdef CONFIG_X86_64 VM_EXIT_HOST_ADDR_SPACE_SIZE | #endif VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; if (vmx_mpx_supported()) nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; /* We support free control of debug control saving. */ nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low & ~VM_EXIT_SAVE_DEBUG_CONTROLS; /* entry controls */ rdmsr(MSR_IA32_VMX_ENTRY_CTLS, nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_entry_ctls_high &= #ifdef CONFIG_X86_64 VM_ENTRY_IA32E_MODE | #endif VM_ENTRY_LOAD_IA32_PAT; nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); if (vmx_mpx_supported()) nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; /* We support free control of debug control loading. 
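* (Concretely, the TRUE variant below just drops VM_ENTRY_LOAD_DEBUG_CONTROLS from the must-be-one set reported via MSR_IA32_VMX_TRUE_ENTRY_CTLS, letting L1 leave that bit clear.)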
*/ nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low & ~VM_ENTRY_LOAD_DEBUG_CONTROLS; /* cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_procbased_ctls_high &= CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; /* * We can allow some features even when not supported by the * hardware. For example, L1 can specify an MSR bitmap - and we * can use it to avoid exits to L1 - even when L0 runs L2 * without MSR bitmaps. */ nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | CPU_BASED_USE_MSR_BITMAPS; /* We support free control of CR3 access interception. */ nested_vmx_true_procbased_ctls_low = nested_vmx_procbased_ctls_low & ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); /* secondary cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high); nested_vmx_secondary_ctls_low = 0; nested_vmx_secondary_ctls_high &= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_WBINVD_EXITING; if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | VMX_EPT_INVEPT_BIT; nested_vmx_ept_caps &= vmx_capability.ept; /* * For nested guests, we don't do anything specific * for single context invalidation. Hence, only advertise * support for global context invalidation. */ nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT; } else nested_vmx_ept_caps = 0; /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high); nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA; nested_vmx_misc_low |= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | VMX_MISC_ACTIVITY_HLT; nested_vmx_misc_high = 0; } static inline bool vmx_control_verify(u32 control, u32 low, u32 high) { /* * Bits 0 in high must be 0, and bits 1 in low must be 1. */ return ((control & high) | low) == control; } static inline u64 vmx_control_msr(u32 low, u32 high) { return low | ((u64)high << 32); } /* Returns 0 on success, non-0 otherwise. */ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { switch (msr_index) { case MSR_IA32_VMX_BASIC: /* * This MSR reports some information about VMX support. We * should return information about the VMX we emulate for the * guest, and the VMCS structure we give it - not about the * VMX support of the underlying hardware. 
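* As a rough sketch of how an L1 would decode the value composed below: revision = basic & 0x7fffffff; vmcs_size = (basic >> 32) & 0x1fff; mem_type = (basic >> 50) & 0xf; (bit positions per the SDM, shown here only for orientation).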
*/ *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); break; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high); break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *pdata = vmx_control_msr(nested_vmx_true_procbased_ctls_low, nested_vmx_procbased_ctls_high); break; case MSR_IA32_VMX_PROCBASED_CTLS: *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: *pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low, nested_vmx_exit_ctls_high); break; case MSR_IA32_VMX_EXIT_CTLS: *pdata = vmx_control_msr(nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: *pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low, nested_vmx_entry_ctls_high); break; case MSR_IA32_VMX_ENTRY_CTLS: *pdata = vmx_control_msr(nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); break; case MSR_IA32_VMX_MISC: *pdata = vmx_control_msr(nested_vmx_misc_low, nested_vmx_misc_high); break; /* * These MSRs specify bits which the guest must keep fixed (on or off) * while L1 is in VMXON mode (in L1's root mode, or running an L2). * We picked the standard core2 setting. */ #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) #define VMXON_CR4_ALWAYSON X86_CR4_VMXE case MSR_IA32_VMX_CR0_FIXED0: *pdata = VMXON_CR0_ALWAYSON; break; case MSR_IA32_VMX_CR0_FIXED1: *pdata = -1ULL; break; case MSR_IA32_VMX_CR4_FIXED0: *pdata = VMXON_CR4_ALWAYSON; break; case MSR_IA32_VMX_CR4_FIXED1: *pdata = -1ULL; break; case MSR_IA32_VMX_VMCS_ENUM: *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */ break; case MSR_IA32_VMX_PROCBASED_CTLS2: *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high); break; case MSR_IA32_VMX_EPT_VPID_CAP: /* Currently, no nested vpid support */ *pdata = nested_vmx_ept_caps; break; default: return 1; } return 0; } /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { u64 data; struct shared_msr_entry *msr; if (!pdata) { printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); return -EINVAL; } switch (msr_index) { #ifdef CONFIG_X86_64 case MSR_FS_BASE: data = vmcs_readl(GUEST_FS_BASE); break; case MSR_GS_BASE: data = vmcs_readl(GUEST_GS_BASE); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(to_vmx(vcpu)); data = to_vmx(vcpu)->msr_guest_kernel_gs_base; break; #endif case MSR_EFER: return kvm_get_msr_common(vcpu, msr_index, pdata); case MSR_IA32_TSC: data = guest_read_tsc(); break; case MSR_IA32_SYSENTER_CS: data = vmcs_read32(GUEST_SYSENTER_CS); break; case MSR_IA32_SYSENTER_EIP: data = vmcs_readl(GUEST_SYSENTER_EIP); break; case MSR_IA32_SYSENTER_ESP: data = vmcs_readl(GUEST_SYSENTER_ESP); break; case MSR_IA32_BNDCFGS: if (!vmx_mpx_supported()) return 1; data = vmcs_read64(GUEST_BNDCFGS); break; case MSR_IA32_FEATURE_CONTROL: if (!nested_vmx_allowed(vcpu)) return 1; data = to_vmx(vcpu)->nested.msr_ia32_feature_control; break; case MSR_IA32_VMX_BASIC ... 
MSR_IA32_VMX_VMFUNC: if (!nested_vmx_allowed(vcpu)) return 1; return vmx_get_vmx_msr(vcpu, msr_index, pdata); case MSR_TSC_AUX: if (!to_vmx(vcpu)->rdtscp_enabled) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(to_vmx(vcpu), msr_index); if (msr) { data = msr->data; break; } return kvm_get_msr_common(vcpu, msr_index, pdata); } *pdata = data; return 0; } static void vmx_leave_nested(struct kvm_vcpu *vcpu); /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr; int ret = 0; u32 msr_index = msr_info->index; u64 data = msr_info->data; switch (msr_index) { case MSR_EFER: ret = kvm_set_msr_common(vcpu, msr_info); break; #ifdef CONFIG_X86_64 case MSR_FS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_FS_BASE, data); break; case MSR_GS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_GS_BASE, data); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(vmx); vmx->msr_guest_kernel_gs_base = data; break; #endif case MSR_IA32_SYSENTER_CS: vmcs_write32(GUEST_SYSENTER_CS, data); break; case MSR_IA32_SYSENTER_EIP: vmcs_writel(GUEST_SYSENTER_EIP, data); break; case MSR_IA32_SYSENTER_ESP: vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_BNDCFGS: if (!vmx_mpx_supported()) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) return 1; vmcs_write64(GUEST_IA32_PAT, data); vcpu->arch.pat = data; break; } ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_TSC_ADJUST: ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_FEATURE_CONTROL: if (!nested_vmx_allowed(vcpu) || (to_vmx(vcpu)->nested.msr_ia32_feature_control & FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) return 1; vmx->nested.msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) vmx_leave_nested(vcpu); break; case MSR_IA32_VMX_BASIC ... 
MSR_IA32_VMX_VMFUNC: return 1; /* they are read-only */ case MSR_TSC_AUX: if (!vmx->rdtscp_enabled) return 1; /* Check reserved bit, higher 32 bits should be zero */ if ((data >> 32) != 0) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(vmx, msr_index); if (msr) { u64 old_msr_data = msr->data; msr->data = data; if (msr - vmx->guest_msrs < vmx->save_nmsrs) { preempt_disable(); ret = kvm_set_shared_msr(msr->index, msr->data, msr->mask); preempt_enable(); if (ret) msr->data = old_msr_data; } break; } ret = kvm_set_msr_common(vcpu, msr_info); } return ret; } static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); switch (reg) { case VCPU_REGS_RSP: vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); break; case VCPU_REGS_RIP: vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); break; case VCPU_EXREG_PDPTR: if (enable_ept) ept_save_pdptrs(vcpu); break; default: break; } } static __init int cpu_has_kvm_support(void) { return cpu_has_vmx(); } static __init int vmx_disabled_by_bios(void) { u64 msr; rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); if (msr & FEATURE_CONTROL_LOCKED) { /* launched w/ TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && tboot_enabled()) return 1; /* launched w/o TXT and VMX only enabled w/ TXT */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && !tboot_enabled()) { printk(KERN_WARNING "kvm: disable TXT in the BIOS or " "activate TXT before enabling KVM\n"); return 1; } /* launched w/o TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && !tboot_enabled()) return 1; } return 0; } static void kvm_cpu_vmxon(u64 addr) { asm volatile (ASM_VMX_VMXON_RAX : : "a"(&addr), "m"(addr) : "memory", "cc"); } static int hardware_enable(void) { int cpu = raw_smp_processor_id(); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); u64 old, test_bits; if (read_cr4() & X86_CR4_VMXE) return -EBUSY; INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); /* * Now we can enable the vmclear operation in kdump * since the loaded_vmcss_on_cpu list on this cpu * has been initialized. * * Though the cpu is not in VMX operation now, there * is no problem to enable the vmclear operation * for the loaded_vmcss_on_cpu list is empty! */ crash_enable_local_vmclear(cpu); rdmsrl(MSR_IA32_FEATURE_CONTROL, old); test_bits = FEATURE_CONTROL_LOCKED; test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; if (tboot_enabled()) test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; if ((old & test_bits) != test_bits) { /* enable and lock */ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); } write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ if (vmm_exclusive) { kvm_cpu_vmxon(phys_addr); ept_sync_global(); } native_store_gdt(this_cpu_ptr(&host_gdt)); return 0; } static void vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v, *n; list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) __loaded_vmcs_clear(v); } /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() * tricks. 
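* (__ex() expands to __kvm_handle_fault_on_reboot(), which attaches an exception fixup so that a VMXOFF faulting during an emergency reboot is swallowed instead of oopsing.)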
*/ static void kvm_cpu_vmxoff(void) { asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); } static void hardware_disable(void) { if (vmm_exclusive) { vmclear_local_loaded_vmcss(); kvm_cpu_vmxoff(); } write_cr4(read_cr4() & ~X86_CR4_VMXE); } static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) { u32 vmx_msr_low, vmx_msr_high; u32 ctl = ctl_min | ctl_opt; rdmsr(msr, vmx_msr_low, vmx_msr_high); ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ /* Ensure minimum (required) set of control bits are supported. */ if (ctl_min & ~ctl) return -EIO; *result = ctl; return 0; } static __init bool allow_1_setting(u32 msr, u32 ctl) { u32 vmx_msr_low, vmx_msr_high; rdmsr(msr, vmx_msr_low, vmx_msr_high); return vmx_msr_high & ctl; } static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) { u32 vmx_msr_low, vmx_msr_high; u32 min, opt, min2, opt2; u32 _pin_based_exec_control = 0; u32 _cpu_based_exec_control = 0; u32 _cpu_based_2nd_exec_control = 0; u32 _vmexit_control = 0; u32 _vmentry_control = 0; min = CPU_BASED_HLT_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_MWAIT_EXITING | CPU_BASED_MONITOR_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_RDPMC_EXITING; opt = CPU_BASED_TPR_SHADOW | CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, &_cpu_based_exec_control) < 0) return -EIO; #ifdef CONFIG_X86_64 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & ~CPU_BASED_CR8_STORE_EXITING; #endif if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { min2 = 0; opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_WBINVD_EXITING | SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_PAUSE_LOOP_EXITING | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_SHADOW_VMCS; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) return -EIO; } #ifndef CONFIG_X86_64 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; #endif if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_2nd_exec_control &= ~( SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT enabled */ _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_INVLPG_EXITING); rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, vmx_capability.ept, vmx_capability.vpid); } min = VM_EXIT_SAVE_DEBUG_CONTROLS; #ifdef CONFIG_X86_64 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; #endif opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT | VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, &_vmexit_control) < 0) return -EIO; min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; opt = PIN_BASED_VIRTUAL_NMIS | 
PIN_BASED_POSTED_INTR; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, &_pin_based_exec_control) < 0) return -EIO; if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) || !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT)) _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; min = VM_ENTRY_LOAD_DEBUG_CONTROLS; opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, &_vmentry_control) < 0) return -EIO; rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) return -EIO; #ifdef CONFIG_X86_64 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ if (vmx_msr_high & (1u<<16)) return -EIO; #endif /* Require Write-Back (WB) memory type for VMCS accesses. */ if (((vmx_msr_high >> 18) & 15) != 6) return -EIO; vmcs_conf->size = vmx_msr_high & 0x1fff; vmcs_conf->order = get_order(vmcs_config.size); vmcs_conf->revision_id = vmx_msr_low; vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; vmcs_conf->vmexit_ctrl = _vmexit_control; vmcs_conf->vmentry_ctrl = _vmentry_control; cpu_has_load_ia32_efer = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_EFER) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_EFER); cpu_has_load_perf_global_ctrl = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); /* * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL * but due to the errata below it can't be used. The workaround is to use * the MSR-load mechanism to switch IA32_PERF_GLOBAL_CTRL. * * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32] * * AAK155 (model 26) * AAP115 (model 30) * AAT100 (model 37) * BC86,AAY89,BD102 (model 44) * BA97 (model 46) * */ if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) { switch (boot_cpu_data.x86_model) { case 26: case 30: case 37: case 44: case 46: cpu_has_load_perf_global_ctrl = false; printk_once(KERN_WARNING "kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " "does not work properly. 
Using workaround\n"); break; default: break; } } return 0; } static struct vmcs *alloc_vmcs_cpu(int cpu) { int node = cpu_to_node(cpu); struct page *pages; struct vmcs *vmcs; pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order); if (!pages) return NULL; vmcs = page_address(pages); memset(vmcs, 0, vmcs_config.size); vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ return vmcs; } static struct vmcs *alloc_vmcs(void) { return alloc_vmcs_cpu(raw_smp_processor_id()); } static void free_vmcs(struct vmcs *vmcs) { free_pages((unsigned long)vmcs, vmcs_config.order); } /* * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded */ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) { if (!loaded_vmcs->vmcs) return; loaded_vmcs_clear(loaded_vmcs); free_vmcs(loaded_vmcs->vmcs); loaded_vmcs->vmcs = NULL; } static void free_kvm_area(void) { int cpu; for_each_possible_cpu(cpu) { free_vmcs(per_cpu(vmxarea, cpu)); per_cpu(vmxarea, cpu) = NULL; } } static void init_vmcs_shadow_fields(void) { int i, j; /* No checks for read only fields yet */ for (i = j = 0; i < max_shadow_read_write_fields; i++) { switch (shadow_read_write_fields[i]) { case GUEST_BNDCFGS: if (!vmx_mpx_supported()) continue; break; default: break; } if (j < i) shadow_read_write_fields[j] = shadow_read_write_fields[i]; j++; } max_shadow_read_write_fields = j; /* shadowed fields guest access without vmexit */ for (i = 0; i < max_shadow_read_write_fields; i++) { clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap); clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap); } for (i = 0; i < max_shadow_read_only_fields; i++) clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap); } static __init int alloc_kvm_area(void) { int cpu; for_each_possible_cpu(cpu) { struct vmcs *vmcs; vmcs = alloc_vmcs_cpu(cpu); if (!vmcs) { free_kvm_area(); return -ENOMEM; } per_cpu(vmxarea, cpu) = vmcs; } return 0; } static __init int hardware_setup(void) { if (setup_vmcs_config(&vmcs_config) < 0) return -EIO; if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); if (!cpu_has_vmx_vpid()) enable_vpid = 0; if (!cpu_has_vmx_shadow_vmcs()) enable_shadow_vmcs = 0; if (enable_shadow_vmcs) init_vmcs_shadow_fields(); if (!cpu_has_vmx_ept() || !cpu_has_vmx_ept_4levels()) { enable_ept = 0; enable_unrestricted_guest = 0; enable_ept_ad_bits = 0; } if (!cpu_has_vmx_ept_ad_bits()) enable_ept_ad_bits = 0; if (!cpu_has_vmx_unrestricted_guest()) enable_unrestricted_guest = 0; if (!cpu_has_vmx_flexpriority()) { flexpriority_enabled = 0; /* * set_apic_access_page_addr() is used to reload apic access * page upon invalidation. No need to do anything if the * processor does not have the APIC_ACCESS_ADDR VMCS field. 
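* (Without flexpriority the APIC-access page is never programmed into the VMCS, so there is nothing to re-arm on invalidation and the hook can simply be dropped, which is what the assignment below does.)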
*/ kvm_x86_ops->set_apic_access_page_addr = NULL; } if (!cpu_has_vmx_tpr_shadow()) kvm_x86_ops->update_cr8_intercept = NULL; if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); if (!cpu_has_vmx_ple()) ple_gap = 0; if (!cpu_has_vmx_apicv()) enable_apicv = 0; if (enable_apicv) kvm_x86_ops->update_cr8_intercept = NULL; else { kvm_x86_ops->hwapic_irr_update = NULL; kvm_x86_ops->deliver_posted_interrupt = NULL; kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy; } if (nested) nested_vmx_setup_ctls_msrs(); return alloc_kvm_area(); } static __exit void hardware_unsetup(void) { free_kvm_area(); } static bool emulation_required(struct kvm_vcpu *vcpu) { return emulate_invalid_guest_state && !guest_state_valid(vcpu); } static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save) { if (!emulate_invalid_guest_state) { /* * CS and SS RPL should be equal during guest entry according * to VMX spec, but in reality it is not always so. Since vcpu * is in the middle of the transition from real mode to * protected mode, it is safe to assume that RPL 0 is a good * default value. */ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) save->selector &= ~SELECTOR_RPL_MASK; save->dpl = save->selector & SELECTOR_RPL_MASK; save->s = 1; } vmx_set_segment(vcpu, save, seg); } static void enter_pmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Update the real-mode segment cache. It may not be up to date if a segment * register was written while the vcpu was in guest mode. */ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 0; vmx_segment_cache_clear(vmx); vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); flags = vmcs_readl(GUEST_RFLAGS); flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); update_exception_bitmap(vcpu); fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); } static void fix_rmode_seg(int seg, struct kvm_segment *save) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; struct kvm_segment var = *save; var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; if (!emulate_invalid_guest_state) { var.selector = var.base >> 4; var.base = var.base & 0xffff0; var.limit = 0xffff; var.g = 0; var.db = 0; var.present = 1; var.s = 1; var.l = 0; var.unusable = 0; var.type = 0x3; var.avl = 0; if (save->base & 0xf) printk_once(KERN_WARNING "kvm: segment base is not " "paragraph aligned when entering " "protected mode (seg=%d)", seg); } vmcs_write16(sf->selector, var.selector); vmcs_write32(sf->base, var.base); vmcs_write32(sf->limit, var.limit); vmcs_write32(sf->ar_bytes, 
vmx_segment_access_rights(&var)); } static void enter_rmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 1; /* * Very old userspace does not call KVM_SET_TSS_ADDR before entering the * vcpu. Warn the user that an update is overdue. */ if (!vcpu->kvm->arch.tss_addr) printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be " "called before entering vcpu\n"); vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); flags = vmcs_readl(GUEST_RFLAGS); vmx->rmode.save_rflags = flags; flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); update_exception_bitmap(vcpu); fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); kvm_mmu_reset_context(vcpu); } static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); if (!msr) return; /* * Force kernel_gs_base reloading before EFER changes, as control * of this MSR depends on is_long_mode(). */ vmx_load_host_state(to_vmx(vcpu)); vcpu->arch.efer = efer; if (efer & EFER_LMA) { vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer; } else { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer & ~EFER_LME; } setup_msrs(vmx); } #ifdef CONFIG_X86_64 static void enter_lmode(struct kvm_vcpu *vcpu) { u32 guest_tr_ar; vmx_segment_cache_clear(to_vmx(vcpu)); guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", __func__); vmcs_write32(GUEST_TR_AR_BYTES, (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS); } vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); } static void exit_lmode(struct kvm_vcpu *vcpu) { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); } #endif static void vmx_flush_tlb(struct kvm_vcpu *vcpu) { vpid_sync_context(to_vmx(vcpu)); if (enable_ept) { if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); } } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; vcpu->arch.cr0 &= ~cr0_guest_owned_bits; vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; } static void vmx_decache_cr3(struct kvm_vcpu *vcpu) { if (enable_ept && is_paging(vcpu)) vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); } static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; vcpu->arch.cr4 &= ~cr4_guest_owned_bits; vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; } static void ept_load_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty)) return; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); } } static void ept_save_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); } __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); } static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | (CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } else if (!is_paging(vcpu)) { /* From nonpaging to paging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } if (!(cr0 & X86_CR0_WP)) *hw_cr0 &= ~X86_CR0_WP; } static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long hw_cr0; hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); if (enable_unrestricted_guest) hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) enter_pmode(vcpu); if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) enter_rmode(vcpu); } #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) enter_lmode(vcpu); if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) exit_lmode(vcpu); } 
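/* Illustrative note, not a functional change: CR0.PG rising while EFER.LME is set is the architectural long-mode activation point, which is why enter_lmode() must run here, before the new CR0 value is committed to the VMCS below. */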
#endif if (enable_ept) ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); if (!vcpu->fpu_active) hw_cr0 |= X86_CR0_TS | X86_CR0_MP; vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; /* depends on vcpu->arch.cr0 to be set to a new value */ vmx->emulation_required = emulation_required(vcpu); } static u64 construct_eptp(unsigned long root_hpa) { u64 eptp; /* TODO write the value reading from MSR */ eptp = VMX_EPT_DEFAULT_MT | VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; if (enable_ept_ad_bits) eptp |= VMX_EPT_AD_ENABLE_BIT; eptp |= (root_hpa & PAGE_MASK); return eptp; } static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { unsigned long guest_cr3; u64 eptp; guest_cr3 = cr3; if (enable_ept) { eptp = construct_eptp(cr3); vmcs_write64(EPT_POINTER, eptp); if (is_paging(vcpu) || is_guest_mode(vcpu)) guest_cr3 = kvm_read_cr3(vcpu); else guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; ept_load_pdptrs(vcpu); } vmx_flush_tlb(vcpu); vmcs_writel(GUEST_CR3, guest_cr3); } static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ? KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); if (cr4 & X86_CR4_VMXE) { /* * To use VMXON (and later other VMX instructions), a guest * must first be able to turn on cr4.VMXE (see handle_vmon()). * So basically the check on whether to allow nested VMX * is here. */ if (!nested_vmx_allowed(vcpu)) return 1; } if (to_vmx(vcpu)->nested.vmxon && ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) return 1; vcpu->arch.cr4 = cr4; if (enable_ept) { if (!is_paging(vcpu)) { hw_cr4 &= ~X86_CR4_PAE; hw_cr4 |= X86_CR4_PSE; /* * SMEP/SMAP is disabled if CPU is in non-paging mode * in hardware. However KVM always uses paging mode to * emulate guest non-paging mode with TDP. * To emulate this behavior, SMEP/SMAP needs to be * manually disabled when guest switches to non-paging * mode. */ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP); } else if (!(cr4 & X86_CR4_PAE)) { hw_cr4 &= ~X86_CR4_PAE; } } vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); return 0; } static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 ar; if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { *var = vmx->rmode.segs[seg]; if (seg == VCPU_SREG_TR || var->selector == vmx_read_guest_seg_selector(vmx, seg)) return; var->base = vmx_read_guest_seg_base(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); return; } var->base = vmx_read_guest_seg_base(vmx, seg); var->limit = vmx_read_guest_seg_limit(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); ar = vmx_read_guest_seg_ar(vmx, seg); var->unusable = (ar >> 16) & 1; var->type = ar & 15; var->s = (ar >> 4) & 1; var->dpl = (ar >> 5) & 3; /* * Some userspaces do not preserve unusable property. Since usable * segment has to be present according to VMX spec we can use present * property to amend userspace bug by making unusable segment always * nonpresent. vmx_segment_access_rights() already marks nonpresent * segment as unusable. 
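* (For example, a userspace restore that hands back present == 0 on an otherwise usable segment would make the guest state invalid; deriving present from !unusable below sidesteps that.)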
*/ var->present = !var->unusable; var->avl = (ar >> 12) & 1; var->l = (ar >> 13) & 1; var->db = (ar >> 14) & 1; var->g = (ar >> 15) & 1; } static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment s; if (to_vmx(vcpu)->rmode.vm86_active) { vmx_get_segment(vcpu, &s, seg); return s.base; } return vmx_read_guest_seg_base(to_vmx(vcpu), seg); } static int vmx_get_cpl(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (unlikely(vmx->rmode.vm86_active)) return 0; else { int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); return AR_DPL(ar); } } static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; if (var->unusable || !var->present) ar = 1 << 16; else { ar = var->type & 15; ar |= (var->s & 1) << 4; ar |= (var->dpl & 3) << 5; ar |= (var->present & 1) << 7; ar |= (var->avl & 1) << 12; ar |= (var->l & 1) << 13; ar |= (var->db & 1) << 14; ar |= (var->g & 1) << 15; } return ar; } static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; vmx_segment_cache_clear(vmx); if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { vmx->rmode.segs[seg] = *var; if (seg == VCPU_SREG_TR) vmcs_write16(sf->selector, var->selector); else if (var->s) fix_rmode_seg(seg, &vmx->rmode.segs[seg]); goto out; } vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); /* * Fix the "Accessed" bit in AR field of segment registers for older * qemu binaries. * IA32 arch specifies that at the time of processor reset the * "Accessed" bit in the AR field of segment registers is 1. And qemu * is setting it to 0 in the userland code. This causes invalid guest * state vmexit when "unrestricted guest" mode is turned on. * Fix for this setup issue in cpu_reset is being pushed in the qemu * tree. Newer qemu binaries with that qemu fix would not need this * kvm hack. 
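* (The fixup below simply ORs var->type with 1, the accessed bit of the AR type field, for every segment except LDTR whenever unrestricted guest mode is enabled.)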
*/ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) var->type |= 0x1; /* Accessed */ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); out: vmx->emulation_required = emulation_required(vcpu); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); *db = (ar >> 14) & 1; *l = (ar >> 13) & 1; } static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_IDTR_LIMIT); dt->address = vmcs_readl(GUEST_IDTR_BASE); } static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_IDTR_LIMIT, dt->size); vmcs_writel(GUEST_IDTR_BASE, dt->address); } static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_GDTR_LIMIT); dt->address = vmcs_readl(GUEST_GDTR_BASE); } static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_GDTR_LIMIT, dt->size); vmcs_writel(GUEST_GDTR_BASE, dt->address); } static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; u32 ar; vmx_get_segment(vcpu, &var, seg); var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; ar = vmx_segment_access_rights(&var); if (var.base != (var.selector << 4)) return false; if (var.limit != 0xffff) return false; if (ar != 0xf3) return false; return true; } static bool code_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment cs; unsigned int cs_rpl; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs_rpl = cs.selector & SELECTOR_RPL_MASK; if (cs.unusable) return false; if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; if (cs.type & AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; } else { if (cs.dpl != cs_rpl) return false; } if (!cs.present) return false; /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ return true; } static bool stack_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ss; unsigned int ss_rpl; vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); ss_rpl = ss.selector & SELECTOR_RPL_MASK; if (ss.unusable) return true; if (ss.type != 3 && ss.type != 7) return false; if (!ss.s) return false; if (ss.dpl != ss_rpl) /* DPL != RPL */ return false; if (!ss.present) return false; return true; } static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; unsigned int rpl; vmx_get_segment(vcpu, &var, seg); rpl = var.selector & SELECTOR_RPL_MASK; if (var.unusable) return true; if (!var.s) return false; if (!var.present) return false; if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) { if (var.dpl < rpl) /* DPL < RPL */ return false; } /* TODO: Add other members to kvm_segment_field to allow checking for other access * rights flags */ return true; } static bool tr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment tr; vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); if (tr.unusable) return false; if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ return false; if (!tr.present) return false; return true; } static bool ldtr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ldtr; vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); if (ldtr.unusable) return true; if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) return false; if (!ldtr.present) return false; return true; } static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) { struct 
kvm_segment cs, ss; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); return ((cs.selector & SELECTOR_RPL_MASK) == (ss.selector & SELECTOR_RPL_MASK)); } /* * Check if guest state is valid. Returns true if valid, false if * not. * We assume that registers are always usable */ static bool guest_state_valid(struct kvm_vcpu *vcpu) { if (enable_unrestricted_guest) return true; /* real mode guest state checks */ if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) return false; } else { /* protected mode guest state checks */ if (!cs_ss_rpl_check(vcpu)) return false; if (!code_segment_valid(vcpu)) return false; if (!stack_segment_valid(vcpu)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_GS)) return false; if (!tr_valid(vcpu)) return false; if (!ldtr_valid(vcpu)) return false; } /* TODO: * - Add checks on RIP * - Add checks on RFLAGS */ return true; } static int init_rmode_tss(struct kvm *kvm) { gfn_t fn; u16 data = 0; int idx, r; idx = srcu_read_lock(&kvm->srcu); fn = kvm->arch.tss_addr >> PAGE_SHIFT; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; r = kvm_write_guest_page(kvm, fn++, &data, TSS_IOPB_BASE_OFFSET, sizeof(u16)); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = ~0; r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, sizeof(u8)); out: srcu_read_unlock(&kvm->srcu, idx); return r; } static int init_rmode_identity_map(struct kvm *kvm) { int i, idx, r = 0; pfn_t identity_map_pfn; u32 tmp; if (!enable_ept) return 0; /* Protect kvm->arch.ept_identity_pagetable_done. 
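* (kvm->slots_lock also serializes the memslot creation done in alloc_identity_pagetable(), so the ept_identity_pagetable_done test under the lock below is a safe double-checked pattern.)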
*/ mutex_lock(&kvm->slots_lock); if (likely(kvm->arch.ept_identity_pagetable_done)) goto out2; identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; r = alloc_identity_pagetable(kvm); if (r < 0) goto out2; idx = srcu_read_lock(&kvm->srcu); r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); if (r < 0) goto out; /* Set up identity-mapping pagetable for EPT in real mode */ for (i = 0; i < PT32_ENT_PER_PAGE; i++) { tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); r = kvm_write_guest_page(kvm, identity_map_pfn, &tmp, i * sizeof(tmp), sizeof(tmp)); if (r < 0) goto out; } kvm->arch.ept_identity_pagetable_done = true; out: srcu_read_unlock(&kvm->srcu, idx); out2: mutex_unlock(&kvm->slots_lock); return r; } static void seg_setup(int seg) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; unsigned int ar; vmcs_write16(sf->selector, 0); vmcs_writel(sf->base, 0); vmcs_write32(sf->limit, 0xffff); ar = 0x93; if (seg == VCPU_SREG_CS) ar |= 0x08; /* code segment */ vmcs_write32(sf->ar_bytes, ar); } static int alloc_apic_access_page(struct kvm *kvm) { struct page *page; struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) goto out; kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; kvm_userspace_mem.flags = 0; kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE; kvm_userspace_mem.memory_size = PAGE_SIZE; r = __kvm_set_memory_region(kvm, &kvm_userspace_mem); if (r) goto out; page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); if (is_error_page(page)) { r = -EFAULT; goto out; } /* * Do not pin the page in memory, so that memory hot-unplug * is able to migrate it. */ put_page(page); kvm->arch.apic_access_page_done = true; out: mutex_unlock(&kvm->slots_lock); return r; } static int alloc_identity_pagetable(struct kvm *kvm) { /* Called with kvm->slots_lock held. */ struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; BUG_ON(kvm->arch.ept_identity_pagetable_done); kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; kvm_userspace_mem.flags = 0; kvm_userspace_mem.guest_phys_addr = kvm->arch.ept_identity_map_addr; kvm_userspace_mem.memory_size = PAGE_SIZE; r = __kvm_set_memory_region(kvm, &kvm_userspace_mem); return r; } static void allocate_vpid(struct vcpu_vmx *vmx) { int vpid; vmx->vpid = 0; if (!enable_vpid) return; spin_lock(&vmx_vpid_lock); vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); if (vpid < VMX_NR_VPIDS) { vmx->vpid = vpid; __set_bit(vpid, vmx_vpid_bitmap); } spin_unlock(&vmx_vpid_lock); } static void free_vpid(struct vcpu_vmx *vmx) { if (!enable_vpid) return; spin_lock(&vmx_vpid_lock); if (vmx->vpid != 0) __clear_bit(vmx->vpid, vmx_vpid_bitmap); spin_unlock(&vmx_vpid_lock); } #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
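* Layout, one bit per MSR: bytes 0x000 read-low, 0x400 read-high, 0x800 write-low, 0xc00 write-high. E.g. MSR_EFER (0xc0000080) masks down to 0x80 and so lands in the two high bitmaps.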
*/ if (msr <= 0x1fff) { if (type & MSR_TYPE_R) /* read-low */ __clear_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_W) /* write-low */ __clear_bit(msr, msr_bitmap + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R) /* read-high */ __clear_bit(msr, msr_bitmap + 0x400 / f); if (type & MSR_TYPE_W) /* write-high */ __clear_bit(msr, msr_bitmap + 0xc00 / f); } } static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R) /* read-low */ __set_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_W) /* write-low */ __set_bit(msr, msr_bitmap + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R) /* read-high */ __set_bit(msr, msr_bitmap + 0x400 / f); if (type & MSR_TYPE_W) /* write-high */ __set_bit(msr, msr_bitmap + 0xc00 / f); } } static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) { if (!longmode_only) __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr, MSR_TYPE_R | MSR_TYPE_W); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr, MSR_TYPE_R | MSR_TYPE_W); } static void vmx_enable_intercept_msr_read_x2apic(u32 msr) { __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_R); __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_R); } static void vmx_disable_intercept_msr_read_x2apic(u32 msr) { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_R); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_R); } static void vmx_disable_intercept_msr_write_x2apic(u32 msr) { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_W); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_W); } static int vmx_vm_has_apicv(struct kvm *kvm) { return enable_apicv && irqchip_in_kernel(kvm); } /* * Send interrupt to vcpu via posted interrupt way. * 1. If target vcpu is running(non-root mode), send posted interrupt * notification to vcpu and hardware will sync PIR to vIRR atomically. * 2. If target vcpu isn't running(root mode), kick it to pick up the * interrupt from PIR in next vmentry. */ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); int r; if (pi_test_and_set_pir(vector, &vmx->pi_desc)) return; r = pi_test_and_set_on(&vmx->pi_desc); kvm_make_request(KVM_REQ_EVENT, vcpu); #ifdef CONFIG_SMP if (!r && (vcpu->mode == IN_GUEST_MODE)) apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), POSTED_INTR_VECTOR); else #endif kvm_vcpu_kick(vcpu); } static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (!pi_test_and_clear_on(&vmx->pi_desc)) return; kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); } static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu) { return; } /* * Set up the vmcs's constant host-state fields, i.e., host-state fields that * will not change in the lifetime of the guest. * Note that host-state that does change is set elsewhere. E.g., host-state * that is set differently for each CPU is set in vmx_vcpu_load(), not here. 
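* (For instance, HOST_CS_SELECTOR and the SYSENTER MSR shadows written below never change after init, while per-CPU state such as HOST_TR_BASE is written from vmx_vcpu_load() instead.)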
*/ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) { u32 low32, high32; unsigned long tmpl; struct desc_ptr dt; unsigned long cr4; vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ /* Save the most likely value for this task's CR4 in the VMCS. */ cr4 = read_cr4(); vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ vmx->host_state.vmcs_host_cr4 = cr4; vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ #ifdef CONFIG_X86_64 /* * Load null selectors, so we can avoid reloading them in * __vmx_load_host_state(), in case userspace uses the null selectors * too (the expected case). */ vmcs_write16(HOST_DS_SELECTOR, 0); vmcs_write16(HOST_ES_SELECTOR, 0); #else vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #endif vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ native_store_idt(&dt); vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ vmx->host_idt_base = dt.address; vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32); rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { rdmsr(MSR_IA32_CR_PAT, low32, high32); vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); } } static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) { vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; if (enable_ept) vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; if (is_guest_mode(&vmx->vcpu)) vmx->vcpu.arch.cr4_guest_owned_bits &= ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); } static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) { u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; return pin_based_exec_ctrl; } static u32 vmx_exec_control(struct vcpu_vmx *vmx) { u32 exec_control = vmcs_config.cpu_based_exec_ctrl; if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) exec_control &= ~CPU_BASED_MOV_DR_EXITING; if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { exec_control &= ~CPU_BASED_TPR_SHADOW; #ifdef CONFIG_X86_64 exec_control |= CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING; #endif } if (!enable_ept) exec_control |= CPU_BASED_CR3_STORE_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_INVLPG_EXITING; return exec_control; } static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) { u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; if (!enable_ept) { exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; enable_unrestricted_guest = 0; /* Enabling INVPCID for non-EPT guests may cause a performance regression. 
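* (With the control cleared just below, INVPCID executed by the guest takes the #UD path, matching the INVPCID CPUID bit that is expected to be hidden from such guests.)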
		 */
		exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
	}
	if (!enable_unrestricted_guest)
		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
	if (!ple_gap)
		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
	/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
	   (handle_vmptrld).
	   We can NOT enable shadow_vmcs here because we don't yet have a
	   current VMCS12 */
	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
	return exec_control;
}

static void ept_set_mmio_spte_mask(void)
{
	/*
	 * EPT Misconfigurations can be generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 * Also, the magic bits (0x3ull << 62) are set to quickly identify
	 * an mmio spte.
	 */
	kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
#ifdef CONFIG_X86_64
	unsigned long a;
#endif
	int i;

	/* I/O */
	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
	vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));

	if (enable_shadow_vmcs) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}
	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));

	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));

	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));

	if (cpu_has_secondary_exec_ctrls()) {
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
				vmx_secondary_exec_control(vmx));
	}

	if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
		vmcs_write64(EOI_EXIT_BITMAP0, 0);
		vmcs_write64(EOI_EXIT_BITMAP1, 0);
		vmcs_write64(EOI_EXIT_BITMAP2, 0);
		vmcs_write64(EOI_EXIT_BITMAP3, 0);

		vmcs_write16(GUEST_INTR_STATUS, 0);

		vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
	}

	if (ple_gap) {
		vmcs_write32(PLE_GAP, ple_gap);
		vmx->ple_window = ple_window;
		vmx->ple_window_dirty = true;
	}

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
	vmx_set_constant_host_state(vmx);
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));

	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
		u32 msr_low, msr_high;
		u64 host_pat;
		rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
		host_pat = msr_low | ((u64) msr_high << 32);
		/* Write the default value following the host PAT */
		vmcs_write64(GUEST_IA32_PAT, host_pat);
		/* Keep arch.pat in sync with GUEST_IA32_PAT */
		vmx->vcpu.arch.pat = host_pat;
	}

	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
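		/*
		 * Both probes succeeded, so the host supports this MSR;
		 * record it. Note that 'i' is the index into
		 * vmx_msr_index[], not the raw MSR number.
		 */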
vmx->guest_msrs[j].index = i; vmx->guest_msrs[j].data = 0; vmx->guest_msrs[j].mask = -1ull; ++vmx->nmsrs; } vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); set_cr4_guest_host_mask(vmx); return 0; } static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct msr_data apic_base_msr; vmx->rmode.vm86_active = 0; vmx->soft_vnmi_blocked = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(&vmx->vcpu, 0); apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_bsp(&vmx->vcpu)) apic_base_msr.data |= MSR_IA32_APICBASE_BSP; apic_base_msr.host_initiated = true; kvm_set_apic_base(&vmx->vcpu, &apic_base_msr); vmx_segment_cache_clear(vmx); seg_setup(VCPU_SREG_CS); vmcs_write16(GUEST_CS_SELECTOR, 0xf000); vmcs_write32(GUEST_CS_BASE, 0xffff0000); seg_setup(VCPU_SREG_DS); seg_setup(VCPU_SREG_ES); seg_setup(VCPU_SREG_FS); seg_setup(VCPU_SREG_GS); seg_setup(VCPU_SREG_SS); vmcs_write16(GUEST_TR_SELECTOR, 0); vmcs_writel(GUEST_TR_BASE, 0); vmcs_write32(GUEST_TR_LIMIT, 0xffff); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); vmcs_write16(GUEST_LDTR_SELECTOR, 0); vmcs_writel(GUEST_LDTR_BASE, 0); vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); vmcs_write32(GUEST_SYSENTER_CS, 0); vmcs_writel(GUEST_SYSENTER_ESP, 0); vmcs_writel(GUEST_SYSENTER_EIP, 0); vmcs_writel(GUEST_RFLAGS, 0x02); kvm_rip_write(vcpu, 0xfff0); vmcs_writel(GUEST_GDTR_BASE, 0); vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); vmcs_writel(GUEST_IDTR_BASE, 0); vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); /* Special registers */ vmcs_write64(GUEST_IA32_DEBUGCTL, 0); setup_msrs(vmx); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ if (cpu_has_vmx_tpr_shadow()) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); if (vm_need_tpr_shadow(vmx->vcpu.kvm)) vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, __pa(vmx->vcpu.arch.apic->regs)); vmcs_write32(TPR_THRESHOLD, 0); } kvm_vcpu_reload_apic_access_page(vcpu); if (vmx_vm_has_apicv(vcpu->kvm)) memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); if (vmx->vpid != 0) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ vmx_set_cr4(&vmx->vcpu, 0); vmx_set_efer(&vmx->vcpu, 0); vmx_fpu_activate(&vmx->vcpu); update_exception_bitmap(&vmx->vcpu); vpid_sync_context(vmx); } /* * In nested virtualization, check if L1 asked to exit on external interrupts. * For most existing hypervisors, this will always return true. 
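 * (A hypervisor that wants to service external interrupts itself sets
 * PIN_BASED_EXT_INTR_MASK in its pin-based VM-execution controls.)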
*/ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_EXT_INTR_MASK; } /* * In nested virtualization, check if L1 has set * VM_EXIT_ACK_INTR_ON_EXIT */ static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_ACK_INTR_ON_EXIT; } static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING; } static void enable_irq_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } static void enable_nmi_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; if (!cpu_has_virtual_nmis() || vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { enable_irq_window(vcpu); return; } cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } static void vmx_inject_irq(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); uint32_t intr; int irq = vcpu->arch.interrupt.nr; trace_kvm_inj_virq(irq); ++vcpu->stat.irq_injections; if (vmx->rmode.vm86_active) { int inc_eip = 0; if (vcpu->arch.interrupt.soft) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } intr = irq | INTR_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { intr |= INTR_TYPE_SOFT_INTR; vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); } else intr |= INTR_TYPE_EXT_INTR; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); } static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (is_guest_mode(vcpu)) return; if (!cpu_has_virtual_nmis()) { /* * Tracking the NMI-blocked state in software is built upon * finding the next open IRQ window. This, in turn, depends on * well-behaving guests: They have to keep IRQs disabled at * least as long as the NMI handler runs. Otherwise we may * cause NMI nesting, maybe breaking the guest. But as this is * highly unlikely, we can live with the residual risk. 
 */
		vmx->soft_vnmi_blocked = 1;
		vmx->vnmi_blocked_time = 0;
	}

	++vcpu->stat.nmi_injections;
	vmx->nmi_known_unmasked = false;
	if (vmx->rmode.vm86_active) {
		if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
}

static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	if (!cpu_has_virtual_nmis())
		return to_vmx(vcpu)->soft_vnmi_blocked;
	if (to_vmx(vcpu)->nmi_known_unmasked)
		return false;
	return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
}

static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!cpu_has_virtual_nmis()) {
		if (vmx->soft_vnmi_blocked != masked) {
			vmx->soft_vnmi_blocked = masked;
			vmx->vnmi_blocked_time = 0;
		}
	} else {
		vmx->nmi_known_unmasked = !masked;
		if (masked)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
					GUEST_INTR_STATE_NMI);
	}
}

static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
{
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return 0;

	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
		return 0;

	return	!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
		   | GUEST_INTR_STATE_NMI));
}

static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return (!to_vmx(vcpu)->nested.nested_run_pending &&
		vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}

static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	int ret;
	struct kvm_userspace_memory_region tss_mem = {
		.slot = TSS_PRIVATE_MEMSLOT,
		.guest_phys_addr = addr,
		.memory_size = PAGE_SIZE * 3,
		.flags = 0,
	};

	ret = kvm_set_memory_region(kvm, &tss_mem);
	if (ret)
		return ret;
	kvm->arch.tss_addr = addr;
	return init_rmode_tss(kvm);
}

static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
{
	switch (vec) {
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject the exception
		 * from user space while in guest debugging mode.
		 */
		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return false;
		/* fall through */
	case DB_VECTOR:
		if (vcpu->guest_debug &
			(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return false;
		/* fall through */
	case DE_VECTOR:
	case OF_VECTOR:
	case BR_VECTOR:
	case UD_VECTOR:
	case DF_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
	case MF_VECTOR:
		return true;
	}
	return false;
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	/*
	 * An instruction with the address-size override prefix (opcode 0x67)
	 * causes the #SS fault with error code 0 in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
			if (vcpu->arch.halt_request) {
				vcpu->arch.halt_request = 0;
				return kvm_emulate_halt(vcpu);
			}
			return 1;
		}
		return 0;
	}

	/*
	 * Forward all other exceptions that are valid in real mode.
	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
	 *        the required debugging infrastructure rework.
	 */
	kvm_queue_exception(vcpu, vec);
	return 1;
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
* We pass a fake environment to the machine check handler because we want * the guest to be always treated like user space, no matter what context * it used internally. */ static void kvm_machine_check(void) { #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, }; do_machine_check(&regs, 0); #endif } static int handle_machine_check(struct kvm_vcpu *vcpu) { /* already handled by vcpu_run */ return 1; } static int handle_exception(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 intr_info, ex_no, error_code; unsigned long cr2, rip, dr6; u32 vect_info; enum emulation_result er; vect_info = vmx->idt_vectoring_info; intr_info = vmx->exit_intr_info; if (is_machine_check(intr_info)) return handle_machine_check(vcpu); if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) return 1; /* already handled by vmx_vcpu_run() */ if (is_no_device(intr_info)) { vmx_fpu_activate(vcpu); return 1; } if (is_invalid_opcode(intr_info)) { er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; } error_code = 0; if (intr_info & INTR_INFO_DELIVER_CODE_MASK) error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); /* * The #PF with PFEC.RSVD = 1 indicates the guest is accessing * MMIO, it is better to report an internal error. * See the comments in vmx_handle_exit. */ if ((vect_info & VECTORING_INFO_VALID_MASK) && !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = vect_info; vcpu->run->internal.data[1] = intr_info; return 0; } if (is_page_fault(intr_info)) { /* EPT won't cause page fault directly */ BUG_ON(enable_ept); cr2 = vmcs_readl(EXIT_QUALIFICATION); trace_kvm_page_fault(cr2, error_code); if (kvm_event_needs_reinjection(vcpu)) kvm_mmu_unprotect_page_virt(vcpu, cr2); return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0); } ex_no = intr_info & INTR_INFO_VECTOR_MASK; if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) return handle_rmode_exception(vcpu, ex_no, error_code); switch (ex_no) { case DB_VECTOR: dr6 = vmcs_readl(EXIT_QUALIFICATION); if (!(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; if (!(dr6 & ~DR6_RESERVED)) /* icebp */ skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); return 1; } kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); /* fall through */ case BP_VECTOR: /* * Update instruction length as we may reinject #BP from * user space while in guest debugging mode. Reading it for * #DB as well causes no harm, it is not used in that case. 
 */
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		rip = kvm_rip_read(vcpu);
		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
		kvm_run->debug.arch.exception = ex_no;
		break;
	default:
		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
		kvm_run->ex.exception = ex_no;
		kvm_run->ex.error_code = error_code;
		break;
	}
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.irq_exits;
	return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_io(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int size, in, string;
	unsigned port;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	string = (exit_qualification & 16) != 0;
	in = (exit_qualification & 8) != 0;

	++vcpu->stat.io_exits;

	if (string || in)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;
	skip_emulated_instruction(vcpu);

	return kvm_fast_pio_out(vcpu, size, port);
}

static void vmx_patch_hypercall(struct kvm_vcpu *vcpu,
				unsigned char *hypercall)
{
	/*
	 * Patch in the VMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
}

static bool nested_cr0_valid(struct vmcs12 *vmcs12, unsigned long val)
{
	unsigned long always_on = VMXON_CR0_ALWAYSON;

	if (nested_vmx_secondary_ctls_high &
		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		always_on &= ~(X86_CR0_PE | X86_CR0_PG);
	return (val & always_on) == always_on;
}

/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (is_guest_mode(vcpu)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		unsigned long orig_val = val;

		/*
		 * We get here when L2 changed cr0 in a way that did not change
		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
		 * but did change L0 shadowed bits. So we first calculate the
		 * effective cr0 value that L1 would like to write into the
		 * hardware. It consists of the L2-owned bits from the new
		 * value combined with the L1-owned bits from L1's guest_cr0.
		 */
		val = (val & ~vmcs12->cr0_guest_host_mask) |
			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);

		if (!nested_cr0_valid(vmcs12, val))
			return 1;

		if (kvm_set_cr0(vcpu, val))
			return 1;
		vmcs_writel(CR0_READ_SHADOW, orig_val);
		return 0;
	} else {
		if (to_vmx(vcpu)->nested.vmxon &&
		    ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
			return 1;
		return kvm_set_cr0(vcpu, val);
	}
}

static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (is_guest_mode(vcpu)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		unsigned long orig_val = val;

		/* analogously to handle_set_cr0 */
		val = (val & ~vmcs12->cr4_guest_host_mask) |
			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
		if (kvm_set_cr4(vcpu, val))
			return 1;
		vmcs_writel(CR4_READ_SHADOW, orig_val);
		return 0;
	} else
		return kvm_set_cr4(vcpu, val);
}

/* called to set cr0 as appropriate for clts instruction exit. */
static void handle_clts(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		/*
		 * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
		 * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
		 * just pretend it's off (also in arch.cr0 for fpu_activate).
*/ vmcs_writel(CR0_READ_SHADOW, vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS); vcpu->arch.cr0 &= ~X86_CR0_TS; } else vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); } static int handle_cr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification, val; int cr; int reg; int err; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); cr = exit_qualification & 15; reg = (exit_qualification >> 8) & 15; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ val = kvm_register_readl(vcpu, reg); trace_kvm_cr_write(cr, val); switch (cr) { case 0: err = handle_set_cr0(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 3: err = kvm_set_cr3(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 4: err = handle_set_cr4(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 8: { u8 cr8_prev = kvm_get_cr8(vcpu); u8 cr8 = (u8)val; err = kvm_set_cr8(vcpu, cr8); kvm_complete_insn_gp(vcpu, err); if (irqchip_in_kernel(vcpu->kvm)) return 1; if (cr8_prev <= cr8) return 1; vcpu->run->exit_reason = KVM_EXIT_SET_TPR; return 0; } } break; case 2: /* clts */ handle_clts(vcpu); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); skip_emulated_instruction(vcpu); vmx_fpu_activate(vcpu); return 1; case 1: /*mov from cr*/ switch (cr) { case 3: val = kvm_read_cr3(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); skip_emulated_instruction(vcpu); return 1; case 8: val = kvm_get_cr8(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); skip_emulated_instruction(vcpu); return 1; } break; case 3: /* lmsw */ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); kvm_lmsw(vcpu, val); skip_emulated_instruction(vcpu); return 1; default: break; } vcpu->run->exit_reason = 0; vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", (int)(exit_qualification >> 4) & 3, cr); return 0; } static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int dr, reg; /* Do not handle if the CPL > 0, will trigger GP on re-entry */ if (!kvm_require_cpl(vcpu, 0)) return 1; dr = vmcs_readl(GUEST_DR7); if (dr & DR7_GD) { /* * As the vm-exit takes precedence over the debug trap, we * need to emulate the latter, either for the host or the * guest debugging itself. */ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; vcpu->run->debug.arch.dr7 = dr; vcpu->run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + vmcs_readl(GUEST_RIP); vcpu->run->debug.arch.exception = DB_VECTOR; vcpu->run->exit_reason = KVM_EXIT_DEBUG; return 0; } else { vcpu->arch.dr7 &= ~DR7_GD; vcpu->arch.dr6 |= DR6_BD | DR6_RTM; vmcs_writel(GUEST_DR7, vcpu->arch.dr7); kvm_queue_exception(vcpu, DB_VECTOR); return 1; } } if (vcpu->guest_debug == 0) { u32 cpu_based_vm_exec_control; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); /* * No more DR vmexits; force a reload of the debug registers * and reenter on this instruction. The next vmexit will * retrieve the full state of the debug registers. 
*/ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; return 1; } exit_qualification = vmcs_readl(EXIT_QUALIFICATION); dr = exit_qualification & DEBUG_REG_ACCESS_NUM; reg = DEBUG_REG_ACCESS_REG(exit_qualification); if (exit_qualification & TYPE_MOV_FROM_DR) { unsigned long val; if (kvm_get_dr(vcpu, dr, &val)) return 1; kvm_register_write(vcpu, reg, val); } else if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) return 1; skip_emulated_instruction(vcpu); return 1; } static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) { return vcpu->arch.dr6; } static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) { } static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; get_debugreg(vcpu->arch.db[0], 0); get_debugreg(vcpu->arch.db[1], 1); get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); get_debugreg(vcpu->arch.dr6, 6); vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) { vmcs_writel(GUEST_DR7, val); } static int handle_cpuid(struct kvm_vcpu *vcpu) { kvm_emulate_cpuid(vcpu); return 1; } static int handle_rdmsr(struct kvm_vcpu *vcpu) { u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data; if (vmx_get_msr(vcpu, ecx, &data)) { trace_kvm_msr_read_ex(ecx); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_read(ecx, data); /* FIXME: handling of bits 32:63 of rax, rdx */ vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; skip_emulated_instruction(vcpu); return 1; } static int handle_wrmsr(struct kvm_vcpu *vcpu) { struct msr_data msr; u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); msr.data = data; msr.index = ecx; msr.host_initiated = false; if (kvm_set_msr(vcpu, &msr) != 0) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_write(ecx, data); skip_emulated_instruction(vcpu); return 1; } static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) { kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } static int handle_interrupt_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; /* clear pending irq */ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); kvm_make_request(KVM_REQ_EVENT, vcpu); ++vcpu->stat.irq_window_exits; /* * If the user space waits to inject interrupts, exit as soon as * possible */ if (!irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window && !kvm_cpu_has_interrupt(vcpu)) { vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; return 0; } return 1; } static int handle_halt(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); return kvm_emulate_halt(vcpu); } static int handle_vmcall(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); kvm_emulate_hypercall(vcpu); return 1; } static int handle_invd(struct kvm_vcpu *vcpu) { return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_invlpg(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); kvm_mmu_invlpg(vcpu, exit_qualification); skip_emulated_instruction(vcpu); return 1; } 
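/*
 * Illustrative sketch only: a hypothetical helper (not used anywhere in
 * this file) showing the EDX:EAX packing convention that handle_rdmsr()
 * and handle_wrmsr() above open-code, mirroring the hardware RDMSR/WRMSR
 * register layout. Kept under #if 0 so it is never compiled.
 */
#if 0
static u64 msr_value_from_edx_eax(u32 eax, u32 edx)
{
	/* EAX supplies bits 31:0, EDX supplies bits 63:32 */
	return (u64)eax | ((u64)edx << 32);
}
#endif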
static int handle_rdpmc(struct kvm_vcpu *vcpu)
{
	int err;

	err = kvm_rdpmc(vcpu);
	kvm_complete_insn_gp(vcpu, err);

	return 1;
}

static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
	skip_emulated_instruction(vcpu);
	kvm_emulate_wbinvd(vcpu);
	return 1;
}

static int handle_xsetbv(struct kvm_vcpu *vcpu)
{
	u64 new_bv = kvm_read_edx_eax(vcpu);
	u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(vcpu, index, new_bv) == 0)
		skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_apic_access(struct kvm_vcpu *vcpu)
{
	if (likely(fasteoi)) {
		unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
		int access_type, offset;

		access_type = exit_qualification & APIC_ACCESS_TYPE;
		offset = exit_qualification & APIC_ACCESS_OFFSET;
		/*
		 * A sane guest uses MOV to write EOI, and the written value
		 * doesn't matter. So short-circuit here and avoid the heavy
		 * instruction emulation.
		 */
		if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
		    (offset == APIC_EOI)) {
			kvm_lapic_set_eoi(vcpu);
			skip_emulated_instruction(vcpu);
			return 1;
		}
	}
	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}

static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	int vector = exit_qualification & 0xff;

	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
	kvm_apic_set_eoi_accelerated(vcpu, vector);
	return 1;
}

static int handle_apic_write(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 offset = exit_qualification & 0xfff;

	/* APIC-write VM exit is trap-like and thus no need to adjust IP */
	kvm_apic_write_nodecode(vcpu, offset);
	return 1;
}

static int handle_task_switch(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qualification;
	bool has_error_code = false;
	u32 error_code = 0;
	u16 tss_selector;
	int reason, type, idt_v, idt_index;

	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	reason = (u32)exit_qualification >> 30;
	if (reason == TASK_SWITCH_GATE && idt_v) {
		switch (type) {
		case INTR_TYPE_NMI_INTR:
			vcpu->arch.nmi_injected = false;
			vmx_set_nmi_mask(vcpu, true);
			break;
		case INTR_TYPE_EXT_INTR:
		case INTR_TYPE_SOFT_INTR:
			kvm_clear_interrupt_queue(vcpu);
			break;
		case INTR_TYPE_HARD_EXCEPTION:
			if (vmx->idt_vectoring_info &
			    VECTORING_INFO_DELIVER_CODE_MASK) {
				has_error_code = true;
				error_code =
					vmcs_read32(IDT_VECTORING_ERROR_CODE);
			}
			/* fall through */
		case INTR_TYPE_SOFT_EXCEPTION:
			kvm_clear_exception_queue(vcpu);
			break;
		default:
			break;
		}
	}
	tss_selector = exit_qualification;

	if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
		       type != INTR_TYPE_EXT_INTR &&
		       type != INTR_TYPE_NMI_INTR))
		skip_emulated_instruction(vcpu);

	if (kvm_task_switch(vcpu, tss_selector,
			    type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
			    reason, has_error_code, error_code) == EMULATE_FAIL) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return 0;
	}

	/* clear all local breakpoint enable flags */
	vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);

	/*
	 * TODO: What about debug traps on tss switch?
	 *       Are we supposed to inject them and update dr6?
 */
	return 1;
}

static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	gpa_t gpa;
	u32 error_code;
	int gla_validity;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	gla_validity = (exit_qualification >> 7) & 0x3;
	if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
		printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
		printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
			(long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
			vmcs_readl(GUEST_LINEAR_ADDRESS));
		printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
			(long unsigned int)exit_qualification);
		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
		vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
		return 0;
	}

	/*
	 * EPT violation happened while executing iret from NMI,
	 * "blocked by NMI" bit has to be set before next VM entry.
	 * There are errata that may cause this bit to not be set:
	 * AAK134, BY25.
	 */
	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
			cpu_has_virtual_nmis() &&
			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);

	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	trace_kvm_page_fault(gpa, exit_qualification);

	/* Is it a write fault? */
	error_code = exit_qualification & (1U << 1);
	/* Is it a fetch fault? */
	error_code |= (exit_qualification & (1U << 2)) << 2;
	/* Is the ept page table present? */
	error_code |= (exit_qualification >> 3) & 0x1;

	vcpu->arch.exit_qualification = exit_qualification;

	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}

static u64 ept_rsvd_mask(u64 spte, int level)
{
	int i;
	u64 mask = 0;

	for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
		mask |= (1ULL << i);

	if (level == 4)
		/* bits 7:3 reserved */
		mask |= 0xf8;
	else if (spte & (1ULL << 7))
		/*
		 * 1GB/2MB page, bits 29:12 or 20:12 reserved respectively,
		 * level == 1 if the hypervisor is using the ignored bit 7.
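		 * For example, level == 2 (a 2MB page) yields
		 * (PAGE_SIZE << 9) - PAGE_SIZE = 0x1ff000 (bits 20:12), and
		 * level == 3 (a 1GB page) yields
		 * (PAGE_SIZE << 18) - PAGE_SIZE = 0x3ffff000 (bits 29:12).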
*/ mask |= (PAGE_SIZE << ((level - 1) * 9)) - PAGE_SIZE; else if (level > 1) /* bits 6:3 reserved */ mask |= 0x78; return mask; } static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, int level) { printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level); /* 010b (write-only) */ WARN_ON((spte & 0x7) == 0x2); /* 110b (write/execute) */ WARN_ON((spte & 0x7) == 0x6); /* 100b (execute-only) and value not supported by logical processor */ if (!cpu_has_vmx_ept_execute_only()) WARN_ON((spte & 0x7) == 0x4); /* not 000b */ if ((spte & 0x7)) { u64 rsvd_bits = spte & ept_rsvd_mask(spte, level); if (rsvd_bits != 0) { printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n", __func__, rsvd_bits); WARN_ON(1); } /* bits 5:3 are _not_ reserved for large page or leaf page */ if ((rsvd_bits & 0x38) == 0) { u64 ept_mem_type = (spte & 0x38) >> 3; if (ept_mem_type == 2 || ept_mem_type == 3 || ept_mem_type == 7) { printk(KERN_ERR "%s: ept_mem_type=0x%llx\n", __func__, ept_mem_type); WARN_ON(1); } } } } static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { u64 sptes[4]; int nr_sptes, i, ret; gpa_t gpa; gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { skip_emulated_instruction(vcpu); return 1; } ret = handle_mmio_page_fault_common(vcpu, gpa, true); if (likely(ret == RET_MMIO_PF_EMULATE)) return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == EMULATE_DONE; if (unlikely(ret == RET_MMIO_PF_INVALID)) return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0); if (unlikely(ret == RET_MMIO_PF_RETRY)) return 1; /* It is the real ept misconfig */ printk(KERN_ERR "EPT: Misconfiguration.\n"); printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa); nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes); for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; return 0; } static int handle_nmi_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; /* clear pending NMI */ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); ++vcpu->stat.nmi_window_exits; kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); enum emulation_result err = EMULATE_DONE; int ret = 1; u32 cpu_exec_ctrl; bool intr_window_requested; unsigned count = 130; cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; while (vmx->emulation_required && count-- != 0) { if (intr_window_requested && vmx_interrupt_allowed(vcpu)) return handle_interrupt_window(&vmx->vcpu); if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) return 1; err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; ret = 0; goto out; } if (err != EMULATE_DONE) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; return 0; } if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; ret = kvm_emulate_halt(vcpu); goto out; } if (signal_pending(current)) goto out; if (need_resched()) schedule(); } out: return ret; } static int __grow_ple_window(int val) { if (ple_window_grow < 1) return ple_window; val = min(val, 
ple_window_actual_max); if (ple_window_grow < ple_window) val *= ple_window_grow; else val += ple_window_grow; return val; } static int __shrink_ple_window(int val, int modifier, int minimum) { if (modifier < 1) return ple_window; if (modifier < ple_window) val /= modifier; else val -= modifier; return max(val, minimum); } static void grow_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __grow_ple_window(old); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); } static void shrink_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __shrink_ple_window(old, ple_window_shrink, ple_window); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); } /* * ple_window_actual_max is computed to be one grow_ple_window() below * ple_window_max. (See __grow_ple_window for the reason.) * This prevents overflows, because ple_window_max is int. * ple_window_max effectively rounded down to a multiple of ple_window_grow in * this process. * ple_window_max is also prevented from setting vmx->ple_window < ple_window. */ static void update_ple_window_actual_max(void) { ple_window_actual_max = __shrink_ple_window(max(ple_window_max, ple_window), ple_window_grow, INT_MIN); } /* * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE * exiting, so only get here on cpu with PAUSE-Loop-Exiting. */ static int handle_pause(struct kvm_vcpu *vcpu) { if (ple_gap) grow_ple_window(vcpu); skip_emulated_instruction(vcpu); kvm_vcpu_on_spin(vcpu); return 1; } static int handle_nop(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); return 1; } static int handle_mwait(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); return handle_nop(vcpu); } static int handle_monitor(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); return handle_nop(vcpu); } /* * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. * We could reuse a single VMCS for all the L2 guests, but we also want the * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this * allows keeping them loaded on the processor, and in the future will allow * optimizations where prepare_vmcs02 doesn't need to set all the fields on * every entry if they never change. * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. * * The following functions allocate and free a vmcs02 in this pool. */ /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) { struct vmcs02_list *item; list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) if (item->vmptr == vmx->nested.current_vmptr) { list_move(&item->list, &vmx->nested.vmcs02_pool); return &item->vmcs02; } if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { /* Recycle the least recently used VMCS. 
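		 * The pool is kept in most-recently-used-first order (see the
		 * list_move() in the lookup above), so the tail entry is the
		 * LRU victim.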
		 */
		item = list_entry(vmx->nested.vmcs02_pool.prev,
			struct vmcs02_list, list);
		item->vmptr = vmx->nested.current_vmptr;
		list_move(&item->list, &vmx->nested.vmcs02_pool);
		return &item->vmcs02;
	}

	/* Create a new VMCS */
	item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
	if (!item)
		return NULL;
	item->vmcs02.vmcs = alloc_vmcs();
	if (!item->vmcs02.vmcs) {
		kfree(item);
		return NULL;
	}
	loaded_vmcs_init(&item->vmcs02);
	item->vmptr = vmx->nested.current_vmptr;
	list_add(&(item->list), &(vmx->nested.vmcs02_pool));
	vmx->nested.vmcs02_num++;
	return &item->vmcs02;
}

/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	struct vmcs02_list *item;
	list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
		if (item->vmptr == vmptr) {
			free_loaded_vmcs(&item->vmcs02);
			list_del(&item->list);
			kfree(item);
			vmx->nested.vmcs02_num--;
			return;
		}
}

/*
 * Free all VMCSs saved for this vcpu, except the one pointed by
 * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
 * must be &vmx->vmcs01.
 */
static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
{
	struct vmcs02_list *item, *n;

	WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
	list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
		/*
		 * Something will leak if the above WARN triggers.  Better than
		 * a use-after-free.
		 */
		if (vmx->loaded_vmcs == &item->vmcs02)
			continue;

		free_loaded_vmcs(&item->vmcs02);
		list_del(&item->list);
		kfree(item);
		vmx->nested.vmcs02_num--;
	}
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction, as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions".
 */
static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
}

static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
}

static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
					u32 vm_instruction_error)
{
	if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
		/*
		 * failValid writes the error number to the current VMCS, which
		 * can't be done when there isn't a current VMCS.
		 */
		nested_vmx_failInvalid(vcpu);
		return;
	}
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
}

static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}

/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and throws
 * #UD or #GP.
 */
static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
				 unsigned long exit_qualification,
				 u32 vmx_instruction_info, gva_t *ret)
{
	/*
	 * According to Vol.
3B, "Information for VM Exits Due to Instruction * Execution", on an exit, vmx_instruction_info holds most of the * addressing components of the operand. Only the displacement part * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). * For how an actual address is calculated from all these components, * refer to Vol. 1, "Operand Addressing". */ int scaling = vmx_instruction_info & 3; int addr_size = (vmx_instruction_info >> 7) & 7; bool is_reg = vmx_instruction_info & (1u << 10); int seg_reg = (vmx_instruction_info >> 15) & 7; int index_reg = (vmx_instruction_info >> 18) & 0xf; bool index_is_valid = !(vmx_instruction_info & (1u << 22)); int base_reg = (vmx_instruction_info >> 23) & 0xf; bool base_is_valid = !(vmx_instruction_info & (1u << 27)); if (is_reg) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* Addr = segment_base + offset */ /* offset = base + [index * scale] + displacement */ *ret = vmx_get_segment_base(vcpu, seg_reg); if (base_is_valid) *ret += kvm_register_read(vcpu, base_reg); if (index_is_valid) *ret += kvm_register_read(vcpu, index_reg)<<scaling; *ret += exit_qualification; /* holds the displacement */ if (addr_size == 1) /* 32 bit */ *ret &= 0xffffffff; /* * TODO: throw #GP (and return 1) in various cases that the VM* * instructions require it - e.g., offset beyond segment limit, * unusable or unreadable/unwritable segment, non-canonical 64-bit * address, and so on. Currently these are not checked. */ return 0; } /* * This function performs the various checks including * - if it's 4KB aligned * - No bits beyond the physical address width are set * - Returns 0 on success or else 1 * (Intel SDM Section 30.3) */ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, gpa_t *vmpointer) { gva_t gva; gpa_t vmptr; struct x86_exception e; struct page *page; struct vcpu_vmx *vmx = to_vmx(vcpu); int maxphyaddr = cpuid_maxphyaddr(vcpu); if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmcs_read32(VMX_INSTRUCTION_INFO), &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, sizeof(vmptr), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (exit_reason) { case EXIT_REASON_VMON: /* * SDM 3: 24.11.5 * The first 4 bytes of VMXON region contain the supported * VMCS revision identifier * * Note - IA32_VMX_BASIC[48] will never be 1 * for the nested case; * which replaces physical address width with 32 * */ if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 1; } page = nested_get_page(vcpu, vmptr); if (page == NULL || *(u32 *)kmap(page) != VMCS12_REVISION) { nested_vmx_failInvalid(vcpu); kunmap(page); skip_emulated_instruction(vcpu); return 1; } kunmap(page); vmx->nested.vmxon_ptr = vmptr; break; case EXIT_REASON_VMCLEAR: if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); skip_emulated_instruction(vcpu); return 1; } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); skip_emulated_instruction(vcpu); return 1; } break; case EXIT_REASON_VMPTRLD: if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); skip_emulated_instruction(vcpu); return 1; } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); skip_emulated_instruction(vcpu); return 1; } break; default: return 1; /* shouldn't happen */ } if (vmpointer) *vmpointer = vmptr; return 0; 
}

/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not need to store
 * anything in the guest-allocated memory region pointed to by the VMXON
 * pointer. The pointer itself is validated (and remembered, so that VMCLEAR
 * and VMPTRLD can reject an argument equal to it, as the spec requires) in
 * nested_vmx_check_vmptr().
 */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs *shadow_vmcs;
	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

	/* The Intel VMX Instruction Reference lists a bunch of bits that
	 * are prerequisite to running VMXON, most notably cr4.VMXE must be
	 * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
	 * Otherwise, we should fail with #UD.  We test these now:
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
	    !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
	    (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	if (is_long_mode(vcpu) && !cs.l) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
		return 1;

	if (vmx->nested.vmxon) {
		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
		skip_emulated_instruction(vcpu);
		return 1;
	}

	if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
			!= VMXON_NEEDED_FEATURES) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (enable_shadow_vmcs) {
		shadow_vmcs = alloc_vmcs();
		if (!shadow_vmcs)
			return -ENOMEM;
		/* mark vmcs as shadow */
		shadow_vmcs->revision_id |= (1u << 31);
		/* init shadow vmcs */
		vmcs_clear(shadow_vmcs);
		vmx->nested.current_shadow_vmcs = shadow_vmcs;
	}

	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
	vmx->nested.vmcs02_num = 0;

	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;

	vmx->nested.vmxon = true;

	skip_emulated_instruction(vcpu);
	nested_vmx_succeed(vcpu);
	return 1;
}

/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
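 * (#UD if the vcpu is not in VMX operation or is in virtual-8086 or
 * compatibility mode, #GP(0) if CPL > 0; mirrored by the checks below.)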
*/ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { struct kvm_segment cs; struct vcpu_vmx *vmx = to_vmx(vcpu); if (!vmx->nested.vmxon) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) || (is_long_mode(vcpu) && !cs.l)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); return 0; } return 1; } static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) { u32 exec_control; if (vmx->nested.current_vmptr == -1ull) return; /* current_vmptr and current_vmcs12 are always set/reset together */ if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) return; if (enable_shadow_vmcs) { /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); vmx->nested.sync_shadow_vmcs = false; exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); vmcs_write64(VMCS_LINK_POINTER, -1ull); } kunmap(vmx->nested.current_vmcs12_page); nested_release_page(vmx->nested.current_vmcs12_page); vmx->nested.current_vmptr = -1ull; vmx->nested.current_vmcs12 = NULL; } /* * Free whatever needs to be freed from vmx->nested when L1 goes down, or * just stops using VMX. */ static void free_nested(struct vcpu_vmx *vmx) { if (!vmx->nested.vmxon) return; vmx->nested.vmxon = false; nested_release_vmcs12(vmx); if (enable_shadow_vmcs) free_vmcs(vmx->nested.current_shadow_vmcs); /* Unpin physical memory we referred to in current vmcs02 */ if (vmx->nested.apic_access_page) { nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } nested_free_all_saved_vmcss(vmx); } /* Emulate the VMXOFF instruction */ static int handle_vmoff(struct kvm_vcpu *vcpu) { if (!nested_vmx_check_permission(vcpu)) return 1; free_nested(to_vmx(vcpu)); skip_emulated_instruction(vcpu); nested_vmx_succeed(vcpu); return 1; } /* Emulate the VMCLEAR instruction */ static int handle_vmclear(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t vmptr; struct vmcs12 *vmcs12; struct page *page; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) return 1; if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vmx); page = nested_get_page(vcpu, vmptr); if (page == NULL) { /* * For accurate processor emulation, VMCLEAR beyond available * physical memory should do nothing at all. 
However, it is * possible that a nested vmx bug, not a guest hypervisor bug, * resulted in this case, so let's shut down before doing any * more damage: */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 1; } vmcs12 = kmap(page); vmcs12->launch_state = 0; kunmap(page); nested_release_page(page); nested_free_vmcs02(vmx, vmptr); skip_emulated_instruction(vcpu); nested_vmx_succeed(vcpu); return 1; } static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); /* Emulate the VMLAUNCH instruction */ static int handle_vmlaunch(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, true); } /* Emulate the VMRESUME instruction */ static int handle_vmresume(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, false); } enum vmcs_field_type { VMCS_FIELD_TYPE_U16 = 0, VMCS_FIELD_TYPE_U64 = 1, VMCS_FIELD_TYPE_U32 = 2, VMCS_FIELD_TYPE_NATURAL_WIDTH = 3 }; static inline int vmcs_field_type(unsigned long field) { if (0x1 & field) /* the *_HIGH fields are all 32 bit */ return VMCS_FIELD_TYPE_U32; return (field >> 13) & 0x3 ; } static inline int vmcs_field_readonly(unsigned long field) { return (((field >> 10) & 0x3) == 1); } /* * Read a vmcs12 field. Since these can have varying lengths and we return * one type, we chose the biggest type (u64) and zero-extend the return value * to that size. Note that the caller, handle_vmread, might need to use only * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of * 64-bit fields are to be returned). */ static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu, unsigned long field, u64 *ret) { short offset = vmcs_field_to_offset(field); char *p; if (offset < 0) return 0; p = ((char *)(get_vmcs12(vcpu))) + offset; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_NATURAL_WIDTH: *ret = *((natural_width *)p); return 1; case VMCS_FIELD_TYPE_U16: *ret = *((u16 *)p); return 1; case VMCS_FIELD_TYPE_U32: *ret = *((u32 *)p); return 1; case VMCS_FIELD_TYPE_U64: *ret = *((u64 *)p); return 1; default: return 0; /* can never happen. */ } } static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu, unsigned long field, u64 field_value){ short offset = vmcs_field_to_offset(field); char *p = ((char *) get_vmcs12(vcpu)) + offset; if (offset < 0) return false; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: *(u16 *)p = field_value; return true; case VMCS_FIELD_TYPE_U32: *(u32 *)p = field_value; return true; case VMCS_FIELD_TYPE_U64: *(u64 *)p = field_value; return true; case VMCS_FIELD_TYPE_NATURAL_WIDTH: *(natural_width *)p = field_value; return true; default: return false; /* can never happen. 
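		 * (vmcs_field_type() only ever returns one of the four
		 * types handled above.)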
*/ } } static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) { int i; unsigned long field; u64 field_value; struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; const unsigned long *fields = shadow_read_write_fields; const int num_fields = max_shadow_read_write_fields; vmcs_load(shadow_vmcs); for (i = 0; i < num_fields; i++) { field = fields[i]; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: field_value = vmcs_read16(field); break; case VMCS_FIELD_TYPE_U32: field_value = vmcs_read32(field); break; case VMCS_FIELD_TYPE_U64: field_value = vmcs_read64(field); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: field_value = vmcs_readl(field); break; } vmcs12_write_any(&vmx->vcpu, field, field_value); } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); } static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) { const unsigned long *fields[] = { shadow_read_write_fields, shadow_read_only_fields }; const int max_fields[] = { max_shadow_read_write_fields, max_shadow_read_only_fields }; int i, q; unsigned long field; u64 field_value = 0; struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; vmcs_load(shadow_vmcs); for (q = 0; q < ARRAY_SIZE(fields); q++) { for (i = 0; i < max_fields[q]; i++) { field = fields[q][i]; vmcs12_read_any(&vmx->vcpu, field, &field_value); switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: vmcs_write16(field, (u16)field_value); break; case VMCS_FIELD_TYPE_U32: vmcs_write32(field, (u32)field_value); break; case VMCS_FIELD_TYPE_U64: vmcs_write64(field, (u64)field_value); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: vmcs_writel(field, (long)field_value); break; } } } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); } /* * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was * used before) all generate the same failure when it is missing. */ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->nested.current_vmptr == -1ull) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 0; } return 1; } static int handle_vmread(struct kvm_vcpu *vcpu) { unsigned long field; u64 field_value; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t gva = 0; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) return 1; /* Decode instruction info and find the field to read */ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); /* Read the field, zero-extended to a u64 field_value */ if (!vmcs12_read_any(vcpu, field, &field_value)) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); skip_emulated_instruction(vcpu); return 1; } /* * Now copy part of this value to register or memory, as requested. * Note that the number of bits actually copied is 32 or 64 depending * on the guest's mode (32 or 64 bit), not on the given field's length. */ if (vmx_instruction_info & (1u << 10)) { kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), field_value); } else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, &gva)) return 1; /* _system ok, as nested_vmx_check_permission verified cpl=0 */ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_long_mode(vcpu) ? 
				 8 : 4), NULL);
	}

	nested_vmx_succeed(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}


static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
	unsigned long field;
	gva_t gva;
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	/* The value to write might be 32 or 64 bits, depending on L1's long
	 * mode, and eventually we need to write that into a field of several
	 * possible lengths. The code below first zero-extends the value to 64
	 * bit (field_value), and then copies only the appropriate number of
	 * bits into the vmcs12 field.
	 */
	u64 field_value = 0;
	struct x86_exception e;

	if (!nested_vmx_check_permission(vcpu) ||
	    !nested_vmx_check_vmcs12(vcpu))
		return 1;

	if (vmx_instruction_info & (1u << 10))
		field_value = kvm_register_readl(vcpu,
			(((vmx_instruction_info) >> 3) & 0xf));
	else {
		if (get_vmx_mem_address(vcpu, exit_qualification,
				vmx_instruction_info, &gva))
			return 1;
		if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
			   &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
			kvm_inject_page_fault(vcpu, &e);
			return 1;
		}
	}


	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
	if (vmcs_field_readonly(field)) {
		nested_vmx_failValid(vcpu,
			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
		skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!vmcs12_write_any(vcpu, field, field_value)) {
		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
		skip_emulated_instruction(vcpu);
		return 1;
	}

	nested_vmx_succeed(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t vmptr;
	u32 exec_control;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
		return 1;

	if (vmx->nested.current_vmptr != vmptr) {
		struct vmcs12 *new_vmcs12;
		struct page *page;
		page = nested_get_page(vcpu, vmptr);
		if (page == NULL) {
			nested_vmx_failInvalid(vcpu);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		new_vmcs12 = kmap(page);
		if (new_vmcs12->revision_id != VMCS12_REVISION) {
			kunmap(page);
			nested_release_page_clean(page);
			nested_vmx_failValid(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
			skip_emulated_instruction(vcpu);
			return 1;
		}

		nested_release_vmcs12(vmx);
		vmx->nested.current_vmptr = vmptr;
		vmx->nested.current_vmcs12 = new_vmcs12;
		vmx->nested.current_vmcs12_page = page;
		if (enable_shadow_vmcs) {
			exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
			exec_control |= SECONDARY_EXEC_SHADOW_VMCS;
			vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
				     exec_control);
			vmcs_write64(VMCS_LINK_POINTER,
				     __pa(vmx->nested.current_shadow_vmcs));
			vmx->nested.sync_shadow_vmcs = true;
		}
	}

	nested_vmx_succeed(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gva_t vmcs_gva;
	struct x86_exception e;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (get_vmx_mem_address(vcpu, exit_qualification,
			vmx_instruction_info, &vmcs_gva))
		return 1;

	/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
				 (void *)&to_vmx(vcpu)->nested.current_vmptr,
				 sizeof(u64), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	nested_vmx_succeed(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

/* Emulate the INVEPT
instruction */ static int handle_invept(struct kvm_vcpu *vcpu) { u32 vmx_instruction_info, types; unsigned long type; gva_t gva; struct x86_exception e; struct { u64 eptp, gpa; } operand; if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) || !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; if (!(types & (1UL << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return 1; } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (type) { case VMX_EPT_EXTENT_GLOBAL: kvm_mmu_sync_roots(vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); nested_vmx_succeed(vcpu); break; default: /* Trap single context invalidation invept calls */ BUG_ON(1); break; } skip_emulated_instruction(vcpu); return 1; } /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs * to be done to userspace and return 0. */ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EXCEPTION_NMI] = handle_exception, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, [EXIT_REASON_IO_INSTRUCTION] = handle_io, [EXIT_REASON_CR_ACCESS] = handle_cr, [EXIT_REASON_DR_ACCESS] = handle_dr, [EXIT_REASON_CPUID] = handle_cpuid, [EXIT_REASON_MSR_READ] = handle_rdmsr, [EXIT_REASON_MSR_WRITE] = handle_wrmsr, [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, [EXIT_REASON_HLT] = handle_halt, [EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_RDPMC] = handle_rdpmc, [EXIT_REASON_VMCALL] = handle_vmcall, [EXIT_REASON_VMCLEAR] = handle_vmclear, [EXIT_REASON_VMLAUNCH] = handle_vmlaunch, [EXIT_REASON_VMPTRLD] = handle_vmptrld, [EXIT_REASON_VMPTRST] = handle_vmptrst, [EXIT_REASON_VMREAD] = handle_vmread, [EXIT_REASON_VMRESUME] = handle_vmresume, [EXIT_REASON_VMWRITE] = handle_vmwrite, [EXIT_REASON_VMOFF] = handle_vmoff, [EXIT_REASON_VMON] = handle_vmon, [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_APIC_WRITE] = handle_apic_write, [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, [EXIT_REASON_WBINVD] = handle_wbinvd, [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, }; static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); static bool nested_vmx_exit_handled_io(struct 
kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { unsigned long exit_qualification; gpa_t bitmap, last_bitmap; unsigned int port; int size; u8 b; if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; last_bitmap = (gpa_t)-1; b = -1; while (size > 0) { if (port < 0x8000) bitmap = vmcs12->io_bitmap_a; else if (port < 0x10000) bitmap = vmcs12->io_bitmap_b; else return 1; bitmap += (port & 0x7fff) / 8; if (last_bitmap != bitmap) if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1)) return 1; if (b & (1 << (port & 7))) return 1; port++; size--; last_bitmap = bitmap; } return 0; } /* * Return 1 if we should exit from L2 to L1 to handle an MSR access, * rather than handle it ourselves in L0. I.e., check whether L1 expressed * disinterest in the current event (read or write a specific MSR) by using an * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. */ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 exit_reason) { u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; gpa_t bitmap; if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) return 1; /* * The MSR_BITMAP page is divided into four 1024-byte bitmaps, * for the four combinations of read/write and low/high MSR numbers. * First we need to figure out which of the four to use: */ bitmap = vmcs12->msr_bitmap; if (exit_reason == EXIT_REASON_MSR_WRITE) bitmap += 2048; if (msr_index >= 0xc0000000) { msr_index -= 0xc0000000; bitmap += 1024; } /* Then read the msr_index'th bit from this bitmap: */ if (msr_index < 1024*8) { unsigned char b; if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1)) return 1; return 1 & (b >> (msr_index & 7)); } else return 1; /* let L1 handle the wrong parameter */ } /* * Return 1 if we should exit from L2 to L1 to handle a CR access exit, * rather than handle it ourselves in L0. I.e., check if L1 wanted to * intercept (via guest_host_mask etc.) the current event.
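 *
 * For instance (illustrative): a "mov to cr0" only reflects to L1 when
 * the written value differs from cr0_read_shadow in a bit that L1 put
 * in cr0_guest_host_mask; any other mov-to-cr0 is handled by L0 itself,
 * matching the case-0 logic below.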
*/ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int cr = exit_qualification & 15; int reg = (exit_qualification >> 8) & 15; unsigned long val = kvm_register_readl(vcpu, reg); switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ switch (cr) { case 0: if (vmcs12->cr0_guest_host_mask & (val ^ vmcs12->cr0_read_shadow)) return 1; break; case 3: if ((vmcs12->cr3_target_count >= 1 && vmcs12->cr3_target_value0 == val) || (vmcs12->cr3_target_count >= 2 && vmcs12->cr3_target_value1 == val) || (vmcs12->cr3_target_count >= 3 && vmcs12->cr3_target_value2 == val) || (vmcs12->cr3_target_count >= 4 && vmcs12->cr3_target_value3 == val)) return 0; if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) return 1; break; case 4: if (vmcs12->cr4_guest_host_mask & (vmcs12->cr4_read_shadow ^ val)) return 1; break; case 8: if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) return 1; break; } break; case 2: /* clts */ if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && (vmcs12->cr0_read_shadow & X86_CR0_TS)) return 1; break; case 1: /* mov from cr */ switch (cr) { case 3: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_CR3_STORE_EXITING) return 1; break; case 8: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_CR8_STORE_EXITING) return 1; break; } break; case 3: /* lmsw */ /* * lmsw can change bits 1..3 of cr0, and only set bit 0 of * cr0. Other attempted changes are ignored, with no exit. */ if (vmcs12->cr0_guest_host_mask & 0xe & (val ^ vmcs12->cr0_read_shadow)) return 1; if ((vmcs12->cr0_guest_host_mask & 0x1) && !(vmcs12->cr0_read_shadow & 0x1) && (val & 0x1)) return 1; break; } return 0; } /* * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we * should handle it ourselves in L0 (and then continue L2). Only call this * when in is_guest_mode (L2). 
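 *
 * The pattern below is mechanical: for each exit reason, consult the
 * vmcs12 control that would have armed that exit for L1 (e.g.
 * CPU_BASED_HLT_EXITING for EXIT_REASON_HLT); if L1 never asked for
 * the exit, L0 handles it itself and resumes L2.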
*/ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) { u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 exit_reason = vmx->exit_reason; trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, vmcs_readl(EXIT_QUALIFICATION), vmx->idt_vectoring_info, intr_info, vmcs_read32(VM_EXIT_INTR_ERROR_CODE), KVM_ISA_VMX); if (vmx->nested.nested_run_pending) return 0; if (unlikely(vmx->fail)) { pr_info_ratelimited("%s failed vm entry %x\n", __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); return 1; } switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: if (!is_exception(intr_info)) return 0; else if (is_page_fault(intr_info)) return enable_ept; else if (is_no_device(intr_info) && !(vmcs12->guest_cr0 & X86_CR0_TS)) return 0; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); case EXIT_REASON_EXTERNAL_INTERRUPT: return 0; case EXIT_REASON_TRIPLE_FAULT: return 1; case EXIT_REASON_PENDING_INTERRUPT: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); case EXIT_REASON_NMI_WINDOW: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); case EXIT_REASON_TASK_SWITCH: return 1; case EXIT_REASON_CPUID: if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) return 0; return 1; case EXIT_REASON_HLT: return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); case EXIT_REASON_INVD: return 1; case EXIT_REASON_INVLPG: return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); case EXIT_REASON_RDPMC: return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); case EXIT_REASON_RDTSC: return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: case EXIT_REASON_INVEPT: /* * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! */ return 1; case EXIT_REASON_CR_ACCESS: return nested_vmx_exit_handled_cr(vcpu, vmcs12); case EXIT_REASON_DR_ACCESS: return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); case EXIT_REASON_IO_INSTRUCTION: return nested_vmx_exit_handled_io(vcpu, vmcs12); case EXIT_REASON_MSR_READ: case EXIT_REASON_MSR_WRITE: return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); case EXIT_REASON_INVALID_STATE: return 1; case EXIT_REASON_MWAIT_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); case EXIT_REASON_MONITOR_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); case EXIT_REASON_PAUSE_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || nested_cpu_has2(vmcs12, SECONDARY_EXEC_PAUSE_LOOP_EXITING); case EXIT_REASON_MCE_DURING_VMENTRY: return 0; case EXIT_REASON_TPR_BELOW_THRESHOLD: return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); case EXIT_REASON_APIC_ACCESS: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); case EXIT_REASON_EPT_VIOLATION: /* * L0 always deals with the EPT violation. If nested EPT is * used, and the nested mmu code discovers that the address is * missing in the guest EPT table (EPT12), the EPT violation * will be injected with nested_ept_inject_page_fault() */ return 0; case EXIT_REASON_EPT_MISCONFIG: /* * L2 never uses directly L1's EPT, but rather L0's own EPT * table (shadow on EPT) or a merged EPT table that L0 built * (EPT on EPT). 
So any problems with the structure of the * table are L0's fault. */ return 0; case EXIT_REASON_WBINVD: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); case EXIT_REASON_XSETBV: return 1; default: return 1; } } static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { *info1 = vmcs_readl(EXIT_QUALIFICATION); *info2 = vmcs_read32(VM_EXIT_INTR_INFO); } /* * The guest has exited. See if we can fix it or if we need userspace * assistance. */ static int vmx_handle_exit(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exit_reason = vmx->exit_reason; u32 vectoring_info = vmx->idt_vectoring_info; /* If guest state is invalid, start emulating */ if (vmx->emulation_required) return handle_invalid_guest_state(vcpu); if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { nested_vmx_vmexit(vcpu, exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; } if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = exit_reason; return 0; } if (unlikely(vmx->fail)) { vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = vmcs_read32(VM_INSTRUCTION_ERROR); return 0; } /* * Note: * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a * delivery event, since it indicates the guest is accessing MMIO. * The vm-exit can be triggered again after returning to the guest, * which would cause an infinite loop. */ if ((vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = vectoring_info; vcpu->run->internal.data[1] = exit_reason; return 0; } if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( get_vmcs12(vcpu))))) { if (vmx_interrupt_allowed(vcpu)) { vmx->soft_vnmi_blocked = 0; } else if (vmx->vnmi_blocked_time > 1000000000LL && vcpu->arch.nmi_pending) { /* * This CPU doesn't support us in finding the end of an * NMI-blocked window if the guest runs with IRQs * disabled. So we pull the trigger after 1 s of * futile waiting, but inform the user about this.
*/ printk(KERN_WARNING "%s: Breaking out of NMI-blocked " "state on VCPU %d after 1 s timeout\n", __func__, vcpu->vcpu_id); vmx->soft_vnmi_blocked = 0; } } if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); else { vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = exit_reason; } return 0; } static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (is_guest_mode(vcpu) && nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return; if (irr == -1 || tpr < irr) { vmcs_write32(TPR_THRESHOLD, 0); return; } vmcs_write32(TPR_THRESHOLD, irr); } static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) { u32 sec_exec_control; /* * There is no point in enabling virtualized x2APIC mode without * enabling APICv */ if (!cpu_has_vmx_virtualize_x2apic_mode() || !vmx_vm_has_apicv(vcpu->kvm)) return; if (!vm_need_tpr_shadow(vcpu->kvm)) return; sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); if (set) { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } else { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); vmx_set_msr_bitmap(vcpu); } static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) { struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Currently we do not handle the nested case where L2 has an * APIC access page of its own; that page is still pinned. * Hence, we skip the case where the VCPU is in guest mode _and_ * L1 prepared an APIC access page for L2. * * For the case where L1 and L2 share the same APIC access page * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear * in the vmcs12), this function will only update either the vmcs01 * or the vmcs02. If the former, the vmcs02 will be updated by * prepare_vmcs02. If the latter, the vmcs01 will be updated in * the next L2->L1 exit. */ if (!is_guest_mode(vcpu) || !nested_cpu_has2(vmx->nested.current_vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) vmcs_write64(APIC_ACCESS_ADDR, hpa); } static void vmx_hwapic_isr_update(struct kvm *kvm, int isr) { u16 status; u8 old; if (!vmx_vm_has_apicv(kvm)) return; if (isr == -1) isr = 0; status = vmcs_read16(GUEST_INTR_STATUS); old = status >> 8; if (isr != old) { status &= 0xff; status |= isr << 8; vmcs_write16(GUEST_INTR_STATUS, status); } } static void vmx_set_rvi(int vector) { u16 status; u8 old; status = vmcs_read16(GUEST_INTR_STATUS); old = (u8)status & 0xff; if ((u8)vector != old) { status &= ~0xff; status |= (u8)vector; vmcs_write16(GUEST_INTR_STATUS, status); } } static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) { if (max_irr == -1) return; /* * If a vmexit is needed, vmx_check_nested_events handles it. */ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) return; if (!is_guest_mode(vcpu)) { vmx_set_rvi(max_irr); return; } /* * Fall back to pre-APICv interrupt injection since L2 * is run without virtual interrupt delivery.
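 *
 * (An assumption worth stating: vmcs02 is built without
 * SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY unless L1 itself sets it - see
 * how prepare_vmcs02() masks it out of L0's controls - so RVI-based
 * delivery cannot reach L2 and we queue the interrupt the classic
 * way below.)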
*/ if (!kvm_event_needs_reinjection(vcpu) && vmx_interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, max_irr, false); vmx_inject_irq(vcpu); } } static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) { if (!vmx_vm_has_apicv(vcpu->kvm)) return; vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); } static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) { u32 exit_intr_info; if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)) return; vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); exit_intr_info = vmx->exit_intr_info; /* Handle machine checks before interrupts are enabled */ if (is_machine_check(exit_intr_info)) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && (exit_intr_info & INTR_INFO_VALID_MASK)) { kvm_before_handle_nmi(&vmx->vcpu); asm("int $2"); kvm_after_handle_nmi(&vmx->vcpu); } } static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) { u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); /* * If external interrupt exists, IF bit is set in rflags/eflags on the * interrupt stack frame, and interrupt will be enabled on a return * from interrupt handler. */ if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { unsigned int vector; unsigned long entry; gate_desc *desc; struct vcpu_vmx *vmx = to_vmx(vcpu); #ifdef CONFIG_X86_64 unsigned long tmp; #endif vector = exit_intr_info & INTR_INFO_VECTOR_MASK; desc = (gate_desc *)vmx->host_idt_base + vector; entry = gate_offset(*desc); asm volatile( #ifdef CONFIG_X86_64 "mov %%" _ASM_SP ", %[sp]\n\t" "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" "push $%c[ss]\n\t" "push %[sp]\n\t" #endif "pushf\n\t" "orl $0x200, (%%" _ASM_SP ")\n\t" __ASM_SIZE(push) " $%c[cs]\n\t" "call *%[entry]\n\t" : #ifdef CONFIG_X86_64 [sp]"=&r"(tmp) #endif : [entry]"r"(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); } else local_irq_enable(); } static bool vmx_mpx_supported(void) { return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) && (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS); } static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) { u32 exit_intr_info; bool unblock_nmi; u8 vector; bool idtv_info_valid; idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; if (cpu_has_virtual_nmis()) { if (vmx->nmi_known_unmasked) return; /* * Can't use vmx->exit_intr_info since we're not sure what * the exit reason is. */ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; vector = exit_intr_info & INTR_INFO_VECTOR_MASK; /* * SDM 3: 27.7.1.2 (September 2008) * Re-set bit "block by NMI" before VM entry if vmexit caused by * a guest IRET fault. * SDM 3: 23.2.2 (September 2008) * Bit 12 is undefined in any of the following cases: * If the VM exit sets the valid bit in the IDT-vectoring * information field. * If the VM exit is due to a double fault. 
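 *
 * Hence the check below only trusts the unblock-by-NMI bit when the
 * interrupt info is valid, no IDT vectoring was in flight and the
 * vector is not #DF.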
*/ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && vector != DF_VECTOR && !idtv_info_valid) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmx->nmi_known_unmasked = !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI); } else if (unlikely(vmx->soft_vnmi_blocked)) vmx->vnmi_blocked_time += ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); } static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, u32 idt_vectoring_info, int instr_len_field, int error_code_field) { u8 vector; int type; bool idtv_info_valid; idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); if (!idtv_info_valid) return; kvm_make_request(KVM_REQ_EVENT, vcpu); vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = true; /* * SDM 3: 27.7.1.2 (September 2008) * Clear bit "block by NMI" before VM entry if an NMI * delivery faulted. */ vmx_set_nmi_mask(vcpu, false); break; case INTR_TYPE_SOFT_EXCEPTION: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); kvm_requeue_exception_e(vcpu, vector, err); } else kvm_requeue_exception(vcpu, vector); break; case INTR_TYPE_SOFT_INTR: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_EXT_INTR: kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; default: break; } } static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_ERROR_CODE); } static void vmx_cancel_injection(struct kvm_vcpu *vcpu) { __vmx_complete_interrupts(vcpu, vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) { int i, nr_msrs; struct perf_guest_switch_msr *msrs; msrs = perf_guest_get_msrs(&nr_msrs); if (!msrs) return; for (i = 0; i < nr_msrs; i++) if (msrs[i].host == msrs[i].guest) clear_atomic_switch_msr(vmx, msrs[i].msr); else add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, msrs[i].host); } static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long debugctlmsr, cr4; /* Record the guest's net vcpu time for enforced NMI injections.
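 * This is only needed when the CPU lacks hardware virtual-NMI support
 * and NMI blocking is emulated in software (vmx->soft_vnmi_blocked),
 * as the check below reflects.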
*/ if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) vmx->entry_time = ktime_get(); /* Don't enter VMX if guest state is invalid, let the exit handler start emulation until we arrive back to a valid state */ if (vmx->emulation_required) return; if (vmx->ple_window_dirty) { vmx->ple_window_dirty = false; vmcs_write32(PLE_WINDOW, vmx->ple_window); } if (vmx->nested.sync_shadow_vmcs) { copy_vmcs12_to_shadow(vmx); vmx->nested.sync_shadow_vmcs = false; } if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); cr4 = read_cr4(); if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { vmcs_writel(HOST_CR4, cr4); vmx->host_state.vmcs_host_cr4 = cr4; } /* When single-stepping over STI and MOV SS, we must clear the * corresponding interruptibility bits in the guest state. Otherwise * vmentry fails as it then expects bit 14 (BS) in pending debug * exceptions being set, but that's not correct for the guest debugging * case. */ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); atomic_switch_perf_msrs(vmx); debugctlmsr = get_debugctlmsr(); vmx->__launched = vmx->loaded_vmcs->launched; asm( /* Store host registers */ "push %%" _ASM_DX "; push %%" _ASM_BP ";" "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */ "push %%" _ASM_CX " \n\t" "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t" "je 1f \n\t" "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t" __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" "1: \n\t" /* Reload cr2 if changed */ "mov %c[cr2](%0), %%" _ASM_AX " \n\t" "mov %%cr2, %%" _ASM_DX " \n\t" "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t" "je 2f \n\t" "mov %%" _ASM_AX", %%cr2 \n\t" "2: \n\t" /* Check if vmlaunch or vmresume is needed */ "cmpl $0, %c[launched](%0) \n\t" /* Load guest registers. Don't clobber flags.
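 * %0 (rcx/ecx) doubles as the vcpu_vmx pointer used by the %c[...]
 * offsets, so guest RCX is deliberately restored last - see the
 * "kills %0 (ecx)" note further down.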
*/ "mov %c[rax](%0), %%" _ASM_AX " \n\t" "mov %c[rbx](%0), %%" _ASM_BX " \n\t" "mov %c[rdx](%0), %%" _ASM_DX " \n\t" "mov %c[rsi](%0), %%" _ASM_SI " \n\t" "mov %c[rdi](%0), %%" _ASM_DI " \n\t" "mov %c[rbp](%0), %%" _ASM_BP " \n\t" #ifdef CONFIG_X86_64 "mov %c[r8](%0), %%r8 \n\t" "mov %c[r9](%0), %%r9 \n\t" "mov %c[r10](%0), %%r10 \n\t" "mov %c[r11](%0), %%r11 \n\t" "mov %c[r12](%0), %%r12 \n\t" "mov %c[r13](%0), %%r13 \n\t" "mov %c[r14](%0), %%r14 \n\t" "mov %c[r15](%0), %%r15 \n\t" #endif "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */ /* Enter guest mode */ "jne 1f \n\t" __ex(ASM_VMX_VMLAUNCH) "\n\t" "jmp 2f \n\t" "1: " __ex(ASM_VMX_VMRESUME) "\n\t" "2: " /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" "mov %%" _ASM_AX ", %c[rax](%0) \n\t" "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" __ASM_SIZE(pop) " %c[rcx](%0) \n\t" "mov %%" _ASM_DX ", %c[rdx](%0) \n\t" "mov %%" _ASM_SI ", %c[rsi](%0) \n\t" "mov %%" _ASM_DI ", %c[rdi](%0) \n\t" "mov %%" _ASM_BP ", %c[rbp](%0) \n\t" #ifdef CONFIG_X86_64 "mov %%r8, %c[r8](%0) \n\t" "mov %%r9, %c[r9](%0) \n\t" "mov %%r10, %c[r10](%0) \n\t" "mov %%r11, %c[r11](%0) \n\t" "mov %%r12, %c[r12](%0) \n\t" "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" "setbe %c[fail](%0) \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" ".popsection" : : "c"(vmx), "d"((unsigned long)HOST_RSP), [launched]"i"(offsetof(struct vcpu_vmx, __launched)), [fail]"i"(offsetof(struct vcpu_vmx, fail)), [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), #ifdef CONFIG_X86_64 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), [wordsize]"i"(sizeof(ulong)) : "cc", "memory" #ifdef CONFIG_X86_64 , "rax", "rbx", "rdi", "rsi" , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" #else , "eax", "ebx", "edi", "esi" #endif ); /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ if (debugctlmsr) update_debugctlmsr(debugctlmsr); #ifndef CONFIG_X86_64 /* * The sysexit path does not restore ds/es, so we must set them to * a reasonable value ourselves. * * We can't defer this to vmx_load_host_state() since that function * may be executed in interrupt context, which saves and restore segments * around it, nullifying its effect. 
*/ loadsegment(ds, __USER_DS); loadsegment(es, __USER_DS); #endif vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) | (1 << VCPU_EXREG_RFLAGS) | (1 << VCPU_EXREG_PDPTR) | (1 << VCPU_EXREG_SEGMENTS) | (1 << VCPU_EXREG_CR3)); vcpu->arch.regs_dirty = 0; vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); vmx->loaded_vmcs->launched = 1; vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); /* * the KVM_REQ_EVENT optimization bit is only on for one entry, and if * we did not inject a still-pending event to L1 now because of * nested_run_pending, we need to re-enable this bit. */ if (vmx->nested.nested_run_pending) kvm_make_request(KVM_REQ_EVENT, vcpu); vmx->nested.nested_run_pending = 0; vmx_complete_atomic_exit(vmx); vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); } static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int cpu; if (vmx->loaded_vmcs == &vmx->vmcs01) return; cpu = get_cpu(); vmx->loaded_vmcs = &vmx->vmcs01; vmx_vcpu_put(vcpu); vmx_vcpu_load(vcpu, cpu); vcpu->cpu = cpu; put_cpu(); } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); free_vpid(vmx); leave_guest_mode(vcpu); vmx_load_vmcs01(vcpu); free_nested(vmx); free_loaded_vmcs(vmx->loaded_vmcs); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); } static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); int cpu; if (!vmx) return ERR_PTR(-ENOMEM); allocate_vpid(vmx); err = kvm_vcpu_init(&vmx->vcpu, kvm, id); if (err) goto free_vcpu; vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) > PAGE_SIZE); err = -ENOMEM; if (!vmx->guest_msrs) { goto uninit_vcpu; } vmx->loaded_vmcs = &vmx->vmcs01; vmx->loaded_vmcs->vmcs = alloc_vmcs(); if (!vmx->loaded_vmcs->vmcs) goto free_msrs; if (!vmm_exclusive) kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id()))); loaded_vmcs_init(vmx->loaded_vmcs); if (!vmm_exclusive) kvm_cpu_vmxoff(); cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); vmx->vcpu.cpu = cpu; err = vmx_vcpu_setup(vmx); vmx_vcpu_put(&vmx->vcpu); put_cpu(); if (err) goto free_vmcs; if (vm_need_virtualize_apic_accesses(kvm)) { err = alloc_apic_access_page(kvm); if (err) goto free_vmcs; } if (enable_ept) { if (!kvm->arch.ept_identity_map_addr) kvm->arch.ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; err = init_rmode_identity_map(kvm); if (err) goto free_vmcs; } vmx->nested.current_vmptr = -1ull; vmx->nested.current_vmcs12 = NULL; return &vmx->vcpu; free_vmcs: free_loaded_vmcs(vmx->loaded_vmcs); free_msrs: kfree(vmx->guest_msrs); uninit_vcpu: kvm_vcpu_uninit(&vmx->vcpu); free_vcpu: free_vpid(vmx); kmem_cache_free(kvm_vcpu_cache, vmx); return ERR_PTR(err); } static void __init vmx_check_processor_compat(void *rtn) { struct vmcs_config vmcs_conf; *(int *)rtn = 0; if (setup_vmcs_config(&vmcs_conf) < 0) *(int *)rtn = -EIO; if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", smp_processor_id()); *(int *)rtn = -EIO; } } static int get_ept_level(void) { return VMX_EPT_DEFAULT_GAW + 1; } static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { u64 ret; /* For VT-d and EPT combination * 1. MMIO: always map as UC * 2. EPT with VT-d: * a. 
VT-d without snooping control feature: can't guarantee the * result, try to trust guest. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ if (is_mmio) ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; else if (kvm_arch_has_noncoherent_dma(vcpu->kvm)) ret = kvm_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT; else ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; return ret; } static int vmx_get_lpage_level(void) { if (enable_ept && !cpu_has_vmx_ept_1g_page()) return PT_DIRECTORY_LEVEL; else /* For shadow and EPT supported 1GB page */ return PT_PDPE_LEVEL; } static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control; vmx->rdtscp_enabled = false; if (vmx_rdtscp_supported()) { exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); if (exec_control & SECONDARY_EXEC_RDTSCP) { best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (best && (best->edx & bit(X86_FEATURE_RDTSCP))) vmx->rdtscp_enabled = true; else { exec_control &= ~SECONDARY_EXEC_RDTSCP; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } } } /* Exposing INVPCID only when PCID is exposed */ best = kvm_find_cpuid_entry(vcpu, 0x7, 0); if (vmx_invpcid_supported() && best && (best->ebx & bit(X86_FEATURE_INVPCID)) && guest_cpuid_has_pcid(vcpu)) { exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } else { if (cpu_has_secondary_exec_ctrls()) { exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } if (best) best->ebx &= ~bit(X86_FEATURE_INVPCID); } } static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { if (func == 1 && nested) entry->ecx |= bit(X86_FEATURE_VMX); } static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 exit_reason; if (fault->error_code & PFERR_RSVD_MASK) exit_reason = EXIT_REASON_EPT_MISCONFIG; else exit_reason = EXIT_REASON_EPT_VIOLATION; nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification); vmcs12->guest_physical_address = fault->address; } /* Callbacks for nested_ept_init_mmu_context: */ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) { /* return the page table to be shadowed - in our case, EPT12 */ return get_vmcs12(vcpu)->ept_pointer; } static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu, nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT); vcpu->arch.mmu.set_cr3 = vmx_set_cr3; vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) { vcpu->arch.walk_mmu = &vcpu->arch.mmu; } static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); WARN_ON(!is_guest_mode(vcpu)); /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. 
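 * Per the SDM, a #PF should reflect to L1 iff
 *   ((PFEC & PFEC_MASK) == PFEC_MATCH) == EXCEPTION_BITMAP.PF.
 * A minimal sketch of the missing check (an assumption, not wired up
 * here) would be:
 *   bool match = (fault->error_code & vmcs12->page_fault_error_code_mask)
 *                == vmcs12->page_fault_error_code_match;
 *   bool eb_pf = !!(vmcs12->exception_bitmap & (1u << PF_VECTOR));
 *   reflect to L1 iff match == eb_pf;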
*/ if (vmcs12->exception_bitmap & (1u << PF_VECTOR)) nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); else kvm_inject_page_fault(vcpu, fault); } static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { /* TODO: Also verify bits beyond physical address width are 0 */ if (!PAGE_ALIGNED(vmcs12->apic_access_addr)) return false; /* * Translate L1 physical address to host physical * address for vmcs02. Keep the page pinned, so this * physical address remains valid. We keep a reference * to it so we can release it later. */ if (vmx->nested.apic_access_page) /* shouldn't happen */ nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = nested_get_page(vcpu, vmcs12->apic_access_addr); } if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { /* TODO: Also verify bits beyond physical address width are 0 */ if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr)) return false; if (vmx->nested.virtual_apic_page) /* shouldn't happen */ nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = nested_get_page(vcpu, vmcs12->virtual_apic_page_addr); /* * Failing the vm entry is _not_ what the processor does * but it's basically the only possibility we have. * We could still enter the guest if CR8 load exits are * enabled, CR8 store exits are enabled, and virtualize APIC * access is disabled; in this case the processor would never * use the TPR shadow and we could simply clear the bit from * the execution control. But such a configuration is useless, * so let's keep the code simple. */ if (!vmx->nested.virtual_apic_page) return false; } return true; } static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) { u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; struct vcpu_vmx *vmx = to_vmx(vcpu); if (vcpu->arch.virtual_tsc_khz == 0) return; /* Make sure short timeouts reliably trigger an immediate vmexit. * hrtimer_start does not guarantee this. */ if (preemption_timeout <= 1) { vmx_preemption_timer_fn(&vmx->nested.preemption_timer); return; } preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; preemption_timeout *= 1000000; do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); hrtimer_start(&vmx->nested.preemption_timer, ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); } /* * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 * guest in a way that will both be appropriate to L1's requests, and our * needs. In addition to modifying the active vmcs (which is vmcs02), this * function also has additional necessary side-effects, like setting various * vcpu->arch fields. 
*/ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); } else { kvm_set_dr(vcpu, 7, vcpu->arch.dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, vmcs12->vm_entry_intr_info_field); vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, vmcs12->vm_entry_exception_error_code); vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmcs12->vm_entry_instruction_len); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, vmcs12->guest_interruptibility_info); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); vmx_set_rflags(vcpu, vmcs12->guest_rflags); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, vmcs12->guest_pending_dbg_exceptions); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); vmcs_write64(VMCS_LINK_POINTER, -1ull); exec_control = vmcs12->pin_based_vm_exec_control; exec_control |= vmcs_config.pin_based_exec_ctrl; exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER | PIN_BASED_POSTED_INTR); vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); vmx->nested.preemption_timer_expired = false; if (nested_cpu_has_preemption_timer(vmcs12)) vmx_start_preemption_timer(vcpu); /* * Whether page-faults are trapped is determined by a combination of * 3 settings: 
PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. * If enable_ept, L0 doesn't care about page faults and we should * set all of these to L1's desires. However, if !enable_ept, L0 does * care about (at least some) page faults, and because it is not easy * (if at all possible?) to merge L0 and L1's desires, we simply ask * to exit on each and every L2 page fault. This is done by setting * MASK=MATCH=0 and (see below) EB.PF=1. * Note that below we don't need special code to set EB.PF beyond the * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when * !enable_ept, EB.PF is 1, so the "or" will always be 1. * * A problem with this approach (when !enable_ept) is that L1 may be * injected with more page faults than it asked for. This could have * caused problems, but in practice existing hypervisors don't care. * To fix this, we will need to emulate the PFEC checking (on the L1 * page tables), using walk_addr(), when injecting PFs to L1. */ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, enable_ept ? vmcs12->page_fault_error_code_mask : 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, enable_ept ? vmcs12->page_fault_error_code_match : 0); if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx_secondary_exec_control(vmx); if (!vmx->rdtscp_enabled) exec_control &= ~SECONDARY_EXEC_RDTSCP; /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) exec_control |= vmcs12->secondary_vm_exec_control; if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) { /* * If translation failed, no matter: This feature asks * to exit when accessing the given address, and if it * can never be accessed, this feature won't do * anything anyway. */ if (!vmx->nested.apic_access_page) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; else vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(vmx->nested.apic_access_page)); } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) { exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; kvm_vcpu_reload_apic_access_page(vcpu); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } /* * Set host-state according to L0's settings (vmcs12 is irrelevant here) * Some constant fields are set here by vmx_set_constant_host_state(). * Other fields are different per CPU, and will be set later when * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. */ vmx_set_constant_host_state(vmx); /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before * entry, but only if the current (host) sp changed from the value * we wrote last (vmx->host_rsp). This cache is no longer relevant * if we switch vmcs, and rather than hold a separate cache per vmcs, * here we just force the write to happen on entry. */ vmx->host_rsp = 0; exec_control = vmx_exec_control(vmx); /* L0's desires */ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; exec_control &= ~CPU_BASED_TPR_SHADOW; exec_control |= vmcs12->cpu_based_vm_exec_control; if (exec_control & CPU_BASED_TPR_SHADOW) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, page_to_phys(vmx->nested.virtual_apic_page)); vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); } /* * Merging of IO and MSR bitmaps not currently supported. * Rather, exit every time. 
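 * (A merge would conceptually be the bitwise OR of the L0 and L1
 * bitmaps - exit if either hypervisor asked for it - but that means
 * mapping and recombining the guest's bitmap pages whenever they
 * change, so we keep it simple and take the unconditional exit.)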
*/ exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; exec_control &= ~CPU_BASED_USE_IO_BITMAPS; exec_control |= CPU_BASED_UNCOND_IO_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* L2->L1 exit controls are emulated - the hardware exit is to L0 so * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits are further modified by vmx_set_efer() below. */ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. */ vm_entry_controls_init(vmx, (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); vcpu->arch.pat = vmcs12->guest_ia32_pat; } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); set_cr4_guest_host_mask(vmx); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); else vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); if (enable_vpid) { /* * Trivially support vpid by letting L2s share their parent * L1's vpid. TODO: move to a more elaborate solution, giving * each L2 its own vpid and exposing the vpid feature to L1. */ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmx_flush_tlb(vcpu); } if (nested_cpu_has_ept(vmcs12)) { kvm_mmu_unload(vcpu); nested_ept_init_mmu_context(vcpu); } if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->guest_ia32_efer; else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); /* * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified * TS bit (for lazy fpu) and bits which we consider mandatory enabled. * The CR0_READ_SHADOW is what L2 should have expected to read given * the specifications by L1; It's not enough to take * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we may * have more bits than L1 expected.
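 *
 * Concretely: L2 reads of bits in the combined guest/host mask come
 * back from the CR0_READ_SHADOW written below (nested_read_cr0(vmcs12)),
 * while untrapped bits read straight from the GUEST_CR0 that
 * vmx_set_cr0() computes.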
*/ vmx_set_cr0(vcpu, vmcs12->guest_cr0); vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); vmx_set_cr4(vcpu, vmcs12->guest_cr4); vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); /* shadow page tables on either EPT or shadow page tables */ kvm_set_cr3(vcpu, vmcs12->guest_cr3); kvm_mmu_reset_context(vcpu); if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; /* * L1 may access the L2's PDPTR, so save them to construct vmcs12 */ if (enable_ept) { vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); } kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); } /* * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 * for running an L2 nested guest. */ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); int cpu; struct loaded_vmcs *vmcs02; bool ia32e; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) return 1; skip_emulated_instruction(vcpu); vmcs12 = get_vmcs12(vcpu); if (enable_shadow_vmcs) copy_shadow_to_vmcs12(vmx); /* * The nested entry process starts with enforcing various prerequisites * on vmcs12 as required by the Intel SDM, and act appropriately when * they fail: As the SDM explains, some conditions should cause the * instruction to fail, while others will cause the instruction to seem * to succeed, but return an EXIT_REASON_INVALID_STATE. * To speed up the normal (success) code path, we should avoid checking * for misconfigurations which will anyway be caught by the processor * when using the merged vmcs02. */ if (vmcs12->launch_state == launch) { nested_vmx_failValid(vcpu, launch ? 
VMXERR_VMLAUNCH_NONCLEAR_VMCS : VMXERR_VMRESUME_NONLAUNCHED_VMCS); return 1; } if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) && !PAGE_ALIGNED(vmcs12->msr_bitmap)) { /*TODO: Also verify bits beyond physical address width are 0*/ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (!nested_get_vmcs12_pages(vcpu, vmcs12)) { /*TODO: Also verify bits beyond physical address width are 0*/ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (vmcs12->vm_entry_msr_load_count > 0 || vmcs12->vm_exit_msr_load_count > 0 || vmcs12->vm_exit_msr_store_count > 0) { pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__); nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, nested_vmx_true_procbased_ctls_low, nested_vmx_procbased_ctls_high) || !vmx_control_verify(vmcs12->secondary_vm_exec_control, nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) || !vmx_control_verify(vmcs12->pin_based_vm_exec_control, nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) || !vmx_control_verify(vmcs12->vm_exit_controls, nested_vmx_true_exit_ctls_low, nested_vmx_exit_ctls_high) || !vmx_control_verify(vmcs12->vm_entry_controls, nested_vmx_true_entry_ctls_low, nested_vmx_entry_ctls_high)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) || ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); return 1; } if (!nested_cr0_valid(vmcs12, vmcs12->guest_cr0) || ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } if (vmcs12->vmcs_link_pointer != -1ull) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR); return 1; } /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: * - Bits reserved in the IA32_EFER MSR must be 0. * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of * the IA-32e mode guest VM-exit control. It must also be identical * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to * CR0.PG) is 1. */ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) { ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || ((vmcs12->guest_cr0 & X86_CR0_PG) && ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } } /* * If the load IA32_EFER VM-exit control is 1, bits reserved in the * IA32_EFER MSR must be 0 in the field for that register. In addition, * the values of the LMA and LME bits in the field must each be that of * the host address-space size VM-exit control. 
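 *
 * In practice: a 64-bit L1 host (ia32e set) must have both EFER.LMA
 * and EFER.LME set in host_ia32_efer, and a 32-bit one must have both
 * clear; anything else fails the entry check below.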
*/ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { ia32e = (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } } /* * We're finally done with prerequisite checking, and can start with * the nested entry. */ vmcs02 = nested_get_current_vmcs02(vmx); if (!vmcs02) return -ENOMEM; enter_guest_mode(vcpu); vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); cpu = get_cpu(); vmx->loaded_vmcs = vmcs02; vmx_vcpu_put(vcpu); vmx_vcpu_load(vcpu, cpu); vcpu->cpu = cpu; put_cpu(); vmx_segment_cache_clear(vmx); vmcs12->launch_state = 1; prepare_vmcs02(vcpu, vmcs12); if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) return kvm_emulate_halt(vcpu); vmx->nested.nested_run_pending = 1; /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet * returned as far as L1 is concerned. It will only return (and set * the success flag) when L2 exits (see nested_vmx_vmexit()). */ return 1; } /* * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). * This function returns the new value we should put in vmcs12.guest_cr0. * It's not enough to just return the vmcs02 GUEST_CR0. Rather, * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 * didn't trap the bit, because if L1 did, so would L0). * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have * been modified by L2, and L1 knows it. So just leave the old value of * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 * isn't relevant, because if L0 traps this bit it can set it to anything. * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have * changed these bits, and therefore they need to be updated, but L0 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
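 *
 * Worked example (illustrative): if L1 traps only TS while L0
 * additionally traps MP, then TS comes from vmcs12->guest_cr0
 * (case 2), MP from vmcs02's CR0_READ_SHADOW (case 3), and every
 * remaining bit straight from vmcs02 GUEST_CR0 (case 1) - exactly
 * the three-way merge computed below.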
*/ static inline unsigned long vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | vcpu->arch.cr0_guest_owned_bits)); } static inline unsigned long vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | vcpu->arch.cr4_guest_owned_bits)); } static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u32 idt_vectoring; unsigned int nr; if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { nr = vcpu->arch.exception.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (kvm_exception_is_soft(nr)) { vmcs12->vm_exit_instruction_len = vcpu->arch.event_exit_inst_len; idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; } else idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; if (vcpu->arch.exception.has_error_code) { idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; vmcs12->idt_vectoring_error_code = vcpu->arch.exception.error_code; } vmcs12->idt_vectoring_info_field = idt_vectoring; } else if (vcpu->arch.nmi_injected) { vmcs12->idt_vectoring_info_field = INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; } else if (vcpu->arch.interrupt.pending) { nr = vcpu->arch.interrupt.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { idt_vectoring |= INTR_TYPE_SOFT_INTR; vmcs12->vm_entry_instruction_len = vcpu->arch.event_exit_inst_len; } else idt_vectoring |= INTR_TYPE_EXT_INTR; vmcs12->idt_vectoring_info_field = idt_vectoring; } } static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); return 0; } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { if (vmx->nested.nested_run_pending || vcpu->arch.interrupt.pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK, 0); /* * The NMI-triggered VM exit counts as injection: * clear this one and block further NMIs. */ vcpu->arch.nmi_pending = 0; vmx_set_nmi_mask(vcpu, true); return 0; } if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && nested_exit_on_intr(vcpu)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); } return 0; } static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); u64 value; if (ktime_to_ns(remaining) <= 0) return 0; value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; do_div(value, 1000000); return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; } /* * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), * and this function updates it to reflect the changes to the guest state while * L2 was running (and perhaps made some exits which were handled directly by L0 * without going back to L1), and to reflect the exit reason. 
* Note that we do not have to copy here all VMCS fields, just those that * could have changed by the L2 guest or the exit - i.e., the guest-state and * exit-information fields only. Other fields are modified by L1 with VMWRITE, * which already writes to vmcs12 directly. */ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { /* update guest state fields: */ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); vmcs12->guest_interruptibility_info = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; else vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; if (nested_cpu_has_preemption_timer(vmcs12)) { if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) vmcs12->vmx_preemption_timer_value = vmx_get_preemption_timer_value(vcpu); hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); } /* * In some cases (usually, nested EPT), L2 is allowed to change its * own CR3 without exiting. If it has changed it, we must keep it. 
* Of course, if L0 is using shadow page tables, GUEST_CR3 was defined * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. * * Additionally, restore L2's PDPTR to vmcs12. */ if (enable_ept) { vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3); vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); } vmcs12->vm_entry_controls = (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); } /* TODO: These cannot have changed unless we have MSR bitmaps and * the relevant bit asks not to trap the change */ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) vmcs12->guest_ia32_efer = vcpu->arch.efer; vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); if (vmx_mpx_supported()) vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); /* update exit information fields: */ vmcs12->vm_exit_reason = exit_reason; vmcs12->exit_qualification = exit_qualification; vmcs12->vm_exit_intr_info = exit_intr_info; if ((vmcs12->vm_exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); vmcs12->idt_vectoring_info_field = 0; vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { /* vm_entry_intr_info_field is cleared on exit. Emulate this * instead of reading the real value. */ vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; /* * Transfer the event that L0 or L1 may wanted to inject into * L2 to IDT_VECTORING_INFO_FIELD. */ vmcs12_save_pending_event(vcpu, vmcs12); } /* * Drop what we picked up for L2 via vmx_complete_interrupts. It is * preserved above and would only end up incorrectly in L1. */ vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); } /* * A part of what we need to when the nested L2 guest exits and we want to * run its L1 parent, is to reset L1's guest state to the host state specified * in vmcs12. * This function is to be called not only on normal nested exit, but also on * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry * Failures During or After Loading Guest State"). * This function should be called when the active VMCS is L1's (vmcs01). 
*/ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); vmx_set_efer(vcpu, vcpu->arch.efer); kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); /* * Note that calling vmx_set_cr0 is important, even if cr0 hasn't * actually changed, because it depends on the current state of * fpu_active (which may have changed). * Note that vmx_set_cr0 refers to efer set above. */ vmx_set_cr0(vcpu, vmcs12->host_cr0); /* * If we did fpu_activate()/fpu_deactivate() during L2's run, we need * to apply the same changes to L1's vmcs. We just set cr0 correctly, * but we also need to update cr0_guest_host_mask and exception_bitmap. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0); vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01 * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask(); */ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); kvm_set_cr4(vcpu, vmcs12->host_cr4); nested_ept_uninit_mmu_context(vcpu); kvm_set_cr3(vcpu, vmcs12->host_cr3); kvm_mmu_reset_context(vcpu); if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; if (enable_vpid) { /* * Trivially support vpid by letting L2s share their parent * L1's vpid. TODO: move to a more elaborate solution, giving * each L2 its own vpid and exposing the vpid feature to L1. */ vmx_flush_tlb(vcpu); } vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
*/ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, 0); if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); vcpu->arch.pat = vmcs12->host_ia32_pat; } if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, vmcs12->host_ia32_perf_global_ctrl); /* Set L1 segment info according to Intel SDM 27.5.2 Loading Host Segment and Descriptor-Table Registers */ seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .selector = vmcs12->host_cs_selector, .type = 11, .present = 1, .s = 1, .g = 1 }; if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) seg.l = 1; else seg.db = 1; vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .type = 3, .present = 1, .s = 1, .db = 1, .g = 1 }; seg.selector = vmcs12->host_ds_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); seg.selector = vmcs12->host_es_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); seg.selector = vmcs12->host_ss_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); seg.selector = vmcs12->host_fs_selector; seg.base = vmcs12->host_fs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); seg.selector = vmcs12->host_gs_selector; seg.base = vmcs12->host_gs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); seg = (struct kvm_segment) { .base = vmcs12->host_tr_base, .limit = 0x67, .selector = vmcs12->host_tr_selector, .type = 11, .present = 1 }; vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); kvm_set_dr(vcpu, 7, 0x400); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } /* * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 * and modify vmcs12 to make it see what it would expect to see there if * L2 was its real guest. 
Must only be called when in L2 (is_guest_mode()) */ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); leave_guest_mode(vcpu); prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, exit_qualification); vmx_load_vmcs01(vcpu); if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) && nested_exit_intr_ack_set(vcpu)) { int irq = kvm_cpu_get_interrupt(vcpu); WARN_ON(irq < 0); vmcs12->vm_exit_intr_info = irq | INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; } trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, vmcs12->exit_qualification, vmcs12->idt_vectoring_info_field, vmcs12->vm_exit_intr_info, vmcs12->vm_exit_intr_error_code, KVM_ISA_VMX); vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); vmx_segment_cache_clear(vmx); /* if no vmcs02 cache requested, remove the one we used */ if (VMCS02_POOL_SIZE == 0) nested_free_vmcs02(vmx, vmx->nested.current_vmptr); load_vmcs12_host_state(vcpu, vmcs12); /* Update TSC_OFFSET if TSC was changed while L2 ran */ vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); /* This is needed for same reason as it was needed in prepare_vmcs02 */ vmx->host_rsp = 0; /* Unpin physical memory we referred to in vmcs02 */ if (vmx->nested.apic_access_page) { nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } /* * We are now running in L2, mmu_notifier will force to reload the * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1. */ kvm_vcpu_reload_apic_access_page(vcpu); /* * Exiting from L2 to L1, we're now back to L1 which thinks it just * finished a VMLAUNCH or VMRESUME instruction, so we need to set the * success or failure flag accordingly. */ if (unlikely(vmx->fail)) { vmx->fail = 0; nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR)); } else nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } /* * Forcibly leave nested mode in order to be able to reset the VCPU later on. */ static void vmx_leave_nested(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) nested_vmx_vmexit(vcpu, -1, 0, 0); free_nested(to_vmx(vcpu)); } /* * L1's failure to enter L2 is a subset of a normal exit, as explained in * 23.7 "VM-entry failures during or after loading guest state" (this also * lists the acceptable exit-reason and exit-qualification parameters). * It should only be called before L2 actually succeeded to run, and when * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss). 
*/ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification) { load_vmcs12_host_state(vcpu, vmcs12); vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY; vmcs12->exit_qualification = qualification; nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) to_vmx(vcpu)->nested.sync_shadow_vmcs = true; } static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) { return X86EMUL_CONTINUE; } static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) { if (ple_gap) shrink_ple_window(vcpu); } static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, .hardware_setup = hardware_setup, .hardware_unsetup = hardware_unsetup, .check_processor_compatibility = vmx_check_processor_compat, .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, .cpu_has_accelerated_tpr = report_flexpriority, .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, .vcpu_reset = vmx_vcpu_reset, .prepare_guest_switch = vmx_save_host_state, .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, .update_db_bp_intercept = update_exception_bitmap, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, .get_segment = vmx_get_segment, .set_segment = vmx_set_segment, .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, .decache_cr3 = vmx_decache_cr3, .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, .set_cr4 = vmx_set_cr4, .set_efer = vmx_set_efer, .get_idt = vmx_get_idt, .set_idt = vmx_set_idt, .get_gdt = vmx_get_gdt, .set_gdt = vmx_set_gdt, .get_dr6 = vmx_get_dr6, .set_dr6 = vmx_set_dr6, .set_dr7 = vmx_set_dr7, .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, .cache_reg = vmx_cache_reg, .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, .fpu_deactivate = vmx_fpu_deactivate, .tlb_flush = vmx_flush_tlb, .run = vmx_vcpu_run, .handle_exit = vmx_handle_exit, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = vmx_set_interrupt_shadow, .get_interrupt_shadow = vmx_get_interrupt_shadow, .patch_hypercall = vmx_patch_hypercall, .set_irq = vmx_inject_irq, .set_nmi = vmx_inject_nmi, .queue_exception = vmx_queue_exception, .cancel_injection = vmx_cancel_injection, .interrupt_allowed = vmx_interrupt_allowed, .nmi_allowed = vmx_nmi_allowed, .get_nmi_mask = vmx_get_nmi_mask, .set_nmi_mask = vmx_set_nmi_mask, .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .vm_has_apicv = vmx_vm_has_apicv, .load_eoi_exitmap = vmx_load_eoi_exitmap, .hwapic_irr_update = vmx_hwapic_irr_update, .hwapic_isr_update = vmx_hwapic_isr_update, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, .get_mt_mask = vmx_get_mt_mask, .get_exit_info = vmx_get_exit_info, .get_lpage_level = vmx_get_lpage_level, .cpuid_update = vmx_cpuid_update, .rdtscp_supported = vmx_rdtscp_supported, .invpcid_supported = vmx_invpcid_supported, .set_supported_cpuid = vmx_set_supported_cpuid, .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, .set_tsc_khz = vmx_set_tsc_khz, .read_tsc_offset = 
vmx_read_tsc_offset, .write_tsc_offset = vmx_write_tsc_offset, .adjust_tsc_offset = vmx_adjust_tsc_offset, .compute_tsc_offset = vmx_compute_tsc_offset, .read_l1_tsc = vmx_read_l1_tsc, .set_tdp_cr3 = vmx_set_cr3, .check_intercept = vmx_check_intercept, .handle_external_intr = vmx_handle_external_intr, .mpx_supported = vmx_mpx_supported, .check_nested_events = vmx_check_nested_events, .sched_in = vmx_sched_in, }; static int __init vmx_init(void) { int r, i, msr; rdmsrl_safe(MSR_EFER, &host_efer); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) kvm_define_shared_msr(i, vmx_msr_index[i]); vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_io_bitmap_a) return -ENOMEM; r = -ENOMEM; vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_io_bitmap_b) goto out; vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_legacy) goto out1; vmx_msr_bitmap_legacy_x2apic = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_legacy_x2apic) goto out2; vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_longmode) goto out3; vmx_msr_bitmap_longmode_x2apic = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_longmode_x2apic) goto out4; vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_vmread_bitmap) goto out5; vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_vmwrite_bitmap) goto out6; memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); /* * Allow direct access to the PC debug port (it is often used for I/O * delays, but the vmexits simply slow things down). */ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); clear_bit(0x80, vmx_io_bitmap_a); memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx), THIS_MODULE); if (r) goto out7; #ifdef CONFIG_KEXEC rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); #endif vmx_disable_intercept_for_msr(MSR_FS_BASE, false); vmx_disable_intercept_for_msr(MSR_GS_BASE, false); vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true); memcpy(vmx_msr_bitmap_legacy_x2apic, vmx_msr_bitmap_legacy, PAGE_SIZE); memcpy(vmx_msr_bitmap_longmode_x2apic, vmx_msr_bitmap_longmode, PAGE_SIZE); if (enable_apicv) { for (msr = 0x800; msr <= 0x8ff; msr++) vmx_disable_intercept_msr_read_x2apic(msr); /* According SDM, in x2apic mode, the whole id reg is used. * But in KVM, it only use the highest eight bits. Need to * intercept it */ vmx_enable_intercept_msr_read_x2apic(0x802); /* TMCCT */ vmx_enable_intercept_msr_read_x2apic(0x839); /* TPR */ vmx_disable_intercept_msr_write_x2apic(0x808); /* EOI */ vmx_disable_intercept_msr_write_x2apic(0x80b); /* SELF-IPI */ vmx_disable_intercept_msr_write_x2apic(0x83f); } if (enable_ept) { kvm_mmu_set_mask_ptes(0ull, (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull, (enable_ept_ad_bits) ? 
VMX_EPT_DIRTY_BIT : 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK); ept_set_mmio_spte_mask(); kvm_enable_tdp(); } else kvm_disable_tdp(); update_ple_window_actual_max(); return 0; out7: free_page((unsigned long)vmx_vmwrite_bitmap); out6: free_page((unsigned long)vmx_vmread_bitmap); out5: free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); out4: free_page((unsigned long)vmx_msr_bitmap_longmode); out3: free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); out2: free_page((unsigned long)vmx_msr_bitmap_legacy); out1: free_page((unsigned long)vmx_io_bitmap_b); out: free_page((unsigned long)vmx_io_bitmap_a); return r; } static void __exit vmx_exit(void) { free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); free_page((unsigned long)vmx_msr_bitmap_legacy); free_page((unsigned long)vmx_msr_bitmap_longmode); free_page((unsigned long)vmx_io_bitmap_b); free_page((unsigned long)vmx_io_bitmap_a); free_page((unsigned long)vmx_vmwrite_bitmap); free_page((unsigned long)vmx_vmread_bitmap); #ifdef CONFIG_KEXEC RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); synchronize_rcu(); #endif kvm_exit(); } module_init(vmx_init) module_exit(vmx_exit)
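/*
 * Editor's note: the runnable sketch promised in the vmcs12_guest_cr0()
 * comment above -- a self-contained userspace model (not kernel code) of
 * the three-source merge done by vmcs12_guest_cr0()/vmcs12_guest_cr4().
 * All names and the values in main() are illustrative stand-ins for the
 * vmcs fields the real functions read.
 */
#include <stdio.h>

static unsigned long merge_guest_cr0(unsigned long vmcs02_cr0,  /* GUEST_CR0 */
                                     unsigned long read_shadow, /* CR0_READ_SHADOW */
                                     unsigned long vmcs12_cr0,  /* stale vmcs12->guest_cr0 */
                                     unsigned long guest_owned, /* cr0_guest_owned_bits */
                                     unsigned long l1_mask)     /* cr0_guest_host_mask */
{
        return /*1*/ (vmcs02_cr0 & guest_owned) |
               /*2*/ (vmcs12_cr0 & l1_mask) |
               /*3*/ (read_shadow & ~(l1_mask | guest_owned));
}

int main(void)
{
        /* L2 owns TS (bit 3), L1 traps PG (bit 31), the rest comes via the shadow */
        unsigned long cr0 = merge_guest_cr0(0x8000000bUL, 0x00000033UL,
                                            0x80000011UL, 0x8UL, 0x80000000UL);

        printf("merged guest_cr0 = %#lx\n", cr0);       /* 0x8000003b */
        return 0;
}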
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2163_1
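/*
 * Editor's note: a second userspace sketch for the vmx excerpt above,
 * showing the unit conversion in vmx_get_preemption_timer_value().
 * Nanoseconds left on the hrtimer are converted to TSC cycles (tsc_khz is
 * cycles per millisecond) and then scaled by the emulated preemption-timer
 * rate; the value 5 mirrors VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE but is
 * an assumption here.
 */
#include <stdio.h>
#include <stdint.h>

#define EMULATED_PREEMPTION_TIMER_RATE 5 /* assumed: one tick per 2^5 TSC cycles */

static uint64_t preemption_timer_value(int64_t remaining_ns, uint64_t tsc_khz)
{
        uint64_t value;

        if (remaining_ns <= 0)
                return 0;
        value = (uint64_t)remaining_ns * tsc_khz;       /* ns * cycles/ms */
        value /= 1000000;                               /* -> TSC cycles */
        return value >> EMULATED_PREEMPTION_TIMER_RATE;
}

int main(void)
{
        /* 1 ms left with a 3 GHz TSC: 3,000,000 cycles -> 93750 timer ticks */
        printf("%llu\n",
               (unsigned long long)preemption_timer_value(1000000, 3000000));
        return 0;
}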
crossvul-cpp_data_good_1861_0
/*
 * arch/arm/kernel/sys_oabi-compat.c
 *
 * Compatibility wrappers for syscalls that are used from
 * old ABI user space binaries with an EABI kernel.
 *
 * Author:	Nicolas Pitre
 * Created:	Oct 7, 2005
 * Copyright:	MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * The legacy ABI and the new ARM EABI have different rules, making some
 * syscalls incompatible, especially those with structure arguments.
 * Most notably, EABI says 64-bit members should be 64-bit aligned instead of
 * simply word aligned.  EABI also pads structures to the size of the largest
 * member they contain instead of the invariant 32 bits.
 *
 * The following syscalls are affected:
 *
 * sys_stat64:
 * sys_lstat64:
 * sys_fstat64:
 * sys_fstatat64:
 *
 *   struct stat64 has different sizes and some members are shifted.
 *   Compatibility wrappers are needed for them and provided below.
 *
 * sys_fcntl64:
 *
 *   struct flock64 has different sizes and some members are shifted.
 *   A compatibility wrapper is needed and provided below.
 *
 * sys_statfs64:
 * sys_fstatfs64:
 *
 *   struct statfs64 has extra padding with EABI, growing its size from
 *   84 to 88.  This struct is now __attribute__((packed,aligned(4)))
 *   with a small assembly wrapper to force the sz argument to 84 if it is
 *   88, to avoid copying the extra padding over user space that is not
 *   expecting it.
 *
 * sys_newuname:
 *
 *   struct new_utsname has no padding with EABI.  No problem there.
 *
 * sys_epoll_ctl:
 * sys_epoll_wait:
 *
 *   struct epoll_event has its second member shifted, which also affects
 *   the structure size.  Compatibility wrappers are needed and provided
 *   below.
 *
 * sys_ipc:
 * sys_semop:
 * sys_semtimedop:
 *
 *   struct sembuf loses its padding with EABI.  Since arrays of them are
 *   used, they have to be copied to remove the padding.  Compatibility
 *   wrappers are provided below.
 *
 * sys_bind:
 * sys_connect:
 * sys_sendmsg:
 * sys_sendto:
 * sys_socketcall:
 *
 *   struct sockaddr_un loses its padding with EABI.  Since the size of the
 *   structure is used as a validation test in unix_mkname(), we need to
 *   change the length argument to 110 whenever it is 112.  Compatibility
 *   wrappers are provided below.
*/ #include <linux/syscalls.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/fcntl.h> #include <linux/eventpoll.h> #include <linux/sem.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/ipc.h> #include <linux/uaccess.h> #include <linux/slab.h> struct oldabi_stat64 { unsigned long long st_dev; unsigned int __pad1; unsigned long __st_ino; unsigned int st_mode; unsigned int st_nlink; unsigned long st_uid; unsigned long st_gid; unsigned long long st_rdev; unsigned int __pad2; long long st_size; unsigned long st_blksize; unsigned long long st_blocks; unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned long st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long long st_ino; } __attribute__ ((packed,aligned(4))); static long cp_oldabi_stat64(struct kstat *stat, struct oldabi_stat64 __user *statbuf) { struct oldabi_stat64 tmp; tmp.st_dev = huge_encode_dev(stat->dev); tmp.__pad1 = 0; tmp.__st_ino = stat->ino; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid); tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid); tmp.st_rdev = huge_encode_dev(stat->rdev); tmp.st_size = stat->size; tmp.st_blocks = stat->blocks; tmp.__pad2 = 0; tmp.st_blksize = stat->blksize; tmp.st_atime = stat->atime.tv_sec; tmp.st_atime_nsec = stat->atime.tv_nsec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_mtime_nsec = stat->mtime.tv_nsec; tmp.st_ctime = stat->ctime.tv_sec; tmp.st_ctime_nsec = stat->ctime.tv_nsec; tmp.st_ino = stat->ino; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; } asmlinkage long sys_oabi_stat64(const char __user * filename, struct oldabi_stat64 __user * statbuf) { struct kstat stat; int error = vfs_stat(filename, &stat); if (!error) error = cp_oldabi_stat64(&stat, statbuf); return error; } asmlinkage long sys_oabi_lstat64(const char __user * filename, struct oldabi_stat64 __user * statbuf) { struct kstat stat; int error = vfs_lstat(filename, &stat); if (!error) error = cp_oldabi_stat64(&stat, statbuf); return error; } asmlinkage long sys_oabi_fstat64(unsigned long fd, struct oldabi_stat64 __user * statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_oldabi_stat64(&stat, statbuf); return error; } asmlinkage long sys_oabi_fstatat64(int dfd, const char __user *filename, struct oldabi_stat64 __user *statbuf, int flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_oldabi_stat64(&stat, statbuf); } struct oabi_flock64 { short l_type; short l_whence; loff_t l_start; loff_t l_len; pid_t l_pid; } __attribute__ ((packed,aligned(4))); static long do_locks(unsigned int fd, unsigned int cmd, unsigned long arg) { struct flock64 kernel; struct oabi_flock64 user; mm_segment_t fs; long ret; if (copy_from_user(&user, (struct oabi_flock64 __user *)arg, sizeof(user))) return -EFAULT; kernel.l_type = user.l_type; kernel.l_whence = user.l_whence; kernel.l_start = user.l_start; kernel.l_len = user.l_len; kernel.l_pid = user.l_pid; fs = get_fs(); set_fs(KERNEL_DS); ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel); set_fs(fs); if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) { user.l_type = kernel.l_type; user.l_whence = kernel.l_whence; user.l_start = kernel.l_start; user.l_len = kernel.l_len; user.l_pid = kernel.l_pid; if (copy_to_user((struct oabi_flock64 __user *)arg, &user, sizeof(user))) ret = -EFAULT; } return ret; } asmlinkage long 
sys_oabi_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg) { switch (cmd) { case F_OFD_GETLK: case F_OFD_SETLK: case F_OFD_SETLKW: case F_GETLK64: case F_SETLK64: case F_SETLKW64: return do_locks(fd, cmd, arg); default: return sys_fcntl64(fd, cmd, arg); } } struct oabi_epoll_event { __u32 events; __u64 data; } __attribute__ ((packed,aligned(4))); asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, struct oabi_epoll_event __user *event) { struct oabi_epoll_event user; struct epoll_event kernel; mm_segment_t fs; long ret; if (op == EPOLL_CTL_DEL) return sys_epoll_ctl(epfd, op, fd, NULL); if (copy_from_user(&user, event, sizeof(user))) return -EFAULT; kernel.events = user.events; kernel.data = user.data; fs = get_fs(); set_fs(KERNEL_DS); ret = sys_epoll_ctl(epfd, op, fd, &kernel); set_fs(fs); return ret; } asmlinkage long sys_oabi_epoll_wait(int epfd, struct oabi_epoll_event __user *events, int maxevents, int timeout) { struct epoll_event *kbuf; mm_segment_t fs; long ret, err, i; if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event))) return -EINVAL; kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); if (!kbuf) return -ENOMEM; fs = get_fs(); set_fs(KERNEL_DS); ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout); set_fs(fs); err = 0; for (i = 0; i < ret; i++) { __put_user_error(kbuf[i].events, &events->events, err); __put_user_error(kbuf[i].data, &events->data, err); events++; } kfree(kbuf); return err ? -EFAULT : ret; } struct oabi_sembuf { unsigned short sem_num; short sem_op; short sem_flg; unsigned short __pad; }; asmlinkage long sys_oabi_semtimedop(int semid, struct oabi_sembuf __user *tsops, unsigned nsops, const struct timespec __user *timeout) { struct sembuf *sops; struct timespec local_timeout; long err; int i; if (nsops < 1 || nsops > SEMOPM) return -EINVAL; sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); if (!sops) return -ENOMEM; err = 0; for (i = 0; i < nsops; i++) { __get_user_error(sops[i].sem_num, &tsops->sem_num, err); __get_user_error(sops[i].sem_op, &tsops->sem_op, err); __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err); tsops++; } if (timeout) { /* copy this as well before changing domain protection */ err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout)); timeout = &local_timeout; } if (err) { err = -EFAULT; } else { mm_segment_t fs = get_fs(); set_fs(KERNEL_DS); err = sys_semtimedop(semid, sops, nsops, timeout); set_fs(fs); } kfree(sops); return err; } asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, unsigned nsops) { return sys_oabi_semtimedop(semid, tsops, nsops, NULL); } asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, void __user *ptr, long fifth) { switch (call & 0xffff) { case SEMOP: return sys_oabi_semtimedop(first, (struct oabi_sembuf __user *)ptr, second, NULL); case SEMTIMEDOP: return sys_oabi_semtimedop(first, (struct oabi_sembuf __user *)ptr, second, (const struct timespec __user *)fifth); default: return sys_ipc(call, first, second, third, ptr, fifth); } } asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen) { sa_family_t sa_family; if (addrlen == 112 && get_user(sa_family, &addr->sa_family) == 0 && sa_family == AF_UNIX) addrlen = 110; return sys_bind(fd, addr, addrlen); } asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen) { sa_family_t sa_family; if (addrlen == 112 && get_user(sa_family, &addr->sa_family) == 0 && sa_family == AF_UNIX) addrlen = 110; return sys_connect(fd, addr, 
addrlen); } asmlinkage long sys_oabi_sendto(int fd, void __user *buff, size_t len, unsigned flags, struct sockaddr __user *addr, int addrlen) { sa_family_t sa_family; if (addrlen == 112 && get_user(sa_family, &addr->sa_family) == 0 && sa_family == AF_UNIX) addrlen = 110; return sys_sendto(fd, buff, len, flags, addr, addrlen); } asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { struct sockaddr __user *addr; int msg_namelen; sa_family_t sa_family; if (msg && get_user(msg_namelen, &msg->msg_namelen) == 0 && msg_namelen == 112 && get_user(addr, &msg->msg_name) == 0 && get_user(sa_family, &addr->sa_family) == 0 && sa_family == AF_UNIX) { /* * HACK ALERT: there is a limit to how much backward bending * we should do for what is actually a transitional * compatibility layer. This already has known flaws with * a few ioctls that we don't intend to fix. Therefore * consider this blatent hack as another one... and take care * to run for cover. In most cases it will "just work fine". * If it doesn't, well, tough. */ put_user(110, &msg->msg_namelen); } return sys_sendmsg(fd, msg, flags); } asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args) { unsigned long r = -EFAULT, a[6]; switch (call) { case SYS_BIND: if (copy_from_user(a, args, 3 * sizeof(long)) == 0) r = sys_oabi_bind(a[0], (struct sockaddr __user *)a[1], a[2]); break; case SYS_CONNECT: if (copy_from_user(a, args, 3 * sizeof(long)) == 0) r = sys_oabi_connect(a[0], (struct sockaddr __user *)a[1], a[2]); break; case SYS_SENDTO: if (copy_from_user(a, args, 6 * sizeof(long)) == 0) r = sys_oabi_sendto(a[0], (void __user *)a[1], a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_SENDMSG: if (copy_from_user(a, args, 3 * sizeof(long)) == 0) r = sys_oabi_sendmsg(a[0], (struct user_msghdr __user *)a[1], a[2]); break; default: r = sys_socketcall(call, args); } return r; }
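/*
 * Editor's note: a standalone illustration (not part of the file above) of
 * the layout mismatch these wrappers exist to paper over.  Under EABI a
 * 64-bit member is 8-byte aligned, so a flock64-shaped struct gains padding
 * after the two shorts; packed,aligned(4) recovers the legacy OABI layout,
 * exactly as struct oabi_flock64 above does.  Field types are stand-ins
 * (long long for loff_t, int for pid_t), and the quoted sizes assume an ABI
 * such as ARM EABI that 8-byte-aligns long long.
 */
#include <stdio.h>

struct eabi_flock64 {                   /* natural (EABI) layout */
        short           l_type;
        short           l_whence;
        long long       l_start;        /* 4 bytes of padding inserted before this */
        long long       l_len;
        int             l_pid;
};                                      /* 32 bytes, 8-byte aligned, on ARM EABI */

struct oabi_flock64_demo {              /* legacy (OABI) layout */
        short           l_type;
        short           l_whence;
        long long       l_start;
        long long       l_len;
        int             l_pid;
} __attribute__ ((packed,aligned(4)));  /* 24 bytes, 4-byte aligned */

int main(void)
{
        printf("eabi: size %zu align %zu\n", sizeof(struct eabi_flock64),
               _Alignof(struct eabi_flock64));
        printf("oabi: size %zu align %zu\n", sizeof(struct oabi_flock64_demo),
               _Alignof(struct oabi_flock64_demo));
        return 0;
}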
./CrossVul/dataset_final_sorted/CWE-264/c/good_1861_0
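/*
 * Editor's note: a minimal userspace sketch of the length fixup that
 * sys_oabi_bind(), sys_oabi_connect() and sys_oabi_sendto() in the excerpt
 * above all apply: a padded OABI struct sockaddr_un is 112 bytes, while the
 * kernel's unix_mkname() validates against the 110-byte EABI size, so 112
 * is quietly rewritten to 110 for AF_UNIX addresses.
 */
#include <stdio.h>
#include <sys/socket.h>

static int fixup_oabi_addrlen(const struct sockaddr *addr, int addrlen)
{
        if (addrlen == 112 && addr->sa_family == AF_UNIX)
                addrlen = 110;
        return addrlen;
}

int main(void)
{
        struct sockaddr sa = { .sa_family = AF_UNIX };

        printf("%d\n", fixup_oabi_addrlen(&sa, 112));   /* prints 110 */
        printf("%d\n", fixup_oabi_addrlen(&sa, 16));    /* other sizes pass through */
        return 0;
}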
crossvul-cpp_data_bad_3526_0
/* * Copyright (C) 2003 Sistina Software (UK) Limited. * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include <linux/device-mapper.h> #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/slab.h> #define DM_MSG_PREFIX "flakey" #define all_corrupt_bio_flags_match(bio, fc) \ (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags) /* * Flakey: Used for testing only, simulates intermittent, * catastrophic device failure. */ struct flakey_c { struct dm_dev *dev; unsigned long start_time; sector_t start; unsigned up_interval; unsigned down_interval; unsigned long flags; unsigned corrupt_bio_byte; unsigned corrupt_bio_rw; unsigned corrupt_bio_value; unsigned corrupt_bio_flags; }; enum feature_flag_bits { DROP_WRITES }; static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, struct dm_target *ti) { int r; unsigned argc; const char *arg_name; static struct dm_arg _args[] = { {0, 6, "Invalid number of feature args"}, {1, UINT_MAX, "Invalid corrupt bio byte"}, {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, {0, UINT_MAX, "Invalid corrupt bio flags mask"}, }; /* No feature arguments supplied. */ if (!as->argc) return 0; r = dm_read_arg_group(_args, as, &argc, &ti->error); if (r) return r; while (argc) { arg_name = dm_shift_arg(as); argc--; /* * drop_writes */ if (!strcasecmp(arg_name, "drop_writes")) { if (test_and_set_bit(DROP_WRITES, &fc->flags)) { ti->error = "Feature drop_writes duplicated"; return -EINVAL; } continue; } /* * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> */ if (!strcasecmp(arg_name, "corrupt_bio_byte")) { if (!argc) { ti->error = "Feature corrupt_bio_byte requires parameters"; return -EINVAL; } r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); if (r) return r; argc--; /* * Direction r or w? */ arg_name = dm_shift_arg(as); if (!strcasecmp(arg_name, "w")) fc->corrupt_bio_rw = WRITE; else if (!strcasecmp(arg_name, "r")) fc->corrupt_bio_rw = READ; else { ti->error = "Invalid corrupt bio direction (r or w)"; return -EINVAL; } argc--; /* * Value of byte (0-255) to write in place of correct one. */ r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error); if (r) return r; argc--; /* * Only corrupt bios with these flags set. */ r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error); if (r) return r; argc--; continue; } ti->error = "Unrecognised flakey feature requested"; return -EINVAL; } if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) { ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set"; return -EINVAL; } return 0; } /* * Construct a flakey mapping: * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*] * * Feature args: * [drop_writes] * [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>] * * Nth_byte starts from 1 for the first byte. * Direction is r for READ or w for WRITE. * bio_flags is ignored if 0. 
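 *
 * A hypothetical dmsetup table line using this constructor (device number,
 * sector count and intervals are made up):
 *
 *     0 409600 flakey 8:16 0 59 1 1 drop_writes
 *
 * maps 409600 sectors of device 8:16 at offset 0, behaving normally for
 * 59 seconds and then silently dropping writes for 1 second, repeating.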
*/ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) { static struct dm_arg _args[] = { {0, UINT_MAX, "Invalid up interval"}, {0, UINT_MAX, "Invalid down interval"}, }; int r; struct flakey_c *fc; unsigned long long tmpll; struct dm_arg_set as; const char *devname; as.argc = argc; as.argv = argv; if (argc < 4) { ti->error = "Invalid argument count"; return -EINVAL; } fc = kzalloc(sizeof(*fc), GFP_KERNEL); if (!fc) { ti->error = "Cannot allocate linear context"; return -ENOMEM; } fc->start_time = jiffies; devname = dm_shift_arg(&as); if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) { ti->error = "Invalid device sector"; goto bad; } fc->start = tmpll; r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error); if (r) goto bad; r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error); if (r) goto bad; if (!(fc->up_interval + fc->down_interval)) { ti->error = "Total (up + down) interval is zero"; goto bad; } if (fc->up_interval + fc->down_interval < fc->up_interval) { ti->error = "Interval overflow"; goto bad; } r = parse_features(&as, fc, ti); if (r) goto bad; if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) { ti->error = "Device lookup failed"; goto bad; } ti->num_flush_requests = 1; ti->num_discard_requests = 1; ti->private = fc; return 0; bad: kfree(fc); return -EINVAL; } static void flakey_dtr(struct dm_target *ti) { struct flakey_c *fc = ti->private; dm_put_device(ti, fc->dev); kfree(fc); } static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) { struct flakey_c *fc = ti->private; return fc->start + dm_target_offset(ti, bi_sector); } static void flakey_map_bio(struct dm_target *ti, struct bio *bio) { struct flakey_c *fc = ti->private; bio->bi_bdev = fc->dev->bdev; if (bio_sectors(bio)) bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); } static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) { unsigned bio_bytes = bio_cur_bytes(bio); char *data = bio_data(bio); /* * Overwrite the Nth byte of the data returned. */ if (data && bio_bytes >= fc->corrupt_bio_byte) { data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value; DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); } } static int flakey_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) { struct flakey_c *fc = ti->private; unsigned elapsed; /* Are we alive ? */ elapsed = (jiffies - fc->start_time) / HZ; if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) { /* * Flag this bio as submitted while down. */ map_context->ll = 1; /* * Map reads as normal. */ if (bio_data_dir(bio) == READ) goto map_bio; /* * Drop writes? */ if (test_bit(DROP_WRITES, &fc->flags)) { bio_endio(bio, 0); return DM_MAPIO_SUBMITTED; } /* * Corrupt matching writes. */ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) { if (all_corrupt_bio_flags_match(bio, fc)) corrupt_bio_data(bio, fc); goto map_bio; } /* * By default, error all I/O. */ return -EIO; } map_bio: flakey_map_bio(ti, bio); return DM_MAPIO_REMAPPED; } static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error, union map_info *map_context) { struct flakey_c *fc = ti->private; unsigned bio_submitted_while_down = map_context->ll; /* * Corrupt successful READs while in down state. * If flags were specified, only corrupt those that match. 
*/ if (!error && bio_submitted_while_down && (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && all_corrupt_bio_flags_match(bio, fc)) corrupt_bio_data(bio, fc); return error; } static int flakey_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { unsigned sz = 0; struct flakey_c *fc = ti->private; unsigned drop_writes; switch (type) { case STATUSTYPE_INFO: result[0] = '\0'; break; case STATUSTYPE_TABLE: DMEMIT("%s %llu %u %u ", fc->dev->name, (unsigned long long)fc->start, fc->up_interval, fc->down_interval); drop_writes = test_bit(DROP_WRITES, &fc->flags); DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5); if (drop_writes) DMEMIT("drop_writes "); if (fc->corrupt_bio_byte) DMEMIT("corrupt_bio_byte %u %c %u %u ", fc->corrupt_bio_byte, (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r', fc->corrupt_bio_value, fc->corrupt_bio_flags); break; } return 0; } static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct flakey_c *fc = ti->private; return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg); } static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm, struct bio_vec *biovec, int max_size) { struct flakey_c *fc = ti->private; struct request_queue *q = bdev_get_queue(fc->dev->bdev); if (!q->merge_bvec_fn) return max_size; bvm->bi_bdev = fc->dev->bdev; bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector); return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); } static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct flakey_c *fc = ti->private; return fn(ti, fc->dev, fc->start, ti->len, data); } static struct target_type flakey_target = { .name = "flakey", .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = flakey_ctr, .dtr = flakey_dtr, .map = flakey_map, .end_io = flakey_end_io, .status = flakey_status, .ioctl = flakey_ioctl, .merge = flakey_merge, .iterate_devices = flakey_iterate_devices, }; static int __init dm_flakey_init(void) { int r = dm_register_target(&flakey_target); if (r < 0) DMERR("register failed %d", r); return r; } static void __exit dm_flakey_exit(void) { dm_unregister_target(&flakey_target); } /* Module hooks */ module_init(dm_flakey_init); module_exit(dm_flakey_exit); MODULE_DESCRIPTION(DM_NAME " flakey target"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL");
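/*
 * Editor's note: a tiny userspace sketch of the liveness test at the top of
 * flakey_map() above.  Time is divided into repeating periods of
 * up_interval + down_interval seconds; the device behaves normally during
 * the first up_interval seconds of each period and misbehaves for the rest.
 */
#include <stdio.h>

static int flakey_is_down(unsigned elapsed, unsigned up, unsigned down)
{
        return elapsed % (up + down) >= up;
}

int main(void)
{
        unsigned t;

        /* 3s up, 2s down: seconds 0-2 are up, 3-4 are down, repeating */
        for (t = 0; t < 10; t++)
                printf("t=%u: %s\n", t, flakey_is_down(t, 3, 2) ? "down" : "up");
        return 0;
}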
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3526_0
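/*
 * Editor's note: a standalone sketch of corrupt_bio_data() from the
 * dm-flakey excerpt above, stripped of the bio plumbing.  The Nth byte
 * (1-based, per the corrupt_bio_byte parameter) of the current buffer is
 * overwritten only if the buffer actually extends that far.
 */
#include <stdio.h>
#include <string.h>

static void corrupt_nth_byte(unsigned char *data, unsigned len,
                             unsigned nth, unsigned char value)
{
        if (data && len >= nth)
                data[nth - 1] = value;
}

int main(void)
{
        unsigned char buf[8];

        memset(buf, 0xaa, sizeof(buf));
        corrupt_nth_byte(buf, sizeof(buf), 3, 0xff);    /* third byte flips */
        corrupt_nth_byte(buf, sizeof(buf), 32, 0xff);   /* out of range: no-op */
        printf("buf[2] = %#x\n", buf[2]);               /* 0xff */
        return 0;
}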
crossvul-cpp_data_bad_2243_0
/* * linux/fs/namespace.c * * (C) Copyright Al Viro 2000, 2001 * Released under GPL v2. * * Based on code from fs/super.c, copyright Linus Torvalds and others. * Heavily rewritten. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/capability.h> #include <linux/mnt_namespace.h> #include <linux/user_namespace.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/idr.h> #include <linux/acct.h> /* acct_auto_close_mnt */ #include <linux/init.h> /* init_rootfs */ #include <linux/fs_struct.h> /* get_fs_root et.al. */ #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ #include <linux/uaccess.h> #include <linux/proc_ns.h> #include <linux/magic.h> #include <linux/bootmem.h> #include "pnode.h" #include "internal.h" static unsigned int m_hash_mask __read_mostly; static unsigned int m_hash_shift __read_mostly; static unsigned int mp_hash_mask __read_mostly; static unsigned int mp_hash_shift __read_mostly; static __initdata unsigned long mhash_entries; static int __init set_mhash_entries(char *str) { if (!str) return 0; mhash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("mhash_entries=", set_mhash_entries); static __initdata unsigned long mphash_entries; static int __init set_mphash_entries(char *str) { if (!str) return 0; mphash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("mphash_entries=", set_mphash_entries); static u64 event; static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_group_ida); static DEFINE_SPINLOCK(mnt_id_lock); static int mnt_id_start = 0; static int mnt_group_start = 1; static struct hlist_head *mount_hashtable __read_mostly; static struct hlist_head *mountpoint_hashtable __read_mostly; static struct kmem_cache *mnt_cache __read_mostly; static DECLARE_RWSEM(namespace_sem); /* /sys/fs */ struct kobject *fs_kobj; EXPORT_SYMBOL_GPL(fs_kobj); /* * vfsmount lock may be taken for read to prevent changes to the * vfsmount hash, ie. during mountpoint lookups or walking back * up the tree. * * It should be taken for write in all cases where the vfsmount * tree or hash is modified or when a vfsmount structure is modified. */ __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) { unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); tmp += ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> m_hash_shift); return &mount_hashtable[tmp & m_hash_mask]; } static inline struct hlist_head *mp_hash(struct dentry *dentry) { unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> mp_hash_shift); return &mountpoint_hashtable[tmp & mp_hash_mask]; } /* * allocation is serialized by namespace_sem, but we need the spinlock to * serialize with freeing. 
*/ static int mnt_alloc_id(struct mount *mnt) { int res; retry: ida_pre_get(&mnt_id_ida, GFP_KERNEL); spin_lock(&mnt_id_lock); res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); if (!res) mnt_id_start = mnt->mnt_id + 1; spin_unlock(&mnt_id_lock); if (res == -EAGAIN) goto retry; return res; } static void mnt_free_id(struct mount *mnt) { int id = mnt->mnt_id; spin_lock(&mnt_id_lock); ida_remove(&mnt_id_ida, id); if (mnt_id_start > id) mnt_id_start = id; spin_unlock(&mnt_id_lock); } /* * Allocate a new peer group ID * * mnt_group_ida is protected by namespace_sem */ static int mnt_alloc_group_id(struct mount *mnt) { int res; if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) return -ENOMEM; res = ida_get_new_above(&mnt_group_ida, mnt_group_start, &mnt->mnt_group_id); if (!res) mnt_group_start = mnt->mnt_group_id + 1; return res; } /* * Release a peer group ID */ void mnt_release_group_id(struct mount *mnt) { int id = mnt->mnt_group_id; ida_remove(&mnt_group_ida, id); if (mnt_group_start > id) mnt_group_start = id; mnt->mnt_group_id = 0; } /* * vfsmount lock must be held for read */ static inline void mnt_add_count(struct mount *mnt, int n) { #ifdef CONFIG_SMP this_cpu_add(mnt->mnt_pcp->mnt_count, n); #else preempt_disable(); mnt->mnt_count += n; preempt_enable(); #endif } /* * vfsmount lock must be held for write */ unsigned int mnt_get_count(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; } return count; #else return mnt->mnt_count; #endif } static struct mount *alloc_vfsmnt(const char *name) { struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); if (mnt) { int err; err = mnt_alloc_id(mnt); if (err) goto out_free_cache; if (name) { mnt->mnt_devname = kstrdup(name, GFP_KERNEL); if (!mnt->mnt_devname) goto out_free_id; } #ifdef CONFIG_SMP mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); if (!mnt->mnt_pcp) goto out_free_devname; this_cpu_add(mnt->mnt_pcp->mnt_count, 1); #else mnt->mnt_count = 1; mnt->mnt_writers = 0; #endif INIT_HLIST_NODE(&mnt->mnt_hash); INIT_LIST_HEAD(&mnt->mnt_child); INIT_LIST_HEAD(&mnt->mnt_mounts); INIT_LIST_HEAD(&mnt->mnt_list); INIT_LIST_HEAD(&mnt->mnt_expire); INIT_LIST_HEAD(&mnt->mnt_share); INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif } return mnt; #ifdef CONFIG_SMP out_free_devname: kfree(mnt->mnt_devname); #endif out_free_id: mnt_free_id(mnt); out_free_cache: kmem_cache_free(mnt_cache, mnt); return NULL; } /* * Most r/o checks on a fs are for operations that take * discrete amounts of time, like a write() or unlink(). * We must keep track of when those operations start * (for permission checks) and when they end, so that * we can determine when writes are able to occur to * a filesystem. */ /* * __mnt_is_readonly: check whether a mount is read-only * @mnt: the mount to check for its write status * * This shouldn't be used directly ouside of the VFS. * It does not guarantee that the filesystem will stay * r/w, just that it is right *now*. This can not and * should not be used in place of IS_RDONLY(inode). * mnt_want/drop_write() will _keep_ the filesystem * r/w. 
*/ int __mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_flags & MNT_READONLY) return 1; if (mnt->mnt_sb->s_flags & MS_RDONLY) return 1; return 0; } EXPORT_SYMBOL_GPL(__mnt_is_readonly); static inline void mnt_inc_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_inc(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers++; #endif } static inline void mnt_dec_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_dec(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers--; #endif } static unsigned int mnt_get_writers(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; } return count; #else return mnt->mnt_writers; #endif } static int mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_sb->s_readonly_remount) return 1; /* Order wrt setting s_flags/s_readonly_remount in do_remount() */ smp_rmb(); return __mnt_is_readonly(mnt); } /* * Most r/o & frozen checks on a fs are for operations that take discrete * amounts of time, like a write() or unlink(). We must keep track of when * those operations start (for permission checks) and when they end, so that we * can determine when writes are able to occur to a filesystem. */ /** * __mnt_want_write - get write access to a mount without freeze protection * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mnt it read-write) before * returning success. This operation does not protect against filesystem being * frozen. When the write operation is finished, __mnt_drop_write() must be * called. This is effectively a refcount. */ int __mnt_want_write(struct vfsmount *m) { struct mount *mnt = real_mount(m); int ret = 0; preempt_disable(); mnt_inc_writers(mnt); /* * The store to mnt_inc_writers must be visible before we pass * MNT_WRITE_HOLD loop below, so that the slowpath can see our * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) cpu_relax(); /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until * MNT_WRITE_HOLD is cleared. */ smp_rmb(); if (mnt_is_readonly(m)) { mnt_dec_writers(mnt); ret = -EROFS; } preempt_enable(); return ret; } /** * mnt_want_write - get write access to a mount * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mount is read-write, filesystem * is not frozen) before returning success. When the write operation is * finished, mnt_drop_write() must be called. This is effectively a refcount. */ int mnt_want_write(struct vfsmount *m) { int ret; sb_start_write(m->mnt_sb); ret = __mnt_want_write(m); if (ret) sb_end_write(m->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write); /** * mnt_clone_write - get write access to a mount * @mnt: the mount on which to take a write * * This is effectively like mnt_want_write, except * it must only be used to take an extra write reference * on a mountpoint that we already know has a write reference * on it. This allows some optimisation. * * After finished, mnt_drop_write must be called as usual to * drop the reference. 
*/ int mnt_clone_write(struct vfsmount *mnt) { /* superblock may be r/o */ if (__mnt_is_readonly(mnt)) return -EROFS; preempt_disable(); mnt_inc_writers(real_mount(mnt)); preempt_enable(); return 0; } EXPORT_SYMBOL_GPL(mnt_clone_write); /** * __mnt_want_write_file - get write access to a file's mount * @file: the file who's mount on which to take a write * * This is like __mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ int __mnt_want_write_file(struct file *file) { if (!(file->f_mode & FMODE_WRITER)) return __mnt_want_write(file->f_path.mnt); else return mnt_clone_write(file->f_path.mnt); } /** * mnt_want_write_file - get write access to a file's mount * @file: the file who's mount on which to take a write * * This is like mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ int mnt_want_write_file(struct file *file) { int ret; sb_start_write(file->f_path.mnt->mnt_sb); ret = __mnt_want_write_file(file); if (ret) sb_end_write(file->f_path.mnt->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write_file); /** * __mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done * performing writes to it. Must be matched with * __mnt_want_write() call above. */ void __mnt_drop_write(struct vfsmount *mnt) { preempt_disable(); mnt_dec_writers(real_mount(mnt)); preempt_enable(); } /** * mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done performing writes to it and * also allows filesystem to be frozen again. Must be matched with * mnt_want_write() call above. */ void mnt_drop_write(struct vfsmount *mnt) { __mnt_drop_write(mnt); sb_end_write(mnt->mnt_sb); } EXPORT_SYMBOL_GPL(mnt_drop_write); void __mnt_drop_write_file(struct file *file) { __mnt_drop_write(file->f_path.mnt); } void mnt_drop_write_file(struct file *file) { mnt_drop_write(file->f_path.mnt); } EXPORT_SYMBOL(mnt_drop_write_file); static int mnt_make_readonly(struct mount *mnt) { int ret = 0; lock_mount_hash(); mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; /* * After storing MNT_WRITE_HOLD, we'll read the counters. This store * should be visible before we do. */ smp_mb(); /* * With writers on hold, if this value is zero, then there are * definitely no active writers (although held writers may subsequently * increment the count, they'll have to wait, and decrement it after * seeing MNT_READONLY). * * It is OK to have counter incremented on one CPU and decremented on * another: the sum will add up correctly. The danger would be when we * sum up each counter, if we read a counter before it is incremented, * but then read another CPU's count which it has been subsequently * decremented from -- we would see more decrements than we should. * MNT_WRITE_HOLD protects against this scenario, because * mnt_want_write first increments count, then smp_mb, then spins on * MNT_WRITE_HOLD, so it can't be decremented by another CPU while * we're counting up here. */ if (mnt_get_writers(mnt) > 0) ret = -EBUSY; else mnt->mnt.mnt_flags |= MNT_READONLY; /* * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers * that become unheld will see MNT_READONLY. 
*/ smp_wmb(); mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; unlock_mount_hash(); return ret; } static void __mnt_unmake_readonly(struct mount *mnt) { lock_mount_hash(); mnt->mnt.mnt_flags &= ~MNT_READONLY; unlock_mount_hash(); } int sb_prepare_remount_readonly(struct super_block *sb) { struct mount *mnt; int err = 0; /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */ if (atomic_long_read(&sb->s_remove_count)) return -EBUSY; lock_mount_hash(); list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; smp_mb(); if (mnt_get_writers(mnt) > 0) { err = -EBUSY; break; } } } if (!err && atomic_long_read(&sb->s_remove_count)) err = -EBUSY; if (!err) { sb->s_readonly_remount = 1; smp_wmb(); } list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; } unlock_mount_hash(); return err; } static void free_vfsmnt(struct mount *mnt) { kfree(mnt->mnt_devname); #ifdef CONFIG_SMP free_percpu(mnt->mnt_pcp); #endif kmem_cache_free(mnt_cache, mnt); } static void delayed_free_vfsmnt(struct rcu_head *head) { free_vfsmnt(container_of(head, struct mount, mnt_rcu)); } /* call under rcu_read_lock */ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq) { struct mount *mnt; if (read_seqretry(&mount_lock, seq)) return false; if (bastard == NULL) return true; mnt = real_mount(bastard); mnt_add_count(mnt, 1); if (likely(!read_seqretry(&mount_lock, seq))) return true; if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { mnt_add_count(mnt, -1); return false; } rcu_read_unlock(); mntput(bastard); rcu_read_lock(); return false; } /* * find the first mount at @dentry on vfsmount @mnt. * call under rcu_read_lock() */ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) { struct hlist_head *head = m_hash(mnt, dentry); struct mount *p; hlist_for_each_entry_rcu(p, head, mnt_hash) if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) return p; return NULL; } /* * find the last mount at @dentry on vfsmount @mnt. * mount_lock must be held. */ struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) { struct mount *p, *res; res = p = __lookup_mnt(mnt, dentry); if (!p) goto out; hlist_for_each_entry_continue(p, mnt_hash) { if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) break; res = p; } out: return res; } /* * lookup_mnt - Return the first child mount mounted at path * * "First" means first mounted chronologically. If you create the * following mounts: * * mount /dev/sda1 /mnt * mount /dev/sda2 /mnt * mount /dev/sda3 /mnt * * Then lookup_mnt() on the base /mnt dentry in the root mount will * return successively the root dentry and vfsmount of /dev/sda1, then * /dev/sda2, then /dev/sda3, then NULL. * * lookup_mnt takes a reference to the found vfsmount. */ struct vfsmount *lookup_mnt(struct path *path) { struct mount *child_mnt; struct vfsmount *m; unsigned seq; rcu_read_lock(); do { seq = read_seqbegin(&mount_lock); child_mnt = __lookup_mnt(path->mnt, path->dentry); m = child_mnt ? 
&child_mnt->mnt : NULL; } while (!legitimize_mnt(m, seq)); rcu_read_unlock(); return m; } static struct mountpoint *new_mountpoint(struct dentry *dentry) { struct hlist_head *chain = mp_hash(dentry); struct mountpoint *mp; int ret; hlist_for_each_entry(mp, chain, m_hash) { if (mp->m_dentry == dentry) { /* might be worth a WARN_ON() */ if (d_unlinked(dentry)) return ERR_PTR(-ENOENT); mp->m_count++; return mp; } } mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); if (!mp) return ERR_PTR(-ENOMEM); ret = d_set_mounted(dentry); if (ret) { kfree(mp); return ERR_PTR(ret); } mp->m_dentry = dentry; mp->m_count = 1; hlist_add_head(&mp->m_hash, chain); return mp; } static void put_mountpoint(struct mountpoint *mp) { if (!--mp->m_count) { struct dentry *dentry = mp->m_dentry; spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_MOUNTED; spin_unlock(&dentry->d_lock); hlist_del(&mp->m_hash); kfree(mp); } } static inline int check_mnt(struct mount *mnt) { return mnt->mnt_ns == current->nsproxy->mnt_ns; } /* * vfsmount lock must be held for write */ static void touch_mnt_namespace(struct mnt_namespace *ns) { if (ns) { ns->event = ++event; wake_up_interruptible(&ns->poll); } } /* * vfsmount lock must be held for write */ static void __touch_mnt_namespace(struct mnt_namespace *ns) { if (ns && ns->event != event) { ns->event = event; wake_up_interruptible(&ns->poll); } } /* * vfsmount lock must be held for write */ static void detach_mnt(struct mount *mnt, struct path *old_path) { old_path->dentry = mnt->mnt_mountpoint; old_path->mnt = &mnt->mnt_parent->mnt; mnt->mnt_parent = mnt; mnt->mnt_mountpoint = mnt->mnt.mnt_root; list_del_init(&mnt->mnt_child); hlist_del_init_rcu(&mnt->mnt_hash); put_mountpoint(mnt->mnt_mp); mnt->mnt_mp = NULL; } /* * vfsmount lock must be held for write */ void mnt_set_mountpoint(struct mount *mnt, struct mountpoint *mp, struct mount *child_mnt) { mp->m_count++; mnt_add_count(mnt, 1); /* essentially, that's mntget */ child_mnt->mnt_mountpoint = dget(mp->m_dentry); child_mnt->mnt_parent = mnt; child_mnt->mnt_mp = mp; } /* * vfsmount lock must be held for write */ static void attach_mnt(struct mount *mnt, struct mount *parent, struct mountpoint *mp) { mnt_set_mountpoint(parent, mp, mnt); hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry)); list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); } /* * vfsmount lock must be held for write */ static void commit_tree(struct mount *mnt, struct mount *shadows) { struct mount *parent = mnt->mnt_parent; struct mount *m; LIST_HEAD(head); struct mnt_namespace *n = parent->mnt_ns; BUG_ON(parent == mnt); list_add_tail(&head, &mnt->mnt_list); list_for_each_entry(m, &head, mnt_list) m->mnt_ns = n; list_splice(&head, n->list.prev); if (shadows) hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash); else hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mnt->mnt_mountpoint)); list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); touch_mnt_namespace(n); } static struct mount *next_mnt(struct mount *p, struct mount *root) { struct list_head *next = p->mnt_mounts.next; if (next == &p->mnt_mounts) { while (1) { if (p == root) return NULL; next = p->mnt_child.next; if (next != &p->mnt_parent->mnt_mounts) break; p = p->mnt_parent; } } return list_entry(next, struct mount, mnt_child); } static struct mount *skip_mnt_tree(struct mount *p) { struct list_head *prev = p->mnt_mounts.prev; while (prev != &p->mnt_mounts) { p = list_entry(prev, struct mount, mnt_child); prev = p->mnt_mounts.prev; } return p; } struct vfsmount * 
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data) { struct mount *mnt; struct dentry *root; if (!type) return ERR_PTR(-ENODEV); mnt = alloc_vfsmnt(name); if (!mnt) return ERR_PTR(-ENOMEM); if (flags & MS_KERNMOUNT) mnt->mnt.mnt_flags = MNT_INTERNAL; root = mount_fs(type, flags, name, data); if (IS_ERR(root)) { mnt_free_id(mnt); free_vfsmnt(mnt); return ERR_CAST(root); } mnt->mnt.mnt_root = root; mnt->mnt.mnt_sb = root->d_sb; mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; lock_mount_hash(); list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts); unlock_mount_hash(); return &mnt->mnt; } EXPORT_SYMBOL_GPL(vfs_kern_mount); static struct mount *clone_mnt(struct mount *old, struct dentry *root, int flag) { struct super_block *sb = old->mnt.mnt_sb; struct mount *mnt; int err; mnt = alloc_vfsmnt(old->mnt_devname); if (!mnt) return ERR_PTR(-ENOMEM); if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE)) mnt->mnt_group_id = 0; /* not a peer of original */ else mnt->mnt_group_id = old->mnt_group_id; if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { err = mnt_alloc_group_id(mnt); if (err) goto out_free; } mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); /* Don't allow unprivileged users to change mount flags */ if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY)) mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; /* Don't allow unprivileged users to reveal what is under a mount */ if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire)) mnt->mnt.mnt_flags |= MNT_LOCKED; atomic_inc(&sb->s_active); mnt->mnt.mnt_sb = sb; mnt->mnt.mnt_root = dget(root); mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; lock_mount_hash(); list_add_tail(&mnt->mnt_instance, &sb->s_mounts); unlock_mount_hash(); if ((flag & CL_SLAVE) || ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) { list_add(&mnt->mnt_slave, &old->mnt_slave_list); mnt->mnt_master = old; CLEAR_MNT_SHARED(mnt); } else if (!(flag & CL_PRIVATE)) { if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) list_add(&mnt->mnt_share, &old->mnt_share); if (IS_MNT_SLAVE(old)) list_add(&mnt->mnt_slave, &old->mnt_slave); mnt->mnt_master = old->mnt_master; } if (flag & CL_MAKE_SHARED) set_mnt_shared(mnt); /* stick the duplicate mount on the same expiry list * as the original if that was on one */ if (flag & CL_EXPIRE) { if (!list_empty(&old->mnt_expire)) list_add(&mnt->mnt_expire, &old->mnt_expire); } return mnt; out_free: mnt_free_id(mnt); free_vfsmnt(mnt); return ERR_PTR(err); } static void mntput_no_expire(struct mount *mnt) { put_again: rcu_read_lock(); mnt_add_count(mnt, -1); if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ rcu_read_unlock(); return; } lock_mount_hash(); if (mnt_get_count(mnt)) { rcu_read_unlock(); unlock_mount_hash(); return; } if (unlikely(mnt->mnt_pinned)) { mnt_add_count(mnt, mnt->mnt_pinned + 1); mnt->mnt_pinned = 0; rcu_read_unlock(); unlock_mount_hash(); acct_auto_close_mnt(&mnt->mnt); goto put_again; } if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { rcu_read_unlock(); unlock_mount_hash(); return; } mnt->mnt.mnt_flags |= MNT_DOOMED; rcu_read_unlock(); list_del(&mnt->mnt_instance); unlock_mount_hash(); /* * This probably indicates that somebody messed * up a mnt_want/drop_write() pair. If this * happens, the filesystem was probably unable * to make r/w->r/o transitions. */ /* * The locking used to deal with mnt_count decrement provides barriers, * so mnt_get_writers() below is safe. 
*/ WARN_ON(mnt_get_writers(mnt)); fsnotify_vfsmount_delete(&mnt->mnt); dput(mnt->mnt.mnt_root); deactivate_super(mnt->mnt.mnt_sb); mnt_free_id(mnt); call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt); } void mntput(struct vfsmount *mnt) { if (mnt) { struct mount *m = real_mount(mnt); /* avoid cacheline pingpong, hope gcc doesn't get "smart" */ if (unlikely(m->mnt_expiry_mark)) m->mnt_expiry_mark = 0; mntput_no_expire(m); } } EXPORT_SYMBOL(mntput); struct vfsmount *mntget(struct vfsmount *mnt) { if (mnt) mnt_add_count(real_mount(mnt), 1); return mnt; } EXPORT_SYMBOL(mntget); void mnt_pin(struct vfsmount *mnt) { lock_mount_hash(); real_mount(mnt)->mnt_pinned++; unlock_mount_hash(); } EXPORT_SYMBOL(mnt_pin); void mnt_unpin(struct vfsmount *m) { struct mount *mnt = real_mount(m); lock_mount_hash(); if (mnt->mnt_pinned) { mnt_add_count(mnt, 1); mnt->mnt_pinned--; } unlock_mount_hash(); } EXPORT_SYMBOL(mnt_unpin); static inline void mangle(struct seq_file *m, const char *s) { seq_escape(m, s, " \t\n\\"); } /* * Simple .show_options callback for filesystems which don't want to * implement more complex mount option showing. * * See also save_mount_options(). */ int generic_show_options(struct seq_file *m, struct dentry *root) { const char *options; rcu_read_lock(); options = rcu_dereference(root->d_sb->s_options); if (options != NULL && options[0]) { seq_putc(m, ','); mangle(m, options); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(generic_show_options); /* * If filesystem uses generic_show_options(), this function should be * called from the fill_super() callback. * * The .remount_fs callback usually needs to be handled in a special * way, to make sure, that previous options are not overwritten if the * remount fails. * * Also note, that if the filesystem's .remount_fs function doesn't * reset all options to their default value, but changes only newly * given options, then the displayed options will not reflect reality * any more. */ void save_mount_options(struct super_block *sb, char *options) { BUG_ON(sb->s_options); rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL)); } EXPORT_SYMBOL(save_mount_options); void replace_mount_options(struct super_block *sb, char *options) { char *old = sb->s_options; rcu_assign_pointer(sb->s_options, options); if (old) { synchronize_rcu(); kfree(old); } } EXPORT_SYMBOL(replace_mount_options); #ifdef CONFIG_PROC_FS /* iterator; we want it to have access to namespace_sem, thus here... 
*/ static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); down_read(&namespace_sem); if (p->cached_event == p->ns->event) { void *v = p->cached_mount; if (*pos == p->cached_index) return v; if (*pos == p->cached_index + 1) { v = seq_list_next(v, &p->ns->list, &p->cached_index); return p->cached_mount = v; } } p->cached_event = p->ns->event; p->cached_mount = seq_list_start(&p->ns->list, *pos); p->cached_index = *pos; return p->cached_mount; } static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); p->cached_mount = seq_list_next(v, &p->ns->list, pos); p->cached_index = *pos; return p->cached_mount; } static void m_stop(struct seq_file *m, void *v) { up_read(&namespace_sem); } static int m_show(struct seq_file *m, void *v) { struct proc_mounts *p = proc_mounts(m); struct mount *r = list_entry(v, struct mount, mnt_list); return p->show(m, &r->mnt); } const struct seq_operations mounts_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = m_show, }; #endif /* CONFIG_PROC_FS */ /** * may_umount_tree - check if a mount tree is busy * @mnt: root of mount tree * * This is called to check if a tree of mounts has any * open files, pwds, chroots or sub mounts that are * busy. */ int may_umount_tree(struct vfsmount *m) { struct mount *mnt = real_mount(m); int actual_refs = 0; int minimum_refs = 0; struct mount *p; BUG_ON(!m); /* write lock needed for mnt_get_count */ lock_mount_hash(); for (p = mnt; p; p = next_mnt(p, mnt)) { actual_refs += mnt_get_count(p); minimum_refs += 2; } unlock_mount_hash(); if (actual_refs > minimum_refs) return 0; return 1; } EXPORT_SYMBOL(may_umount_tree); /** * may_umount - check if a mount point is busy * @mnt: root of mount * * This is called to check if a mount point has any * open files, pwds, chroots or sub mounts. If the * mount has sub mounts this will return busy * regardless of whether the sub mounts are busy. * * Doesn't take quota and stuff into account. IOW, in some cases it will * give false negatives. The main reason why it's here is that we need * a non-destructive way to look for easily umountable filesystems. 
*/ int may_umount(struct vfsmount *mnt) { int ret = 1; down_read(&namespace_sem); lock_mount_hash(); if (propagate_mount_busy(real_mount(mnt), 2)) ret = 0; unlock_mount_hash(); up_read(&namespace_sem); return ret; } EXPORT_SYMBOL(may_umount); static HLIST_HEAD(unmounted); /* protected by namespace_sem */ static void namespace_unlock(void) { struct mount *mnt; struct hlist_head head = unmounted; if (likely(hlist_empty(&head))) { up_write(&namespace_sem); return; } head.first->pprev = &head.first; INIT_HLIST_HEAD(&unmounted); up_write(&namespace_sem); synchronize_rcu(); while (!hlist_empty(&head)) { mnt = hlist_entry(head.first, struct mount, mnt_hash); hlist_del_init(&mnt->mnt_hash); if (mnt->mnt_ex_mountpoint.mnt) path_put(&mnt->mnt_ex_mountpoint); mntput(&mnt->mnt); } } static inline void namespace_lock(void) { down_write(&namespace_sem); } /* * mount_lock must be held * namespace_sem must be held for write * how = 0 => just this tree, don't propagate * how = 1 => propagate; we know that nobody else has reference to any victims * how = 2 => lazy umount */ void umount_tree(struct mount *mnt, int how) { HLIST_HEAD(tmp_list); struct mount *p; struct mount *last = NULL; for (p = mnt; p; p = next_mnt(p, mnt)) { hlist_del_init_rcu(&p->mnt_hash); hlist_add_head(&p->mnt_hash, &tmp_list); } if (how) propagate_umount(&tmp_list); hlist_for_each_entry(p, &tmp_list, mnt_hash) { list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; if (how < 2) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; list_del_init(&p->mnt_child); if (mnt_has_parent(p)) { put_mountpoint(p->mnt_mp); /* move the reference to mountpoint into ->mnt_ex_mountpoint */ p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint; p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt; p->mnt_mountpoint = p->mnt.mnt_root; p->mnt_parent = p; p->mnt_mp = NULL; } change_mnt_propagation(p, MS_PRIVATE); last = p; } if (last) { last->mnt_hash.next = unmounted.first; unmounted.first = tmp_list.first; unmounted.first->pprev = &unmounted.first; } } static void shrink_submounts(struct mount *mnt); static int do_umount(struct mount *mnt, int flags) { struct super_block *sb = mnt->mnt.mnt_sb; int retval; retval = security_sb_umount(&mnt->mnt, flags); if (retval) return retval; /* * Allow userspace to request a mountpoint be expired rather than * unmounting unconditionally. Unmount only happens if: * (1) the mark is already set (the mark is cleared by mntput()) * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] */ if (flags & MNT_EXPIRE) { if (&mnt->mnt == current->fs->root.mnt || flags & (MNT_FORCE | MNT_DETACH)) return -EINVAL; /* * probably don't strictly need the lock here if we examined * all race cases, but it's a slowpath. */ lock_mount_hash(); if (mnt_get_count(mnt) != 2) { unlock_mount_hash(); return -EBUSY; } unlock_mount_hash(); if (!xchg(&mnt->mnt_expiry_mark, 1)) return -EAGAIN; } /* * If we may have to abort operations to get out of this * mount, and they will themselves hold resources we must * allow the fs to do things. In the Unix tradition of * 'Gee thats tricky lets do it in userspace' the umount_begin * might fail to complete on the first run through as other tasks * must return, and the like. Thats for the mount program to worry * about for the moment. */ if (flags & MNT_FORCE && sb->s_op->umount_begin) { sb->s_op->umount_begin(sb); } /* * No sense to grab the lock for this test, but test itself looks * somewhat bogus. Suggestions for better replacement? * Ho-hum... 
In principle, we might treat that as umount + switch * to rootfs. GC would eventually take care of the old vfsmount. * Actually it makes sense, especially if rootfs would contain a * /reboot - static binary that would close all descriptors and * call reboot(9). Then init(8) could umount root and exec /reboot. */ if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { /* * Special case for "unmounting" root ... * we just try to remount it readonly. */ down_write(&sb->s_umount); if (!(sb->s_flags & MS_RDONLY)) retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); up_write(&sb->s_umount); return retval; } namespace_lock(); lock_mount_hash(); event++; if (flags & MNT_DETACH) { if (!list_empty(&mnt->mnt_list)) umount_tree(mnt, 2); retval = 0; } else { shrink_submounts(mnt); retval = -EBUSY; if (!propagate_mount_busy(mnt, 2)) { if (!list_empty(&mnt->mnt_list)) umount_tree(mnt, 1); retval = 0; } } unlock_mount_hash(); namespace_unlock(); return retval; } /* * Is the caller allowed to modify his namespace? */ static inline bool may_mount(void) { return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); } /* * Now umount can handle mount points as well as block devices. * This is important for filesystems which use unnamed block devices. * * We now support a flag for forced unmount like the other 'big iron' * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD */ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) { struct path path; struct mount *mnt; int retval; int lookup_flags = 0; if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) return -EINVAL; if (!may_mount()) return -EPERM; if (!(flags & UMOUNT_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path); if (retval) goto out; mnt = real_mount(path.mnt); retval = -EINVAL; if (path.dentry != path.mnt->mnt_root) goto dput_and_out; if (!check_mnt(mnt)) goto dput_and_out; if (mnt->mnt.mnt_flags & MNT_LOCKED) goto dput_and_out; retval = do_umount(mnt, flags); dput_and_out: /* we mustn't call path_put() as that would clear mnt_expiry_mark */ dput(path.dentry); mntput_no_expire(mnt); out: return retval; } #ifdef __ARCH_WANT_SYS_OLDUMOUNT /* * The 2.0 compatible umount. No flags. */ SYSCALL_DEFINE1(oldumount, char __user *, name) { return sys_umount(name, 0); } #endif static bool is_mnt_ns_file(struct dentry *dentry) { /* Is this a proxy for a mount namespace? */ struct inode *inode = dentry->d_inode; struct proc_ns *ei; if (!proc_ns_inode(inode)) return false; ei = get_proc_ns(inode); if (ei->ns_ops != &mntns_operations) return false; return true; } static bool mnt_ns_loop(struct dentry *dentry) { /* Could bind mounting the mount namespace inode cause a * mount namespace loop? 
*/ struct mnt_namespace *mnt_ns; if (!is_mnt_ns_file(dentry)) return false; mnt_ns = get_proc_ns(dentry->d_inode)->ns; return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; } struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, int flag) { struct mount *res, *p, *q, *r, *parent; if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) return ERR_PTR(-EINVAL); if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) return ERR_PTR(-EINVAL); res = q = clone_mnt(mnt, dentry, flag); if (IS_ERR(q)) return q; q->mnt.mnt_flags &= ~MNT_LOCKED; q->mnt_mountpoint = mnt->mnt_mountpoint; p = mnt; list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { struct mount *s; if (!is_subdir(r->mnt_mountpoint, dentry)) continue; for (s = r; s; s = next_mnt(s, r)) { if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(s)) { s = skip_mnt_tree(s); continue; } if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(s->mnt.mnt_root)) { s = skip_mnt_tree(s); continue; } while (p != s->mnt_parent) { p = p->mnt_parent; q = q->mnt_parent; } p = s; parent = q; q = clone_mnt(p, p->mnt.mnt_root, flag); if (IS_ERR(q)) goto out; lock_mount_hash(); list_add_tail(&q->mnt_list, &res->mnt_list); attach_mnt(q, parent, p->mnt_mp); unlock_mount_hash(); } } return res; out: if (res) { lock_mount_hash(); umount_tree(res, 0); unlock_mount_hash(); } return q; } /* Caller should check returned pointer for errors */ struct vfsmount *collect_mounts(struct path *path) { struct mount *tree; namespace_lock(); tree = copy_tree(real_mount(path->mnt), path->dentry, CL_COPY_ALL | CL_PRIVATE); namespace_unlock(); if (IS_ERR(tree)) return ERR_CAST(tree); return &tree->mnt; } void drop_collected_mounts(struct vfsmount *mnt) { namespace_lock(); lock_mount_hash(); umount_tree(real_mount(mnt), 0); unlock_mount_hash(); namespace_unlock(); } int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, struct vfsmount *root) { struct mount *mnt; int res = f(root, arg); if (res) return res; list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { res = f(&mnt->mnt, arg); if (res) return res; } return 0; } static void cleanup_group_ids(struct mount *mnt, struct mount *end) { struct mount *p; for (p = mnt; p != end; p = next_mnt(p, mnt)) { if (p->mnt_group_id && !IS_MNT_SHARED(p)) mnt_release_group_id(p); } } static int invent_group_ids(struct mount *mnt, bool recurse) { struct mount *p; for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { int err = mnt_alloc_group_id(p); if (err) { cleanup_group_ids(mnt, p); return err; } } } return 0; } /* * @source_mnt : mount tree to be attached * @nd : place the mount tree @source_mnt is attached * @parent_nd : if non-null, detach the source_mnt from its parent and * store the parent mount and mountpoint dentry. * (done when source_mnt is moved) * * NOTE: in the table below explains the semantics when a source mount * of a given type is attached to a destination mount of a given type. 
* --------------------------------------------------------------------------- * | BIND MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | | * | v | | | | | * |************************************************************************** * | shared | shared (++) | shared (+) | shared(+++)| invalid | * | | | | | | * |non-shared| shared (+) | private | slave (*) | invalid | * *************************************************************************** * A bind operation clones the source mount and mounts the clone on the * destination mount. * * (++) the cloned mount is propagated to all the mounts in the propagation * tree of the destination mount and the cloned mount is added to * the peer group of the source mount. * (+) the cloned mount is created under the destination mount and is marked * as shared. The cloned mount is added to the peer group of the source * mount. * (+++) the mount is propagated to all the mounts in the propagation tree * of the destination mount and the cloned mount is made slave * of the same master as that of the source mount. The cloned mount * is marked as 'shared and slave'. * (*) the cloned mount is made a slave of the same master as that of the * source mount. * * --------------------------------------------------------------------------- * | MOVE MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | | * | v | | | | | * |************************************************************************** * | shared | shared (+) | shared (+) | shared(+++) | invalid | * | | | | | | * |non-shared| shared (+*) | private | slave (*) | unbindable | * *************************************************************************** * * (+) the mount is moved to the destination. And is then propagated to * all the mounts in the propagation tree of the destination mount. * (+*) the mount is moved to the destination. * (+++) the mount is moved to the destination and is then propagated to * all the mounts belonging to the destination mount's propagation tree. * the mount is marked as 'shared and slave'. * (*) the mount continues to be a slave at the new location. * * if the source mount is a tree, the operations explained above is * applied to each mount in the tree. * Must be called without spinlocks held, since this function can sleep * in allocations. 
*/ static int attach_recursive_mnt(struct mount *source_mnt, struct mount *dest_mnt, struct mountpoint *dest_mp, struct path *parent_path) { HLIST_HEAD(tree_list); struct mount *child, *p; struct hlist_node *n; int err; if (IS_MNT_SHARED(dest_mnt)) { err = invent_group_ids(source_mnt, true); if (err) goto out; err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); lock_mount_hash(); if (err) goto out_cleanup_ids; for (p = source_mnt; p; p = next_mnt(p, source_mnt)) set_mnt_shared(p); } else { lock_mount_hash(); } if (parent_path) { detach_mnt(source_mnt, parent_path); attach_mnt(source_mnt, dest_mnt, dest_mp); touch_mnt_namespace(source_mnt->mnt_ns); } else { mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); commit_tree(source_mnt, NULL); } hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { struct mount *q; hlist_del_init(&child->mnt_hash); q = __lookup_mnt_last(&child->mnt_parent->mnt, child->mnt_mountpoint); commit_tree(child, q); } unlock_mount_hash(); return 0; out_cleanup_ids: while (!hlist_empty(&tree_list)) { child = hlist_entry(tree_list.first, struct mount, mnt_hash); umount_tree(child, 0); } unlock_mount_hash(); cleanup_group_ids(source_mnt, NULL); out: return err; } static struct mountpoint *lock_mount(struct path *path) { struct vfsmount *mnt; struct dentry *dentry = path->dentry; retry: mutex_lock(&dentry->d_inode->i_mutex); if (unlikely(cant_mount(dentry))) { mutex_unlock(&dentry->d_inode->i_mutex); return ERR_PTR(-ENOENT); } namespace_lock(); mnt = lookup_mnt(path); if (likely(!mnt)) { struct mountpoint *mp = new_mountpoint(dentry); if (IS_ERR(mp)) { namespace_unlock(); mutex_unlock(&dentry->d_inode->i_mutex); return mp; } return mp; } namespace_unlock(); mutex_unlock(&path->dentry->d_inode->i_mutex); path_put(path); path->mnt = mnt; dentry = path->dentry = dget(mnt->mnt_root); goto retry; } static void unlock_mount(struct mountpoint *where) { struct dentry *dentry = where->m_dentry; put_mountpoint(where); namespace_unlock(); mutex_unlock(&dentry->d_inode->i_mutex); } static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) { if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER) return -EINVAL; if (S_ISDIR(mp->m_dentry->d_inode->i_mode) != S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode)) return -ENOTDIR; return attach_recursive_mnt(mnt, p, mp, NULL); } /* * Sanity check the flags to change_mnt_propagation. */ static int flags_to_propagation_type(int flags) { int type = flags & ~(MS_REC | MS_SILENT); /* Fail if any non-propagation flags are set */ if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) return 0; /* Only one propagation flag should be set */ if (!is_power_of_2(type)) return 0; return type; } /* * recursively change the type of the mountpoint. */ static int do_change_type(struct path *path, int flag) { struct mount *m; struct mount *mnt = real_mount(path->mnt); int recurse = flag & MS_REC; int type; int err = 0; if (path->dentry != path->mnt->mnt_root) return -EINVAL; type = flags_to_propagation_type(flag); if (!type) return -EINVAL; namespace_lock(); if (type == MS_SHARED) { err = invent_group_ids(mnt, recurse); if (err) goto out_unlock; } lock_mount_hash(); for (m = mnt; m; m = (recurse ? 
next_mnt(m, mnt) : NULL)) change_mnt_propagation(m, type); unlock_mount_hash(); out_unlock: namespace_unlock(); return err; } static bool has_locked_children(struct mount *mnt, struct dentry *dentry) { struct mount *child; list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { if (!is_subdir(child->mnt_mountpoint, dentry)) continue; if (child->mnt.mnt_flags & MNT_LOCKED) return true; } return false; } /* * do loopback mount. */ static int do_loopback(struct path *path, const char *old_name, int recurse) { struct path old_path; struct mount *mnt = NULL, *old, *parent; struct mountpoint *mp; int err; if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); if (err) return err; err = -EINVAL; if (mnt_ns_loop(old_path.dentry)) goto out; mp = lock_mount(path); err = PTR_ERR(mp); if (IS_ERR(mp)) goto out; old = real_mount(old_path.mnt); parent = real_mount(path->mnt); err = -EINVAL; if (IS_MNT_UNBINDABLE(old)) goto out2; if (!check_mnt(parent) || !check_mnt(old)) goto out2; if (!recurse && has_locked_children(old, old_path.dentry)) goto out2; if (recurse) mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE); else mnt = clone_mnt(old, old_path.dentry, 0); if (IS_ERR(mnt)) { err = PTR_ERR(mnt); goto out2; } mnt->mnt.mnt_flags &= ~MNT_LOCKED; err = graft_tree(mnt, parent, mp); if (err) { lock_mount_hash(); umount_tree(mnt, 0); unlock_mount_hash(); } out2: unlock_mount(mp); out: path_put(&old_path); return err; } static int change_mount_flags(struct vfsmount *mnt, int ms_flags) { int error = 0; int readonly_request = 0; if (ms_flags & MS_RDONLY) readonly_request = 1; if (readonly_request == __mnt_is_readonly(mnt)) return 0; if (mnt->mnt_flags & MNT_LOCK_READONLY) return -EPERM; if (readonly_request) error = mnt_make_readonly(real_mount(mnt)); else __mnt_unmake_readonly(real_mount(mnt)); return error; } /* * change filesystem flags. dir should be a physical root of filesystem. * If you've mounted a non-root directory somewhere and want to do remount * on it - tough luck. 
*/ static int do_remount(struct path *path, int flags, int mnt_flags, void *data) { int err; struct super_block *sb = path->mnt->mnt_sb; struct mount *mnt = real_mount(path->mnt); if (!check_mnt(mnt)) return -EINVAL; if (path->dentry != path->mnt->mnt_root) return -EINVAL; err = security_sb_remount(sb, data); if (err) return err; down_write(&sb->s_umount); if (flags & MS_BIND) err = change_mount_flags(path->mnt, flags); else if (!capable(CAP_SYS_ADMIN)) err = -EPERM; else err = do_remount_sb(sb, flags, data, 0); if (!err) { lock_mount_hash(); mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK; mnt->mnt.mnt_flags = mnt_flags; touch_mnt_namespace(mnt->mnt_ns); unlock_mount_hash(); } up_write(&sb->s_umount); return err; } static inline int tree_contains_unbindable(struct mount *mnt) { struct mount *p; for (p = mnt; p; p = next_mnt(p, mnt)) { if (IS_MNT_UNBINDABLE(p)) return 1; } return 0; } static int do_move_mount(struct path *path, const char *old_name) { struct path old_path, parent_path; struct mount *p; struct mount *old; struct mountpoint *mp; int err; if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); if (err) return err; mp = lock_mount(path); err = PTR_ERR(mp); if (IS_ERR(mp)) goto out; old = real_mount(old_path.mnt); p = real_mount(path->mnt); err = -EINVAL; if (!check_mnt(p) || !check_mnt(old)) goto out1; if (old->mnt.mnt_flags & MNT_LOCKED) goto out1; err = -EINVAL; if (old_path.dentry != old_path.mnt->mnt_root) goto out1; if (!mnt_has_parent(old)) goto out1; if (S_ISDIR(path->dentry->d_inode->i_mode) != S_ISDIR(old_path.dentry->d_inode->i_mode)) goto out1; /* * Don't move a mount residing in a shared parent. */ if (IS_MNT_SHARED(old->mnt_parent)) goto out1; /* * Don't move a mount tree containing unbindable mounts to a destination * mount which is shared. */ if (IS_MNT_SHARED(p) && tree_contains_unbindable(old)) goto out1; err = -ELOOP; for (; mnt_has_parent(p); p = p->mnt_parent) if (p == old) goto out1; err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path); if (err) goto out1; /* if the mount is moved, it should no longer be expire * automatically */ list_del_init(&old->mnt_expire); out1: unlock_mount(mp); out: if (!err) path_put(&parent_path); path_put(&old_path); return err; } static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype) { int err; const char *subtype = strchr(fstype, '.'); if (subtype) { subtype++; err = -EINVAL; if (!subtype[0]) goto err; } else subtype = ""; mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL); err = -ENOMEM; if (!mnt->mnt_sb->s_subtype) goto err; return mnt; err: mntput(mnt); return ERR_PTR(err); } /* * add a mount into a namespace's mount tree */ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags) { struct mountpoint *mp; struct mount *parent; int err; mnt_flags &= ~MNT_INTERNAL_FLAGS; mp = lock_mount(path); if (IS_ERR(mp)) return PTR_ERR(mp); parent = real_mount(path->mnt); err = -EINVAL; if (unlikely(!check_mnt(parent))) { /* that's acceptable only for automounts done in private ns */ if (!(mnt_flags & MNT_SHRINKABLE)) goto unlock; /* ... 
and for those we'd better have mountpoint still alive */ if (!parent->mnt_ns) goto unlock; } /* Refuse the same filesystem on the same mount point */ err = -EBUSY; if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path->mnt->mnt_root == path->dentry) goto unlock; err = -EINVAL; if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode)) goto unlock; newmnt->mnt.mnt_flags = mnt_flags; err = graft_tree(newmnt, parent, mp); unlock: unlock_mount(mp); return err; } /* * create a new mount for userspace and request it to be added into the * namespace's tree */ static int do_new_mount(struct path *path, const char *fstype, int flags, int mnt_flags, const char *name, void *data) { struct file_system_type *type; struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; struct vfsmount *mnt; int err; if (!fstype) return -EINVAL; type = get_fs_type(fstype); if (!type) return -ENODEV; if (user_ns != &init_user_ns) { if (!(type->fs_flags & FS_USERNS_MOUNT)) { put_filesystem(type); return -EPERM; } /* Only in special cases allow devices from mounts * created outside the initial user namespace. */ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) { flags |= MS_NODEV; mnt_flags |= MNT_NODEV; } } mnt = vfs_kern_mount(type, flags, name, data); if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && !mnt->mnt_sb->s_subtype) mnt = fs_set_subtype(mnt, fstype); put_filesystem(type); if (IS_ERR(mnt)) return PTR_ERR(mnt); err = do_add_mount(real_mount(mnt), path, mnt_flags); if (err) mntput(mnt); return err; } int finish_automount(struct vfsmount *m, struct path *path) { struct mount *mnt = real_mount(m); int err; /* The new mount record should have at least 2 refs to prevent it being * expired before we get a chance to add it */ BUG_ON(mnt_get_count(mnt) < 2); if (m->mnt_sb == path->mnt->mnt_sb && m->mnt_root == path->dentry) { err = -ELOOP; goto fail; } err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); if (!err) return 0; fail: /* remove m from any expiration list it may be on */ if (!list_empty(&mnt->mnt_expire)) { namespace_lock(); list_del_init(&mnt->mnt_expire); namespace_unlock(); } mntput(m); mntput(m); return err; } /** * mnt_set_expiry - Put a mount on an expiration list * @mnt: The mount to list. * @expiry_list: The list to add the mount to. 
*/ void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) { namespace_lock(); list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); namespace_unlock(); } EXPORT_SYMBOL(mnt_set_expiry); /* * process a list of expirable mountpoints with the intent of discarding any * mountpoints that aren't in use and haven't been touched since last we came * here */ void mark_mounts_for_expiry(struct list_head *mounts) { struct mount *mnt, *next; LIST_HEAD(graveyard); if (list_empty(mounts)) return; namespace_lock(); lock_mount_hash(); /* extract from the expiration list every vfsmount that matches the * following criteria: * - only referenced by its parent vfsmount * - still marked for expiry (marked on the last call here; marks are * cleared by mntput()) */ list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { if (!xchg(&mnt->mnt_expiry_mark, 1) || propagate_mount_busy(mnt, 1)) continue; list_move(&mnt->mnt_expire, &graveyard); } while (!list_empty(&graveyard)) { mnt = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(mnt->mnt_ns); umount_tree(mnt, 1); } unlock_mount_hash(); namespace_unlock(); } EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); /* * Ripoff of 'select_parent()' * * search the list of submounts for a given mountpoint, and move any * shrinkable submounts to the 'graveyard' list. */ static int select_submounts(struct mount *parent, struct list_head *graveyard) { struct mount *this_parent = parent; struct list_head *next; int found = 0; repeat: next = this_parent->mnt_mounts.next; resume: while (next != &this_parent->mnt_mounts) { struct list_head *tmp = next; struct mount *mnt = list_entry(tmp, struct mount, mnt_child); next = tmp->next; if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) continue; /* * Descend a level if the d_mounts list is non-empty. */ if (!list_empty(&mnt->mnt_mounts)) { this_parent = mnt; goto repeat; } if (!propagate_mount_busy(mnt, 1)) { list_move_tail(&mnt->mnt_expire, graveyard); found++; } } /* * All done at this level ... ascend and resume the search */ if (this_parent != parent) { next = this_parent->mnt_child.next; this_parent = this_parent->mnt_parent; goto resume; } return found; } /* * process a list of expirable mountpoints with the intent of discarding any * submounts of a specific parent mountpoint * * mount_lock must be held for write */ static void shrink_submounts(struct mount *mnt) { LIST_HEAD(graveyard); struct mount *m; /* extract submounts of 'mountpoint' from the expiration list */ while (select_submounts(mnt, &graveyard)) { while (!list_empty(&graveyard)) { m = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(m->mnt_ns); umount_tree(m, 1); } } } /* * Some copy_from_user() implementations do not return the exact number of * bytes remaining to copy on a fault. But copy_mount_options() requires that. * Note that this function differs from copy_from_user() in that it will oops * on bad values of `to', rather than returning a short copy. 
*/ static long exact_copy_from_user(void *to, const void __user * from, unsigned long n) { char *t = to; const char __user *f = from; char c; if (!access_ok(VERIFY_READ, from, n)) return n; while (n) { if (__get_user(c, f)) { memset(t, 0, n); break; } *t++ = c; f++; n--; } return n; } int copy_mount_options(const void __user * data, unsigned long *where) { int i; unsigned long page; unsigned long size; *where = 0; if (!data) return 0; if (!(page = __get_free_page(GFP_KERNEL))) return -ENOMEM; /* We only care that *some* data at the address the user * gave us is valid. Just in case, we'll zero * the remainder of the page. */ /* copy_from_user cannot cross TASK_SIZE ! */ size = TASK_SIZE - (unsigned long)data; if (size > PAGE_SIZE) size = PAGE_SIZE; i = size - exact_copy_from_user((void *)page, data, size); if (!i) { free_page(page); return -EFAULT; } if (i != PAGE_SIZE) memset((char *)page + i, 0, PAGE_SIZE - i); *where = page; return 0; } int copy_mount_string(const void __user *data, char **where) { char *tmp; if (!data) { *where = NULL; return 0; } tmp = strndup_user(data, PAGE_SIZE); if (IS_ERR(tmp)) return PTR_ERR(tmp); *where = tmp; return 0; } /* * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to * be given to the mount() call (ie: read-only, no-dev, no-suid etc). * * data is a (void *) that can point to any structure up to * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent * information (or be NULL). * * Pre-0.97 versions of mount() didn't have a flags word. * When the flags word was introduced its top half was required * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. * Therefore, if this magic number is present, it carries no information * and must be discarded. */ long do_mount(const char *dev_name, const char *dir_name, const char *type_page, unsigned long flags, void *data_page) { struct path path; int retval = 0; int mnt_flags = 0; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) flags &= ~MS_MGC_MSK; /* Basic sanity checks */ if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE)) return -EINVAL; if (data_page) ((char *)data_page)[PAGE_SIZE - 1] = 0; /* ... 
and get the mountpoint */ retval = kern_path(dir_name, LOOKUP_FOLLOW, &path); if (retval) return retval; retval = security_sb_mount(dev_name, &path, type_page, flags, data_page); if (!retval && !may_mount()) retval = -EPERM; if (retval) goto dput_out; /* Default to relatime unless overriden */ if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) mnt_flags |= MNT_NOSUID; if (flags & MS_NODEV) mnt_flags |= MNT_NODEV; if (flags & MS_NOEXEC) mnt_flags |= MNT_NOEXEC; if (flags & MS_NOATIME) mnt_flags |= MNT_NOATIME; if (flags & MS_NODIRATIME) mnt_flags |= MNT_NODIRATIME; if (flags & MS_STRICTATIME) mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); else if (flags & MS_BIND) retval = do_loopback(&path, dev_name, flags & MS_REC); else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) retval = do_change_type(&path, flags); else if (flags & MS_MOVE) retval = do_move_mount(&path, dev_name); else retval = do_new_mount(&path, type_page, flags, mnt_flags, dev_name, data_page); dput_out: path_put(&path); return retval; } static void free_mnt_ns(struct mnt_namespace *ns) { proc_free_inum(ns->proc_inum); put_user_ns(ns->user_ns); kfree(ns); } /* * Assign a sequence number so we can detect when we attempt to bind * mount a reference to an older mount namespace into the current * mount namespace, preventing reference counting loops. A 64bit * number incrementing at 10Ghz will take 12,427 years to wrap which * is effectively never, so we can ignore the possibility. */ static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1); static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) { struct mnt_namespace *new_ns; int ret; new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL); if (!new_ns) return ERR_PTR(-ENOMEM); ret = proc_alloc_inum(&new_ns->proc_inum); if (ret) { kfree(new_ns); return ERR_PTR(ret); } new_ns->seq = atomic64_add_return(1, &mnt_ns_seq); atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); init_waitqueue_head(&new_ns->poll); new_ns->event = 0; new_ns->user_ns = get_user_ns(user_ns); return new_ns; } struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; struct mount *p, *q; struct mount *old; struct mount *new; int copy_flags; BUG_ON(!ns); if (likely(!(flags & CLONE_NEWNS))) { get_mnt_ns(ns); return ns; } old = ns->root; new_ns = alloc_mnt_ns(user_ns); if (IS_ERR(new_ns)) return new_ns; namespace_lock(); /* First pass: copy the tree topology */ copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; if (user_ns != ns->user_ns) copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { namespace_unlock(); free_mnt_ns(new_ns); return ERR_CAST(new); } new_ns->root = new; list_add_tail(&new_ns->list, &new->mnt_list); /* * Second pass: switch the tsk->fs->* elements and mark new vfsmounts * as belonging to new namespace. We have already acquired a private * fs_struct, so tsk->fs->lock is not needed. 
*/ p = old; q = new; while (p) { q->mnt_ns = new_ns; if (new_fs) { if (&p->mnt == new_fs->root.mnt) { new_fs->root.mnt = mntget(&q->mnt); rootmnt = &p->mnt; } if (&p->mnt == new_fs->pwd.mnt) { new_fs->pwd.mnt = mntget(&q->mnt); pwdmnt = &p->mnt; } } p = next_mnt(p, old); q = next_mnt(q, new); if (!q) break; while (p->mnt.mnt_root != q->mnt.mnt_root) p = next_mnt(p, old); } namespace_unlock(); if (rootmnt) mntput(rootmnt); if (pwdmnt) mntput(pwdmnt); return new_ns; } /** * create_mnt_ns - creates a private namespace and adds a root filesystem * @mnt: pointer to the new root filesystem mountpoint */ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m) { struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns); if (!IS_ERR(new_ns)) { struct mount *mnt = real_mount(m); mnt->mnt_ns = new_ns; new_ns->root = mnt; list_add(&mnt->mnt_list, &new_ns->list); } else { mntput(m); } return new_ns; } struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) { struct mnt_namespace *ns; struct super_block *s; struct path path; int err; ns = create_mnt_ns(mnt); if (IS_ERR(ns)) return ERR_CAST(ns); err = vfs_path_lookup(mnt->mnt_root, mnt, name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); put_mnt_ns(ns); if (err) return ERR_PTR(err); /* trade a vfsmount reference for active sb one */ s = path.mnt->mnt_sb; atomic_inc(&s->s_active); mntput(path.mnt); /* lock the sucker */ down_write(&s->s_umount); /* ... and return the root of (sub)tree on it */ return path.dentry; } EXPORT_SYMBOL(mount_subtree); SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, char __user *, type, unsigned long, flags, void __user *, data) { int ret; char *kernel_type; struct filename *kernel_dir; char *kernel_dev; unsigned long data_page; ret = copy_mount_string(type, &kernel_type); if (ret < 0) goto out_type; kernel_dir = getname(dir_name); if (IS_ERR(kernel_dir)) { ret = PTR_ERR(kernel_dir); goto out_dir; } ret = copy_mount_string(dev_name, &kernel_dev); if (ret < 0) goto out_dev; ret = copy_mount_options(data, &data_page); if (ret < 0) goto out_data; ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags, (void *) data_page); free_page(data_page); out_data: kfree(kernel_dev); out_dev: putname(kernel_dir); out_dir: kfree(kernel_type); out_type: return ret; } /* * Return true if path is reachable from root * * namespace_sem or mount_lock is held */ bool is_path_reachable(struct mount *mnt, struct dentry *dentry, const struct path *root) { while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { dentry = mnt->mnt_mountpoint; mnt = mnt->mnt_parent; } return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); } int path_is_under(struct path *path1, struct path *path2) { int res; read_seqlock_excl(&mount_lock); res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); read_sequnlock_excl(&mount_lock); return res; } EXPORT_SYMBOL(path_is_under); /* * pivot_root Semantics: * Moves the root file system of the current process to the directory put_old, * makes new_root as the new root file system of the current process, and sets * root/cwd of all processes which had them on the current root to new_root. * * Restrictions: * The new_root and put_old must be directories, and must not be on the * same file system as the current process root. The put_old must be * underneath new_root, i.e. adding a non-zero number of /.. to the string * pointed to by put_old must yield the same directory as new_root. No other * file system may be mounted on put_old. After all, new_root is a mountpoint. 
* * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives * in this situation. * * Notes: * - we don't move root/cwd if they are not at the root (reason: if something * cared enough to change them, it's probably wrong to force them elsewhere) * - it's okay to pick a root that isn't the root of a file system, e.g. * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root * first. */ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, const char __user *, put_old) { struct path new, old, parent_path, root_parent, root; struct mount *new_mnt, *root_mnt, *old_mnt; struct mountpoint *old_mp, *root_mp; int error; if (!may_mount()) return -EPERM; error = user_path_dir(new_root, &new); if (error) goto out0; error = user_path_dir(put_old, &old); if (error) goto out1; error = security_sb_pivotroot(&old, &new); if (error) goto out2; get_fs_root(current->fs, &root); old_mp = lock_mount(&old); error = PTR_ERR(old_mp); if (IS_ERR(old_mp)) goto out3; error = -EINVAL; new_mnt = real_mount(new.mnt); root_mnt = real_mount(root.mnt); old_mnt = real_mount(old.mnt); if (IS_MNT_SHARED(old_mnt) || IS_MNT_SHARED(new_mnt->mnt_parent) || IS_MNT_SHARED(root_mnt->mnt_parent)) goto out4; if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) goto out4; if (new_mnt->mnt.mnt_flags & MNT_LOCKED) goto out4; error = -ENOENT; if (d_unlinked(new.dentry)) goto out4; error = -EBUSY; if (new_mnt == root_mnt || old_mnt == root_mnt) goto out4; /* loop, on the same file system */ error = -EINVAL; if (root.mnt->mnt_root != root.dentry) goto out4; /* not a mountpoint */ if (!mnt_has_parent(root_mnt)) goto out4; /* not attached */ root_mp = root_mnt->mnt_mp; if (new.mnt->mnt_root != new.dentry) goto out4; /* not a mountpoint */ if (!mnt_has_parent(new_mnt)) goto out4; /* not attached */ /* make sure we can reach put_old from new_root */ if (!is_path_reachable(old_mnt, old.dentry, &new)) goto out4; root_mp->m_count++; /* pin it so it won't go away */ lock_mount_hash(); detach_mnt(new_mnt, &parent_path); detach_mnt(root_mnt, &root_parent); if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { new_mnt->mnt.mnt_flags |= MNT_LOCKED; root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; } /* mount old root on put_old */ attach_mnt(root_mnt, old_mnt, old_mp); /* mount new_root on / */ attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp); touch_mnt_namespace(current->nsproxy->mnt_ns); unlock_mount_hash(); chroot_fs_refs(&root, &new); put_mountpoint(root_mp); error = 0; out4: unlock_mount(old_mp); if (!error) { path_put(&root_parent); path_put(&parent_path); } out3: path_put(&root); out2: path_put(&old); out1: path_put(&new); out0: return error; } static void __init init_mount_tree(void) { struct vfsmount *mnt; struct mnt_namespace *ns; struct path root; struct file_system_type *type; type = get_fs_type("rootfs"); if (!type) panic("Can't find rootfs type"); mnt = vfs_kern_mount(type, 0, "rootfs", NULL); put_filesystem(type); if (IS_ERR(mnt)) panic("Can't create rootfs"); ns = create_mnt_ns(mnt); if (IS_ERR(ns)) panic("Can't allocate initial namespace"); init_task.nsproxy->mnt_ns = ns; get_mnt_ns(ns); root.mnt = mnt; root.dentry = mnt->mnt_root; set_fs_pwd(current->fs, &root); set_fs_root(current->fs, &root); } void __init mnt_init(void) { unsigned u; int err; mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 
mount_hashtable = alloc_large_system_hash("Mount-cache", sizeof(struct hlist_head), mhash_entries, 19, 0, &m_hash_shift, &m_hash_mask, 0, 0); mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", sizeof(struct hlist_head), mphash_entries, 19, 0, &mp_hash_shift, &mp_hash_mask, 0, 0); if (!mount_hashtable || !mountpoint_hashtable) panic("Failed to allocate mount hash table\n"); for (u = 0; u <= m_hash_mask; u++) INIT_HLIST_HEAD(&mount_hashtable[u]); for (u = 0; u <= mp_hash_mask; u++) INIT_HLIST_HEAD(&mountpoint_hashtable[u]); kernfs_init(); err = sysfs_init(); if (err) printk(KERN_WARNING "%s: sysfs_init error: %d\n", __func__, err); fs_kobj = kobject_create_and_add("fs", NULL); if (!fs_kobj) printk(KERN_WARNING "%s: kobj create error\n", __func__); init_rootfs(); init_mount_tree(); } void put_mnt_ns(struct mnt_namespace *ns) { if (!atomic_dec_and_test(&ns->count)) return; drop_collected_mounts(&ns->root->mnt); free_mnt_ns(ns); } struct vfsmount *kern_mount_data(struct file_system_type *type, void *data) { struct vfsmount *mnt; mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data); if (!IS_ERR(mnt)) { /* * it is a longterm mount, don't release mnt until * we unmount before file sys is unregistered */ real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; } return mnt; } EXPORT_SYMBOL_GPL(kern_mount_data); void kern_unmount(struct vfsmount *mnt) { /* release long term mount so mount point can be released */ if (!IS_ERR_OR_NULL(mnt)) { real_mount(mnt)->mnt_ns = NULL; synchronize_rcu(); /* yecchhh... */ mntput(mnt); } } EXPORT_SYMBOL(kern_unmount); bool our_mnt(struct vfsmount *mnt) { return check_mnt(real_mount(mnt)); } bool current_chrooted(void) { /* Does the current process have a non-standard root */ struct path ns_root; struct path fs_root; bool chrooted; /* Find the namespace root */ ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt; ns_root.dentry = ns_root.mnt->mnt_root; path_get(&ns_root); while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) ; get_fs_root(current->fs, &fs_root); chrooted = !path_equal(&fs_root, &ns_root); path_put(&fs_root); path_put(&ns_root); return chrooted; } bool fs_fully_visible(struct file_system_type *type) { struct mnt_namespace *ns = current->nsproxy->mnt_ns; struct mount *mnt; bool visible = false; if (unlikely(!ns)) return false; down_read(&namespace_sem); list_for_each_entry(mnt, &ns->list, mnt_list) { struct mount *child; if (mnt->mnt.mnt_sb->s_type != type) continue; /* This mount is not fully visible if there are any child mounts * that cover anything except for empty directories. 
*/ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { struct inode *inode = child->mnt_mountpoint->d_inode; if (!S_ISDIR(inode->i_mode)) goto next; if (inode->i_nlink > 2) goto next; } visible = true; goto found; next: ; } found: up_read(&namespace_sem); return visible; } static void *mntns_get(struct task_struct *task) { struct mnt_namespace *ns = NULL; struct nsproxy *nsproxy; task_lock(task); nsproxy = task->nsproxy; if (nsproxy) { ns = nsproxy->mnt_ns; get_mnt_ns(ns); } task_unlock(task); return ns; } static void mntns_put(void *ns) { put_mnt_ns(ns); } static int mntns_install(struct nsproxy *nsproxy, void *ns) { struct fs_struct *fs = current->fs; struct mnt_namespace *mnt_ns = ns; struct path root; if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) || !ns_capable(current_user_ns(), CAP_SYS_CHROOT) || !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; if (fs->users != 1) return -EINVAL; get_mnt_ns(mnt_ns); put_mnt_ns(nsproxy->mnt_ns); nsproxy->mnt_ns = mnt_ns; /* Find the root */ root.mnt = &mnt_ns->root->mnt; root.dentry = mnt_ns->root->mnt.mnt_root; path_get(&root); while(d_mountpoint(root.dentry) && follow_down_one(&root)) ; /* Update the pwd and root */ set_fs_pwd(fs, &root); set_fs_root(fs, &root); path_put(&root); return 0; } static unsigned int mntns_inum(void *ns) { struct mnt_namespace *mnt_ns = ns; return mnt_ns->proc_inum; } const struct proc_ns_operations mntns_operations = { .name = "mnt", .type = CLONE_NEWNS, .get = mntns_get, .put = mntns_put, .install = mntns_install, .inum = mntns_inum, };
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2243_0
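The lock-free handshake documented inside mnt_make_readonly() above is easy to misread, so here is a single-threaded userspace miniature of the same protocol built on C11 atomics. This is an illustrative model only, not kernel code: WRITE_HOLD and READONLY stand in for MNT_WRITE_HOLD and MNT_READONLY, and one atomic counter stands in for the summed per-CPU mnt_writers counts.

/* model of the __mnt_want_write() / mnt_make_readonly() race resolution */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { WRITE_HOLD = 1, READONLY = 2 };

static atomic_int flags;	/* models mnt->mnt.mnt_flags  */
static atomic_int writers;	/* models the mnt_writers sum */

/* the __mnt_want_write() side */
static bool want_write(void)
{
	atomic_fetch_add(&writers, 1);		 /* mnt_inc_writers()        */
	while (atomic_load(&flags) & WRITE_HOLD) /* spin while r/o is racing */
		;
	if (atomic_load(&flags) & READONLY) {	 /* read-only won the race   */
		atomic_fetch_sub(&writers, 1);
		return false;			 /* mirrors -EROFS           */
	}
	return true;
}

/* the mnt_make_readonly() side */
static bool make_readonly(void)
{
	bool ok;

	atomic_fetch_or(&flags, WRITE_HOLD);	/* hold new writers         */
	ok = atomic_load(&writers) == 0;	/* any active writers left? */
	if (ok)
		atomic_fetch_or(&flags, READONLY);
	atomic_fetch_and(&flags, ~WRITE_HOLD);	/* release held writers     */
	return ok;				/* false mirrors -EBUSY     */
}

int main(void)
{
	printf("remount r/o: %s\n", make_readonly() ? "ok" : "-EBUSY");
	printf("want write:  %s\n", want_write() ? "ok" : "-EROFS");
	return 0;
}

The sequentially consistent atomics above provide the ordering that the kernel obtains explicitly from smp_mb()/smp_wmb(); the real code additionally splits the writer count per CPU, which is why MNT_WRITE_HOLD must be set before the counts are summed.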
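For orientation, the do_mount() dispatcher above routes mount(2) flag combinations to do_remount(), do_loopback(), do_change_type(), do_move_mount() or do_new_mount(). A minimal userspace sketch of that syscall surface follows; /tmp/src and /tmp/dst are placeholder paths that must exist, and the caller needs CAP_SYS_ADMIN (see may_mount() above).

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* MS_BIND takes the do_loopback() branch */
	if (mount("/tmp/src", "/tmp/dst", NULL, MS_BIND, NULL) < 0) {
		perror("bind mount");
		return 1;
	}

	/* MS_REMOUNT|MS_BIND goes through do_remount() ->
	   change_mount_flags(); MS_RDONLY lands in mnt_make_readonly() */
	if (mount("none", "/tmp/dst", NULL,
		  MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0)
		perror("read-only remount");

	/* MNT_DETACH is the lazy path: do_umount() -> umount_tree(mnt, 2) */
	if (umount2("/tmp/dst", MNT_DETACH) < 0)
		perror("lazy umount");
	return 0;
}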
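The restrictions listed in the pivot_root(2) comment above (new_root must be a mount point, put_old must be reachable from new_root, and neither tree may be shared) are what the IS_MNT_SHARED() and mnt_has_parent() checks in the syscall enforce. A hypothetical bootstrap sequence follows; glibc ships no wrapper, so the raw syscall is used, and /newroot is a placeholder directory that must already hold a usable root tree.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWNS) < 0) {	/* private copy via copy_mnt_ns() */
		perror("unshare");
		return 1;
	}
	/* defeat shared propagation so the IS_MNT_SHARED() checks pass */
	mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL);
	/* new_root must be a mount point: bind it onto itself */
	mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
	mkdir("/newroot/oldroot", 0700);	/* put_old, under new_root */
	if (syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot") < 0) {
		perror("pivot_root");
		return 1;
	}
	chdir("/");
	/* the old root is now at /oldroot; detach it lazily */
	umount2("/oldroot", MNT_DETACH);
	return 0;
}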
crossvul-cpp_data_bad_3439_4
/* * debugfs.c - ACPI debugfs interface to userspace. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/uaccess.h> #include <linux/debugfs.h> #include <acpi/acpi_drivers.h> #define _COMPONENT ACPI_SYSTEM_COMPONENT ACPI_MODULE_NAME("debugfs"); struct dentry *acpi_debugfs_dir; static struct dentry *cm_dentry; /* /sys/kernel/debug/acpi/custom_method */ static ssize_t cm_write(struct file *file, const char __user * user_buf, size_t count, loff_t *ppos) { static char *buf; static u32 max_size; static u32 uncopied_bytes; struct acpi_table_header table; acpi_status status; if (!(*ppos)) { /* parse the table header to get the table length */ if (count <= sizeof(struct acpi_table_header)) return -EINVAL; if (copy_from_user(&table, user_buf, sizeof(struct acpi_table_header))) return -EFAULT; uncopied_bytes = max_size = table.length; buf = kzalloc(max_size, GFP_KERNEL); if (!buf) return -ENOMEM; } if (buf == NULL) return -EINVAL; if ((*ppos > max_size) || (*ppos + count > max_size) || (*ppos + count < count) || (count > uncopied_bytes)) return -EINVAL; if (copy_from_user(buf + (*ppos), user_buf, count)) { kfree(buf); buf = NULL; return -EFAULT; } uncopied_bytes -= count; *ppos += count; if (!uncopied_bytes) { status = acpi_install_method(buf); kfree(buf); buf = NULL; if (ACPI_FAILURE(status)) return -EINVAL; add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); } return count; } static const struct file_operations cm_fops = { .write = cm_write, .llseek = default_llseek, }; static int __init acpi_custom_method_init(void) { if (!acpi_debugfs_dir) return -ENOENT; cm_dentry = debugfs_create_file("custom_method", S_IWUSR, acpi_debugfs_dir, NULL, &cm_fops); if (!cm_dentry) return -ENODEV; return 0; } void __init acpi_debugfs_init(void) { acpi_debugfs_dir = debugfs_create_dir("acpi", NULL); acpi_custom_method_init(); }
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3439_4
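The cm_write() handler above implements a small streaming protocol: the first write must be strictly longer than an ACPI table header, the kernel buffer is sized from the header's length field, and writes accumulate until uncopied_bytes reaches zero, at which point acpi_install_method() is invoked. A hypothetical userspace sketch of one conforming write follows (root only, debugfs mounted); the payload is filler, so acpi_install_method() will reject it with -EINVAL, but the copy path is exercised.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* mirrors the 36-byte struct acpi_table_header layout */
struct table_header {
	char	 signature[4];
	uint32_t length;		/* total size, header included */
	uint8_t	 revision;
	uint8_t	 checksum;
	char	 oem_id[6];
	char	 oem_table_id[8];
	uint32_t oem_revision;
	char	 asl_compiler_id[4];
	uint32_t asl_compiler_revision;
} __attribute__((packed));

int main(void)
{
	unsigned char buf[64] = { 0 };	/* header + placeholder "AML" */
	struct table_header *h = (struct table_header *)buf;
	int fd;

	memcpy(h->signature, "SSDT", 4);
	h->length = sizeof(buf);	/* cm_write() kzalloc()s this much */

	fd = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* count > sizeof(header), so the size check in cm_write() passes */
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		perror("write");
	close(fd);
	return 0;
}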
crossvul-cpp_data_bad_5861_14
/* Glue code for CAMELLIA encryption optimized for sparc64 crypto opcodes. * * Copyright (C) 2012 David S. Miller <davem@davemloft.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/algapi.h> #include <asm/fpumacro.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" #define CAMELLIA_MIN_KEY_SIZE 16 #define CAMELLIA_MAX_KEY_SIZE 32 #define CAMELLIA_BLOCK_SIZE 16 #define CAMELLIA_TABLE_BYTE_LEN 272 struct camellia_sparc64_ctx { u64 encrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)]; u64 decrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)]; int key_len; }; extern void camellia_sparc64_key_expand(const u32 *in_key, u64 *encrypt_key, unsigned int key_len, u64 *decrypt_key); static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key, unsigned int key_len) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); const u32 *in_key = (const u32 *) _in_key; u32 *flags = &tfm->crt_flags; if (key_len != 16 && key_len != 24 && key_len != 32) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } ctx->key_len = key_len; camellia_sparc64_key_expand(in_key, &ctx->encrypt_key[0], key_len, &ctx->decrypt_key[0]); return 0; } extern void camellia_sparc64_crypt(const u64 *key, const u32 *input, u32 *output, unsigned int key_len); static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); camellia_sparc64_crypt(&ctx->encrypt_key[0], (const u32 *) src, (u32 *) dst, ctx->key_len); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); camellia_sparc64_crypt(&ctx->decrypt_key[0], (const u32 *) src, (u32 *) dst, ctx->key_len); } extern void camellia_sparc64_load_keys(const u64 *key, unsigned int key_len); typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len, const u64 *key); extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds; extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds; #define CAMELLIA_BLOCK_MASK (~(CAMELLIA_BLOCK_SIZE - 1)) static int __ecb_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, bool encrypt) { struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; ecb_crypt_op *op; const u64 *key; int err; op = camellia_sparc64_ecb_crypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_ecb_crypt_4_grand_rounds; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; if (encrypt) key = &ctx->encrypt_key[0]; else key = &ctx->decrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes)) { unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK; if (likely(block_len)) { const u64 *src64; u64 *dst64; src64 = (const u64 *)walk.src.virt.addr; dst64 = (u64 *) walk.dst.virt.addr; op(src64, dst64, block_len, key); } nbytes &= CAMELLIA_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } fprs_write(0); return err; } static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return __ecb_crypt(desc, dst, src, nbytes, true); } static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return __ecb_crypt(desc, 
dst, src, nbytes, false); } typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len, const u64 *key, u64 *iv); extern cbc_crypt_op camellia_sparc64_cbc_encrypt_3_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_encrypt_4_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds; static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; cbc_crypt_op *op; const u64 *key; int err; op = camellia_sparc64_cbc_encrypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_cbc_encrypt_4_grand_rounds; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; key = &ctx->encrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes)) { unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK; if (likely(block_len)) { const u64 *src64; u64 *dst64; src64 = (const u64 *)walk.src.virt.addr; dst64 = (u64 *) walk.dst.virt.addr; op(src64, dst64, block_len, key, (u64 *) walk.iv); } nbytes &= CAMELLIA_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } fprs_write(0); return err; } static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; cbc_crypt_op *op; const u64 *key; int err; op = camellia_sparc64_cbc_decrypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_cbc_decrypt_4_grand_rounds; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; key = &ctx->decrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes)) { unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK; if (likely(block_len)) { const u64 *src64; u64 *dst64; src64 = (const u64 *)walk.src.virt.addr; dst64 = (u64 *) walk.dst.virt.addr; op(src64, dst64, block_len, key, (u64 *) walk.iv); } nbytes &= CAMELLIA_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } fprs_write(0); return err; } static struct crypto_alg algs[] = { { .cra_name = "camellia", .cra_driver_name = "camellia-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAMELLIA_MIN_KEY_SIZE, .cia_max_keysize = CAMELLIA_MAX_KEY_SIZE, .cia_setkey = camellia_set_key, .cia_encrypt = camellia_encrypt, .cia_decrypt = camellia_decrypt } } }, { .cra_name = "ecb(camellia)", .cra_driver_name = "ecb-camellia-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .cra_alignmask = 7, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .setkey = camellia_set_key, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, }, }, { .cra_name = "cbc(camellia)", .cra_driver_name = "cbc-camellia-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 
.cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .cra_alignmask = 7, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = camellia_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }, } }; static bool __init sparc64_has_camellia_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_CAMELLIA)) return false; return true; } static int __init camellia_sparc64_mod_init(void) { int i; for (i = 0; i < ARRAY_SIZE(algs); i++) INIT_LIST_HEAD(&algs[i].cra_list); if (sparc64_has_camellia_opcode()) { pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n"); return crypto_register_algs(algs, ARRAY_SIZE(algs)); } pr_info("sparc64 camellia opcodes not available.\n"); return -ENODEV; } static void __exit camellia_sparc64_mod_fini(void) { crypto_unregister_algs(algs, ARRAY_SIZE(algs)); } module_init(camellia_sparc64_mod_init); module_exit(camellia_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); MODULE_ALIAS("camellia"); #include "crop_devid.c"
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_14
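/*
 * Illustrative sketch (added for clarity, not part of the dataset record
 * above): the ECB/CBC walk loops in the sparc64 camellia glue use
 * CAMELLIA_BLOCK_MASK to split each blkcipher_walk chunk into a run of
 * whole 16-byte blocks for the assembly routine plus a sub-block tail
 * that is handed back through blkcipher_walk_done(). The stand-alone
 * user-space program below demonstrates only that arithmetic; the DEMO_*
 * names are hypothetical and nothing here touches the kernel crypto API.
 */
#include <stdio.h>

#define DEMO_BLOCK_SIZE 16
#define DEMO_BLOCK_MASK (~(DEMO_BLOCK_SIZE - 1))

int main(void)
{
	unsigned int samples[] = { 16, 31, 64, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int nbytes = samples[i];
		/* bytes covered by whole blocks, as in __ecb_crypt() */
		unsigned int block_len = nbytes & DEMO_BLOCK_MASK;
		/* sub-block tail returned via blkcipher_walk_done() */
		unsigned int tail = nbytes & (DEMO_BLOCK_SIZE - 1);

		/* e.g. nbytes=100 -> block_len=96 tail=4 */
		printf("nbytes=%3u -> block_len=%3u tail=%2u\n",
		       nbytes, block_len, tail);
	}
	return 0;
}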
crossvul-cpp_data_bad_2399_5
/* * CCM: Counter with CBC-MAC * * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include "internal.h" struct ccm_instance_ctx { struct crypto_skcipher_spawn ctr; struct crypto_spawn cipher; }; struct crypto_ccm_ctx { struct crypto_cipher *cipher; struct crypto_ablkcipher *ctr; }; struct crypto_rfc4309_ctx { struct crypto_aead *child; u8 nonce[3]; }; struct crypto_ccm_req_priv_ctx { u8 odata[16]; u8 idata[16]; u8 auth_tag[16]; u32 ilen; u32 flags; struct scatterlist src[2]; struct scatterlist dst[2]; struct ablkcipher_request abreq; }; static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( struct aead_request *req) { unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); } static int set_msg_len(u8 *block, unsigned int msglen, int csize) { __be32 data; memset(block, 0, csize); block += csize; if (csize >= 4) csize = 4; else if (msglen > (1 << (8 * csize))) return -EOVERFLOW; data = cpu_to_be32(msglen); memcpy(block - csize, (u8 *)&data + 4 - csize, csize); return 0; } static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ablkcipher *ctr = ctx->ctr; struct crypto_cipher *tfm = ctx->cipher; int err = 0; crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_ablkcipher_setkey(ctr, key, keylen); crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & CRYPTO_TFM_RES_MASK); if (err) goto out; crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_cipher_setkey(tfm, key, keylen); crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) & CRYPTO_TFM_RES_MASK); out: return err; } static int crypto_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 4: case 6: case 8: case 10: case 12: case 14: case 16: break; default: return -EINVAL; } return 0; } static int format_input(u8 *info, struct aead_request *req, unsigned int cryptlen) { struct crypto_aead *aead = crypto_aead_reqtfm(req); unsigned int lp = req->iv[0]; unsigned int l = lp + 1; unsigned int m; m = crypto_aead_authsize(aead); memcpy(info, req->iv, 16); /* format control info per RFC 3610 and * NIST Special Publication 800-38C */ *info |= (8 * ((m - 2) / 2)); if (req->assoclen) *info |= 64; return set_msg_len(info + 16 - l, cryptlen, l); } static int format_adata(u8 *adata, unsigned int a) { int len = 0; /* add control info for associated data * RFC 3610 and NIST Special Publication 800-38C */ if (a < 65280) { *(__be16 *)adata = cpu_to_be16(a); len = 2; } else { *(__be16 *)adata = cpu_to_be16(0xfffe); *(__be32 *)&adata[2] = cpu_to_be32(a); len = 6; } return len; } static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n, struct crypto_ccm_req_priv_ctx *pctx) { unsigned int bs = 16; u8 *odata = pctx->odata; u8 *idata = 
pctx->idata; int datalen, getlen; datalen = n; /* first time in here, block may be partially filled. */ getlen = bs - pctx->ilen; if (datalen >= getlen) { memcpy(idata + pctx->ilen, data, getlen); crypto_xor(odata, idata, bs); crypto_cipher_encrypt_one(tfm, odata, odata); datalen -= getlen; data += getlen; pctx->ilen = 0; } /* now encrypt rest of data */ while (datalen >= bs) { crypto_xor(odata, data, bs); crypto_cipher_encrypt_one(tfm, odata, odata); datalen -= bs; data += bs; } /* check and see if there's leftover data that wasn't * enough to fill a block. */ if (datalen) { memcpy(idata + pctx->ilen, data, datalen); pctx->ilen += datalen; } } static void get_data_to_compute(struct crypto_cipher *tfm, struct crypto_ccm_req_priv_ctx *pctx, struct scatterlist *sg, unsigned int len) { struct scatter_walk walk; u8 *data_src; int n; scatterwalk_start(&walk, sg); while (len) { n = scatterwalk_clamp(&walk, len); if (!n) { scatterwalk_start(&walk, sg_next(walk.sg)); n = scatterwalk_clamp(&walk, len); } data_src = scatterwalk_map(&walk); compute_mac(tfm, data_src, n, pctx); len -= n; scatterwalk_unmap(data_src); scatterwalk_advance(&walk, n); scatterwalk_done(&walk, 0, len); if (len) crypto_yield(pctx->flags); } /* any leftover needs padding and then encrypted */ if (pctx->ilen) { int padlen; u8 *odata = pctx->odata; u8 *idata = pctx->idata; padlen = 16 - pctx->ilen; memset(idata + pctx->ilen, 0, padlen); crypto_xor(odata, idata, 16); crypto_cipher_encrypt_one(tfm, odata, odata); pctx->ilen = 0; } } static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, unsigned int cryptlen) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); struct crypto_cipher *cipher = ctx->cipher; unsigned int assoclen = req->assoclen; u8 *odata = pctx->odata; u8 *idata = pctx->idata; int err; /* format control data for input */ err = format_input(odata, req, cryptlen); if (err) goto out; /* encrypt first block to use as start in computing mac */ crypto_cipher_encrypt_one(cipher, odata, odata); /* format associated data and compute into mac */ if (assoclen) { pctx->ilen = format_adata(idata, assoclen); get_data_to_compute(cipher, pctx, req->assoc, req->assoclen); } else { pctx->ilen = 0; } /* compute plaintext into mac */ if (cryptlen) get_data_to_compute(cipher, pctx, plain, cryptlen); out: return err; } static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); u8 *odata = pctx->odata; if (!err) scatterwalk_map_and_copy(odata, req->dst, req->cryptlen, crypto_aead_authsize(aead), 1); aead_request_complete(req, err); } static inline int crypto_ccm_check_iv(const u8 *iv) { /* 2 <= L <= 8, so 1 <= L' <= 7. 
*/ if (1 > iv[0] || iv[0] > 7) return -EINVAL; return 0; } static int crypto_ccm_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); struct ablkcipher_request *abreq = &pctx->abreq; struct scatterlist *dst; unsigned int cryptlen = req->cryptlen; u8 *odata = pctx->odata; u8 *iv = req->iv; int err; err = crypto_ccm_check_iv(iv); if (err) return err; pctx->flags = aead_request_flags(req); err = crypto_ccm_auth(req, req->src, cryptlen); if (err) return err; /* Note: rfc 3610 and NIST 800-38C require counter of * zero to encrypt auth tag. */ memset(iv + 15 - iv[0], 0, iv[0] + 1); sg_init_table(pctx->src, 2); sg_set_buf(pctx->src, odata, 16); scatterwalk_sg_chain(pctx->src, 2, req->src); dst = pctx->src; if (req->src != req->dst) { sg_init_table(pctx->dst, 2); sg_set_buf(pctx->dst, odata, 16); scatterwalk_sg_chain(pctx->dst, 2, req->dst); dst = pctx->dst; } ablkcipher_request_set_tfm(abreq, ctx->ctr); ablkcipher_request_set_callback(abreq, pctx->flags, crypto_ccm_encrypt_done, req); ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); err = crypto_ablkcipher_encrypt(abreq); if (err) return err; /* copy authtag to end of dst */ scatterwalk_map_and_copy(odata, req->dst, cryptlen, crypto_aead_authsize(aead), 1); return err; } static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen - authsize; if (!err) { err = crypto_ccm_auth(req, req->dst, cryptlen); if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) err = -EBADMSG; } aead_request_complete(req, err); } static int crypto_ccm_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); struct ablkcipher_request *abreq = &pctx->abreq; struct scatterlist *dst; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen; u8 *authtag = pctx->auth_tag; u8 *odata = pctx->odata; u8 *iv = req->iv; int err; if (cryptlen < authsize) return -EINVAL; cryptlen -= authsize; err = crypto_ccm_check_iv(iv); if (err) return err; pctx->flags = aead_request_flags(req); scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0); memset(iv + 15 - iv[0], 0, iv[0] + 1); sg_init_table(pctx->src, 2); sg_set_buf(pctx->src, authtag, 16); scatterwalk_sg_chain(pctx->src, 2, req->src); dst = pctx->src; if (req->src != req->dst) { sg_init_table(pctx->dst, 2); sg_set_buf(pctx->dst, authtag, 16); scatterwalk_sg_chain(pctx->dst, 2, req->dst); dst = pctx->dst; } ablkcipher_request_set_tfm(abreq, ctx->ctr); ablkcipher_request_set_callback(abreq, pctx->flags, crypto_ccm_decrypt_done, req); ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); err = crypto_ablkcipher_decrypt(abreq); if (err) return err; err = crypto_ccm_auth(req, req->dst, cryptlen); if (err) return err; /* verify */ if (crypto_memneq(authtag, odata, authsize)) return -EBADMSG; return err; } static int crypto_ccm_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst); struct crypto_ccm_ctx 
*ctx = crypto_tfm_ctx(tfm); struct crypto_cipher *cipher; struct crypto_ablkcipher *ctr; unsigned long align; int err; cipher = crypto_spawn_cipher(&ictx->cipher); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctr = crypto_spawn_skcipher(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) goto err_free_cipher; ctx->cipher = cipher; ctx->ctr = ctr; align = crypto_tfm_alg_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); tfm->crt_aead.reqsize = align + sizeof(struct crypto_ccm_req_priv_ctx) + crypto_ablkcipher_reqsize(ctr); return 0; err_free_cipher: crypto_free_cipher(cipher); return err; } static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm) { struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_cipher(ctx->cipher); crypto_free_ablkcipher(ctx->ctr); } static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, const char *full_name, const char *ctr_name, const char *cipher_name) { struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_alg *ctr; struct crypto_alg *cipher; struct ccm_instance_ctx *ictx; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(cipher)) return ERR_CAST(cipher); err = -EINVAL; if (cipher->cra_blocksize != 16) goto out_put_cipher; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); err = -ENOMEM; if (!inst) goto out_put_cipher; ictx = crypto_instance_ctx(inst); err = crypto_init_spawn(&ictx->cipher, cipher, inst, CRYPTO_ALG_TYPE_MASK); if (err) goto err_free_inst; crypto_set_skcipher_spawn(&ictx->ctr, inst); err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, crypto_requires_sync(algt->type, algt->mask)); if (err) goto err_drop_cipher; ctr = crypto_skcipher_spawn_alg(&ictx->ctr); /* Not a stream cipher? */ err = -EINVAL; if (ctr->cra_blocksize != 1) goto err_drop_ctr; /* We want the real thing! 
*/ if (ctr->cra_ablkcipher.ivsize != 16) goto err_drop_ctr; err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", ctr->cra_driver_name, cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_ctr; memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority; inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask | (__alignof__(u32) - 1); inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = 16; inst->alg.cra_aead.maxauthsize = 16; inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx); inst->alg.cra_init = crypto_ccm_init_tfm; inst->alg.cra_exit = crypto_ccm_exit_tfm; inst->alg.cra_aead.setkey = crypto_ccm_setkey; inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize; inst->alg.cra_aead.encrypt = crypto_ccm_encrypt; inst->alg.cra_aead.decrypt = crypto_ccm_decrypt; out: crypto_mod_put(cipher); return inst; err_drop_ctr: crypto_drop_skcipher(&ictx->ctr); err_drop_cipher: crypto_drop_spawn(&ictx->cipher); err_free_inst: kfree(inst); out_put_cipher: inst = ERR_PTR(err); goto out; } static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb) { const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; char full_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) return ERR_CAST(cipher_name); if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) return ERR_PTR(-ENAMETOOLONG); if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) return ERR_PTR(-ENAMETOOLONG); return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name); } static void crypto_ccm_free(struct crypto_instance *inst) { struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst); crypto_drop_spawn(&ctx->cipher); crypto_drop_skcipher(&ctx->ctr); kfree(inst); } static struct crypto_template crypto_ccm_tmpl = { .name = "ccm", .alloc = crypto_ccm_alloc, .free = crypto_ccm_free, .module = THIS_MODULE, }; static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb) { const char *ctr_name; const char *cipher_name; char full_name[CRYPTO_MAX_ALG_NAME]; ctr_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ctr_name)) return ERR_CAST(ctr_name); cipher_name = crypto_attr_alg_name(tb[2]); if (IS_ERR(cipher_name)) return ERR_CAST(cipher_name); if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME) return ERR_PTR(-ENAMETOOLONG); return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name); } static struct crypto_template crypto_ccm_base_tmpl = { .name = "ccm_base", .alloc = crypto_ccm_base_alloc, .free = crypto_ccm_free, .module = THIS_MODULE, }; static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); struct crypto_aead *child = ctx->child; int err; if (keylen < 3) return -EINVAL; keylen -= 3; memcpy(ctx->nonce, key + keylen, 3); crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(child, key, keylen); crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & CRYPTO_TFM_RES_MASK); return err; } static int crypto_rfc4309_setauthsize(struct crypto_aead *parent, 
unsigned int authsize) { struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return crypto_aead_setauthsize(ctx->child, authsize); } static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req) { struct aead_request *subreq = aead_request_ctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead); struct crypto_aead *child = ctx->child; u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), crypto_aead_alignmask(child) + 1); /* L' */ iv[0] = 3; memcpy(iv + 1, ctx->nonce, 3); memcpy(iv + 4, req->iv, 8); aead_request_set_tfm(subreq, child); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); aead_request_set_assoc(subreq, req->assoc, req->assoclen); return subreq; } static int crypto_rfc4309_encrypt(struct aead_request *req) { req = crypto_rfc4309_crypt(req); return crypto_aead_encrypt(req); } static int crypto_rfc4309_decrypt(struct aead_request *req) { req = crypto_rfc4309_crypt(req); return crypto_aead_decrypt(req); } static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst); struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_aead *aead; unsigned long align; aead = crypto_spawn_aead(spawn); if (IS_ERR(aead)) return PTR_ERR(aead); ctx->child = aead; align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); tfm->crt_aead.reqsize = sizeof(struct aead_request) + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + align + 16; return 0; } static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm) { struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_aead(ctx->child); } static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb) { struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_aead_spawn *spawn; struct crypto_alg *alg; const char *ccm_name; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); ccm_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ccm_name)) return ERR_CAST(ccm_name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return ERR_PTR(-ENOMEM); spawn = crypto_instance_ctx(inst); crypto_set_aead_spawn(spawn, inst); err = crypto_grab_aead(spawn, ccm_name, 0, crypto_requires_sync(algt->type, algt->mask)); if (err) goto out_free_inst; alg = crypto_aead_spawn_alg(spawn); err = -EINVAL; /* We only support 16-byte blocks. */ if (alg->cra_aead.ivsize != 16) goto out_drop_alg; /* Not a stream cipher? 
*/ if (alg->cra_blocksize != 1) goto out_drop_alg; err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME || snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4309(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_drop_alg; inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_nivaead_type; inst->alg.cra_aead.ivsize = 8; inst->alg.cra_aead.maxauthsize = 16; inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx); inst->alg.cra_init = crypto_rfc4309_init_tfm; inst->alg.cra_exit = crypto_rfc4309_exit_tfm; inst->alg.cra_aead.setkey = crypto_rfc4309_setkey; inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize; inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt; inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt; inst->alg.cra_aead.geniv = "seqiv"; out: return inst; out_drop_alg: crypto_drop_aead(spawn); out_free_inst: kfree(inst); inst = ERR_PTR(err); goto out; } static void crypto_rfc4309_free(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); } static struct crypto_template crypto_rfc4309_tmpl = { .name = "rfc4309", .alloc = crypto_rfc4309_alloc, .free = crypto_rfc4309_free, .module = THIS_MODULE, }; static int __init crypto_ccm_module_init(void) { int err; err = crypto_register_template(&crypto_ccm_base_tmpl); if (err) goto out; err = crypto_register_template(&crypto_ccm_tmpl); if (err) goto out_undo_base; err = crypto_register_template(&crypto_rfc4309_tmpl); if (err) goto out_undo_ccm; out: return err; out_undo_ccm: crypto_unregister_template(&crypto_ccm_tmpl); out_undo_base: crypto_unregister_template(&crypto_ccm_base_tmpl); goto out; } static void __exit crypto_ccm_module_exit(void) { crypto_unregister_template(&crypto_rfc4309_tmpl); crypto_unregister_template(&crypto_ccm_tmpl); crypto_unregister_template(&crypto_ccm_base_tmpl); } module_init(crypto_ccm_module_init); module_exit(crypto_ccm_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Counter with CBC MAC"); MODULE_ALIAS_CRYPTO("ccm_base"); MODULE_ALIAS_CRYPTO("rfc4309");
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2399_5
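/*
 * Illustrative sketch (added for clarity, separate from the crypto/ccm.c
 * record above): format_input() builds the CCM B_0 block per RFC 3610 -
 * iv[0] carries L' = L - 1 in its low bits, bit 6 flags associated data,
 * bits 5..3 hold (M - 2) / 2, and set_msg_len() stores the message length
 * big-endian in the trailing L bytes of the block. The user-space demo
 * below mirrors that encoding under those assumptions; all demo_* names
 * are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Store msglen big-endian in the last bytes of a csize-wide field,
 * mirroring set_msg_len(); returns -1 where the kernel returns
 * -EOVERFLOW. */
static int demo_set_msg_len(uint8_t *block, unsigned int msglen, int csize)
{
	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1u << (8 * csize)))
		return -1;

	while (csize--) {
		*--block = msglen & 0xff;
		msglen >>= 8;
	}
	return 0;
}

/* B_0 flags byte: Adata bit, M' = (M - 2) / 2, L' = L - 1 */
static uint8_t demo_b0_flags(unsigned int m, unsigned int l, int have_adata)
{
	return (have_adata ? 64 : 0) | (8 * ((m - 2) / 2)) | (l - 1);
}

int main(void)
{
	uint8_t len_field[2];

	/* M = 8-byte tag, L = 2-byte length field, with associated data:
	 * 0x40 | 0x18 | 0x01 = 0x59 */
	printf("flags = 0x%02x\n", demo_b0_flags(8, 2, 1));

	if (demo_set_msg_len(len_field, 0x1234, 2) == 0)
		printf("len field = %02x %02x\n", len_field[0], len_field[1]);
	return 0;
}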
crossvul-cpp_data_bad_5054_1
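/*
 * Illustrative sketch (added for clarity, prefacing the ucma.c record
 * below): context teardown there pairs an atomic refcount with a
 * completion - ucma_put_ctx() fires ctx->comp on the final put, and
 * ucma_close_id() drops its own reference, then sleeps in
 * wait_for_completion() until every inflight user is done. The
 * stand-alone pthread program below mimics that pattern in user space;
 * all demo_* names are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_ctx {
	atomic_int ref;        /* plays the role of ctx->ref */
	pthread_mutex_t lock;  /* these three back the "completion" */
	pthread_cond_t cond;
	int completed;         /* plays the role of ctx->comp */
};

static void demo_put(struct demo_ctx *ctx)
{
	/* final put signals the waiter, like complete() in ucma_put_ctx() */
	if (atomic_fetch_sub(&ctx->ref, 1) == 1) {
		pthread_mutex_lock(&ctx->lock);
		ctx->completed = 1;
		pthread_cond_signal(&ctx->cond);
		pthread_mutex_unlock(&ctx->lock);
	}
}

static void demo_wait(struct demo_ctx *ctx)
{
	/* like wait_for_completion(&ctx->comp) */
	pthread_mutex_lock(&ctx->lock);
	while (!ctx->completed)
		pthread_cond_wait(&ctx->cond, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
}

static void *demo_user(void *arg)
{
	demo_put(arg);         /* an inflight user finishing its work */
	return NULL;
}

int main(void)                 /* build: cc demo.c -lpthread */
{
	struct demo_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	atomic_init(&ctx.ref, 2);  /* creator + one inflight user */
	pthread_create(&t, NULL, demo_user, &ctx);

	demo_put(&ctx);            /* drop the creator's own reference... */
	demo_wait(&ctx);           /* ...then wait, as ucma_close_id() does */
	pthread_join(t, NULL);
	puts("all references dropped");
	return 0;
}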
/* * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/completion.h> #include <linux/file.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/idr.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/nsproxy.h> #include <rdma/rdma_user_cm.h> #include <rdma/ib_marshall.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> #include <rdma/ib_addr.h> #include <rdma/ib.h> MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); MODULE_LICENSE("Dual BSD/GPL"); static unsigned int max_backlog = 1024; static struct ctl_table_header *ucma_ctl_table_hdr; static struct ctl_table ucma_ctl_table[] = { { .procname = "max_backlog", .data = &max_backlog, .maxlen = sizeof max_backlog, .mode = 0644, .proc_handler = proc_dointvec, }, { } }; struct ucma_file { struct mutex mut; struct file *filp; struct list_head ctx_list; struct list_head event_list; wait_queue_head_t poll_wait; struct workqueue_struct *close_wq; }; struct ucma_context { int id; struct completion comp; atomic_t ref; int events_reported; int backlog; struct ucma_file *file; struct rdma_cm_id *cm_id; u64 uid; struct list_head list; struct list_head mc_list; /* mark that device is in process of destroying the internal HW * resources, protected by the global mut */ int closing; /* sync between removal event and id destroy, protected by file mut */ int destroying; struct work_struct close_work; }; struct ucma_multicast { struct ucma_context *ctx; int id; int events_reported; u64 uid; struct list_head list; struct sockaddr_storage addr; }; struct ucma_event { struct ucma_context *ctx; struct ucma_multicast *mc; struct list_head list; struct rdma_cm_id *cm_id; struct rdma_ucm_event_resp resp; struct work_struct close_work; }; static DEFINE_MUTEX(mut); static DEFINE_IDR(ctx_idr); static DEFINE_IDR(multicast_idr); static inline struct ucma_context *_ucma_find_context(int id, struct ucma_file *file) { struct ucma_context *ctx; ctx = idr_find(&ctx_idr, id); if (!ctx) ctx = ERR_PTR(-ENOENT); else if (ctx->file != file) ctx = ERR_PTR(-EINVAL); return ctx; } static struct 
ucma_context *ucma_get_ctx(struct ucma_file *file, int id) { struct ucma_context *ctx; mutex_lock(&mut); ctx = _ucma_find_context(id, file); if (!IS_ERR(ctx)) { if (ctx->closing) ctx = ERR_PTR(-EIO); else atomic_inc(&ctx->ref); } mutex_unlock(&mut); return ctx; } static void ucma_put_ctx(struct ucma_context *ctx) { if (atomic_dec_and_test(&ctx->ref)) complete(&ctx->comp); } static void ucma_close_event_id(struct work_struct *work) { struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work); rdma_destroy_id(uevent_close->cm_id); kfree(uevent_close); } static void ucma_close_id(struct work_struct *work) { struct ucma_context *ctx = container_of(work, struct ucma_context, close_work); /* once all inflight tasks are finished, we close all underlying * resources. The context is still alive till its explicit destroying * by its creator. */ ucma_put_ctx(ctx); wait_for_completion(&ctx->comp); /* No new events will be generated after destroying the id. */ rdma_destroy_id(ctx->cm_id); } static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) { struct ucma_context *ctx; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; INIT_WORK(&ctx->close_work, ucma_close_id); atomic_set(&ctx->ref, 1); init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); ctx->file = file; mutex_lock(&mut); ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL); mutex_unlock(&mut); if (ctx->id < 0) goto error; list_add_tail(&ctx->list, &file->ctx_list); return ctx; error: kfree(ctx); return NULL; } static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx) { struct ucma_multicast *mc; mc = kzalloc(sizeof(*mc), GFP_KERNEL); if (!mc) return NULL; mutex_lock(&mut); mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL); mutex_unlock(&mut); if (mc->id < 0) goto error; mc->ctx = ctx; list_add_tail(&mc->list, &ctx->mc_list); return mc; error: kfree(mc); return NULL; } static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, struct rdma_conn_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; dst->responder_resources = src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; } static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst, struct rdma_ud_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr); dst->qp_num = src->qp_num; dst->qkey = src->qkey; } static void ucma_set_event_context(struct ucma_context *ctx, struct rdma_cm_event *event, struct ucma_event *uevent) { uevent->ctx = ctx; switch (event->event) { case RDMA_CM_EVENT_MULTICAST_JOIN: case RDMA_CM_EVENT_MULTICAST_ERROR: uevent->mc = (struct ucma_multicast *) event->param.ud.private_data; uevent->resp.uid = uevent->mc->uid; uevent->resp.id = uevent->mc->id; break; default: uevent->resp.uid = ctx->uid; uevent->resp.id = ctx->id; break; } } /* Called with file->mut locked for the relevant context. 
*/ static void ucma_removal_event_handler(struct rdma_cm_id *cm_id) { struct ucma_context *ctx = cm_id->context; struct ucma_event *con_req_eve; int event_found = 0; if (ctx->destroying) return; /* only if context is pointing to cm_id that it owns it and can be * queued to be closed, otherwise that cm_id is an inflight one that * is part of that context event list pending to be detached and * reattached to its new context as part of ucma_get_event, * handled separately below. */ if (ctx->cm_id == cm_id) { mutex_lock(&mut); ctx->closing = 1; mutex_unlock(&mut); queue_work(ctx->file->close_wq, &ctx->close_work); return; } list_for_each_entry(con_req_eve, &ctx->file->event_list, list) { if (con_req_eve->cm_id == cm_id && con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { list_del(&con_req_eve->list); INIT_WORK(&con_req_eve->close_work, ucma_close_event_id); queue_work(ctx->file->close_wq, &con_req_eve->close_work); event_found = 1; break; } } if (!event_found) pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n"); } static int ucma_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct ucma_event *uevent; struct ucma_context *ctx = cm_id->context; int ret = 0; uevent = kzalloc(sizeof(*uevent), GFP_KERNEL); if (!uevent) return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; mutex_lock(&ctx->file->mut); uevent->cm_id = cm_id; ucma_set_event_context(ctx, event, uevent); uevent->resp.event = event->event; uevent->resp.status = event->status; if (cm_id->qp_type == IB_QPT_UD) ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud); else ucma_copy_conn_event(&uevent->resp.param.conn, &event->param.conn); if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { if (!ctx->backlog) { ret = -ENOMEM; kfree(uevent); goto out; } ctx->backlog--; } else if (!ctx->uid || ctx->cm_id != cm_id) { /* * We ignore events for new connections until userspace has set * their context. This can only happen if an error occurs on a * new connection before the user accepts it. This is okay, * since the accept will just fail later. However, we do need * to release the underlying HW resources in case of a device * removal event. 
*/ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) ucma_removal_event_handler(cm_id); kfree(uevent); goto out; } list_add_tail(&uevent->list, &ctx->file->event_list); wake_up_interruptible(&ctx->file->poll_wait); if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) ucma_removal_event_handler(cm_id); out: mutex_unlock(&ctx->file->mut); return ret; } static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct ucma_context *ctx; struct rdma_ucm_get_event cmd; struct ucma_event *uevent; int ret = 0; if (out_len < sizeof uevent->resp) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&file->mut); while (list_empty(&file->event_list)) { mutex_unlock(&file->mut); if (file->filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(file->poll_wait, !list_empty(&file->event_list))) return -ERESTARTSYS; mutex_lock(&file->mut); } uevent = list_entry(file->event_list.next, struct ucma_event, list); if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { ctx = ucma_alloc_ctx(file); if (!ctx) { ret = -ENOMEM; goto done; } uevent->ctx->backlog++; ctx->cm_id = uevent->cm_id; ctx->cm_id->context = ctx; uevent->resp.id = ctx->id; } if (copy_to_user((void __user *)(unsigned long)cmd.response, &uevent->resp, sizeof uevent->resp)) { ret = -EFAULT; goto done; } list_del(&uevent->list); uevent->ctx->events_reported++; if (uevent->mc) uevent->mc->events_reported++; kfree(uevent); done: mutex_unlock(&file->mut); return ret; } static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type) { switch (cmd->ps) { case RDMA_PS_TCP: *qp_type = IB_QPT_RC; return 0; case RDMA_PS_UDP: case RDMA_PS_IPOIB: *qp_type = IB_QPT_UD; return 0; case RDMA_PS_IB: *qp_type = cmd->qp_type; return 0; default: return -EINVAL; } } static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; enum ib_qp_type qp_type; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ret = ucma_get_qp_type(&cmd, &qp_type); if (ret) return ret; mutex_lock(&file->mut); ctx = ucma_alloc_ctx(file); mutex_unlock(&file->mut); if (!ctx) return -ENOMEM; ctx->uid = cmd.uid; ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, ucma_event_handler, ctx, cmd.ps, qp_type); if (IS_ERR(ctx->cm_id)) { ret = PTR_ERR(ctx->cm_id); goto err1; } resp.id = ctx->id; if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) { ret = -EFAULT; goto err2; } return 0; err2: rdma_destroy_id(ctx->cm_id); err1: mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); kfree(ctx); return ret; } static void ucma_cleanup_multicast(struct ucma_context *ctx) { struct ucma_multicast *mc, *tmp; mutex_lock(&mut); list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { list_del(&mc->list); idr_remove(&multicast_idr, mc->id); kfree(mc); } mutex_unlock(&mut); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) { struct ucma_event *uevent, *tmp; list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { if (uevent->mc != mc) continue; list_del(&uevent->list); kfree(uevent); } } /* * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At * this point, no new events will be reported from the hardware. However, we * still need to cleanup the UCMA context for this ID. 
Specifically, there * might be events that have not yet been consumed by the user space software. * These might include pending connect requests which we have not completed * processing. We cannot call rdma_destroy_id while holding the lock of the * context (file->mut), as it might cause a deadlock. We therefore extract all * relevant events from the context pending events list while holding the * mutex. After that we release them as needed. */ static int ucma_free_ctx(struct ucma_context *ctx) { int events_reported; struct ucma_event *uevent, *tmp; LIST_HEAD(list); ucma_cleanup_multicast(ctx); /* Cleanup events not yet reported to the user. */ mutex_lock(&ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { if (uevent->ctx == ctx) list_move_tail(&uevent->list, &list); } list_del(&ctx->list); mutex_unlock(&ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &list, list) { list_del(&uevent->list); if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) rdma_destroy_id(uevent->cm_id); kfree(uevent); } events_reported = ctx->events_reported; kfree(ctx); return events_reported; } static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_destroy_id cmd; struct rdma_ucm_destroy_id_resp resp; struct ucma_context *ctx; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&mut); ctx = _ucma_find_context(cmd.id, file); if (!IS_ERR(ctx)) idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->file->mut); ctx->destroying = 1; mutex_unlock(&ctx->file->mut); flush_workqueue(ctx->file->close_wq); /* At this point it's guaranteed that there is no inflight * closing task */ mutex_lock(&mut); if (!ctx->closing) { mutex_unlock(&mut); ucma_put_ctx(ctx); wait_for_completion(&ctx->comp); rdma_destroy_id(ctx->cm_id); } else { mutex_unlock(&mut); } resp.events_reported = ucma_free_ctx(ctx); if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; struct sockaddr *addr; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; addr = (struct sockaddr *) &cmd.addr; if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_bind_addr(ctx->cm_id, addr); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static ssize_t 
ucma_resolve_addr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; struct sockaddr *src, *dst; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; src = (struct sockaddr *) &cmd.src_addr; dst = (struct sockaddr *) &cmd.dst_addr; if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_route cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; resp->num_paths = route->num_paths; switch (route->num_paths) { case 0: dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); /* fall through */ case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { resp->num_paths = route->num_paths; switch (route->num_paths) { case 0: rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr, (union ib_gid *)&resp->ib_route[0].dgid); rdma_ip2gid((struct sockaddr *)&route->addr.src_addr, (union ib_gid *)&resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(0xffff); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); /* fall through */ case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); } static ssize_t ucma_query_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct rdma_ucm_query_route_resp resp; struct ucma_context *ctx; struct sockaddr *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? 
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); if (!ctx->cm_id->device) goto out; resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.port_num = ctx->cm_id->port_num; if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_ib_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iboe_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) ret = -EFAULT; ucma_put_ctx(ctx); return ret; } static void ucma_query_device_addr(struct rdma_cm_id *cm_id, struct rdma_ucm_query_addr_resp *resp) { if (!cm_id->device) return; resp->node_guid = (__force __u64) cm_id->device->node_guid; resp->port_num = cm_id->port_num; resp->pkey = (__force __u16) cpu_to_be16( ib_addr_get_pkey(&cm_id->route.addr.dev_addr)); } static ssize_t ucma_query_addr(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; resp.src_size = rdma_addr_size(addr); memcpy(&resp.src_addr, addr, resp.src_size); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; resp.dst_size = rdma_addr_size(addr); memcpy(&resp.dst_addr, addr, resp.dst_size); ucma_query_device_addr(ctx->cm_id, &resp); if (copy_to_user(response, &resp, sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_query_path(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_path_resp *resp; int i, ret = 0; if (out_len < sizeof(*resp)) return -ENOSPC; resp = kzalloc(out_len, GFP_KERNEL); if (!resp) return -ENOMEM; resp->num_paths = ctx->cm_id->route.num_paths; for (i = 0, out_len -= sizeof(*resp); i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); i++, out_len -= sizeof(struct ib_path_rec_data)) { resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL; ib_sa_pack_path(&ctx->cm_id->route.path_rec[i], &resp->path_data[i].path_rec); } if (copy_to_user(response, resp, sizeof(*resp) + (i * sizeof(struct ib_path_rec_data)))) ret = -EFAULT; kfree(resp); return ret; } static ssize_t ucma_query_gid(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr_ib *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; memset(&resp, 0, sizeof resp); ucma_query_device_addr(ctx->cm_id, &resp); addr = (struct sockaddr_ib *) &resp.src_addr; resp.src_size = sizeof(*addr); if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force __be16) resp.pkey; rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr, (union ib_gid *) &addr->sib_addr); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.src_addr); } addr = (struct sockaddr_ib *) &resp.dst_addr; resp.dst_size = sizeof(*addr); if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force __be16) resp.pkey; rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr, (union ib_gid *) &addr->sib_addr); addr->sib_sid = 
rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr); } if (copy_to_user(response, &resp, sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_query(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct ucma_context *ctx; void __user *response; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; response = (void __user *)(unsigned long) cmd.response; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_PATH: ret = ucma_query_path(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_GID: ret = ucma_query_gid(ctx, response, out_len); break; default: ret = -ENOSYS; break; } ucma_put_ctx(ctx); return ret; } static void ucma_copy_conn_param(struct rdma_cm_id *id, struct rdma_conn_param *dst, struct rdma_ucm_conn_param *src) { dst->private_data = src->private_data; dst->private_data_len = src->private_data_len; dst->responder_resources =src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0; } static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_connect cmd; struct rdma_conn_param conn_param; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!cmd.conn_param.valid) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); ret = rdma_connect(ctx->cm_id, &conn_param); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_listen cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? 
cmd.backlog : max_backlog; ret = rdma_listen(ctx->cm_id, ctx->backlog); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_accept cmd; struct rdma_conn_param conn_param; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&file->mut); ret = rdma_accept(ctx->cm_id, &conn_param); if (!ret) ctx->uid = cmd.uid; mutex_unlock(&file->mut); } else ret = rdma_accept(ctx->cm_id, NULL); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_reject cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_disconnect cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_disconnect(ctx->cm_id); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_init_qp_attr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_init_qp_attr cmd; struct ib_uverbs_qp_attr resp; struct ucma_context *ctx; struct ib_qp_attr qp_attr; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); if (ret) goto out; ib_copy_qp_attr_to_user(&resp, &qp_attr); if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) ret = -EFAULT; out: ucma_put_ctx(ctx); return ret; } static int ucma_set_option_id(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret = 0; switch (optname) { case RDMA_OPTION_ID_TOS: if (optlen != sizeof(u8)) { ret = -EINVAL; break; } rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); break; case RDMA_OPTION_ID_REUSEADDR: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); break; case RDMA_OPTION_ID_AFONLY: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 
1 : 0); break; default: ret = -ENOSYS; } return ret; } static int ucma_set_ib_path(struct ucma_context *ctx, struct ib_path_rec_data *path_data, size_t optlen) { struct ib_sa_path_rec sa_path; struct rdma_cm_event event; int ret; if (optlen % sizeof(*path_data)) return -EINVAL; for (; optlen; optlen -= sizeof(*path_data), path_data++) { if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL)) break; } if (!optlen) return -EINVAL; memset(&sa_path, 0, sizeof(sa_path)); ib_sa_unpack_path(path_data->path_rec, &sa_path); ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); if (ret) return ret; memset(&event, 0, sizeof event); event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; return ucma_event_handler(ctx->cm_id, &event); } static int ucma_set_option_ib(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret; switch (optname) { case RDMA_OPTION_IB_PATH: ret = ucma_set_ib_path(ctx, optval, optlen); break; default: ret = -ENOSYS; } return ret; } static int ucma_set_option_level(struct ucma_context *ctx, int level, int optname, void *optval, size_t optlen) { int ret; switch (level) { case RDMA_OPTION_ID: ret = ucma_set_option_id(ctx, optname, optval, optlen); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); break; default: ret = -ENOSYS; } return ret; } static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_set_option cmd; struct ucma_context *ctx; void *optval; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); optval = memdup_user((void __user *) (unsigned long) cmd.optval, cmd.optlen); if (IS_ERR(optval)) { ret = PTR_ERR(optval); goto out; } ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, cmd.optlen); kfree(optval); out: ucma_put_ctx(ctx); return ret; } static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_notify cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_process_join(struct ucma_file *file, struct rdma_ucm_join_mcast *cmd, int out_len) { struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; struct ucma_multicast *mc; struct sockaddr *addr; int ret; if (out_len < sizeof(resp)) return -ENOSPC; addr = (struct sockaddr *) &cmd->addr; if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd->id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&file->mut); mc = ucma_alloc_multicast(ctx); if (!mc) { ret = -ENOMEM; goto err1; } mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc); if (ret) goto err2; resp.id = mc->id; if (copy_to_user((void __user *)(unsigned long) cmd->response, &resp, sizeof(resp))) { ret = -EFAULT; goto err3; } mutex_unlock(&file->mut); ucma_put_ctx(ctx); return 0; err3: rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); ucma_cleanup_mc_events(mc); err2: mutex_lock(&mut); idr_remove(&multicast_idr, mc->id); mutex_unlock(&mut); list_del(&mc->list); kfree(mc); err1: mutex_unlock(&file->mut); ucma_put_ctx(ctx); return ret; } static ssize_t 
ucma_join_ip_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_join_ip_mcast cmd; struct rdma_ucm_join_mcast join_cmd; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; join_cmd.response = cmd.response; join_cmd.uid = cmd.uid; join_cmd.id = cmd.id; join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); join_cmd.reserved = 0; memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); return ucma_process_join(file, &join_cmd, out_len); } static ssize_t ucma_join_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_join_mcast cmd; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; return ucma_process_join(file, &cmd, out_len); } static ssize_t ucma_leave_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_destroy_id cmd; struct rdma_ucm_destroy_id_resp resp; struct ucma_multicast *mc; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&mut); mc = idr_find(&multicast_idr, cmd.id); if (!mc) mc = ERR_PTR(-ENOENT); else if (mc->ctx->file != file) mc = ERR_PTR(-EINVAL); else if (!atomic_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); else idr_remove(&multicast_idr, mc->id); mutex_unlock(&mut); if (IS_ERR(mc)) { ret = PTR_ERR(mc); goto out; } rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_lock(&mc->ctx->file->mut); ucma_cleanup_mc_events(mc); list_del(&mc->list); mutex_unlock(&mc->ctx->file->mut); ucma_put_ctx(mc->ctx); resp.events_reported = mc->events_reported; kfree(mc); if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) ret = -EFAULT; out: return ret; } static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2) { /* Acquire mutex's based on pointer comparison to prevent deadlock. */ if (file1 < file2) { mutex_lock(&file1->mut); mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING); } else { mutex_lock(&file2->mut); mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING); } } static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2) { if (file1 < file2) { mutex_unlock(&file2->mut); mutex_unlock(&file1->mut); } else { mutex_unlock(&file1->mut); mutex_unlock(&file2->mut); } } static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file) { struct ucma_event *uevent, *tmp; list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) if (uevent->ctx == ctx) list_move_tail(&uevent->list, &file->event_list); } static ssize_t ucma_migrate_id(struct ucma_file *new_file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_migrate_id cmd; struct rdma_ucm_migrate_resp resp; struct ucma_context *ctx; struct fd f; struct ucma_file *cur_file; int ret = 0; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; /* Get current fd to protect against it being closed */ f = fdget(cmd.fd); if (!f.file) return -ENOENT; /* Validate current fd and prevent destruction of id. */ ctx = ucma_get_ctx(f.file->private_data, cmd.id); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto file_put; } cur_file = ctx->file; if (cur_file == new_file) { resp.events_reported = ctx->events_reported; goto response; } /* * Migrate events between fd's, maintaining order, and avoiding new * events being added before existing events. 
*/ ucma_lock_files(cur_file, new_file); mutex_lock(&mut); list_move_tail(&ctx->list, &new_file->ctx_list); ucma_move_events(ctx, new_file); ctx->file = new_file; resp.events_reported = ctx->events_reported; mutex_unlock(&mut); ucma_unlock_files(cur_file, new_file); response: if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) ret = -EFAULT; ucma_put_ctx(ctx); file_put: fdput(f); return ret; } static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) = { [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, [RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip, [RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip, [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route, [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, [RDMA_USER_CM_CMD_REJECT] = ucma_reject, [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, [RDMA_USER_CM_CMD_GET_OPTION] = NULL, [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option, [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast, [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id, [RDMA_USER_CM_CMD_QUERY] = ucma_query, [RDMA_USER_CM_CMD_BIND] = ucma_bind, [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast }; static ssize_t ucma_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) { struct ucma_file *file = filp->private_data; struct rdma_ucm_cmd_hdr hdr; ssize_t ret; if (len < sizeof(hdr)) return -EINVAL; if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) return -EINVAL; if (hdr.in + sizeof(hdr) > len) return -EINVAL; if (!ucma_cmd_table[hdr.cmd]) return -ENOSYS; ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); if (!ret) ret = len; return ret; } static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait) { struct ucma_file *file = filp->private_data; unsigned int mask = 0; poll_wait(filp, &file->poll_wait, wait); if (!list_empty(&file->event_list)) mask = POLLIN | POLLRDNORM; return mask; } /* * ucma_open() does not need the BKL: * * - no global state is referred to; * - there is no ioctl method to race against; * - no further module initialization is required for open to work * after the device is registered. 
*/ static int ucma_open(struct inode *inode, struct file *filp) { struct ucma_file *file; file = kmalloc(sizeof *file, GFP_KERNEL); if (!file) return -ENOMEM; file->close_wq = create_singlethread_workqueue("ucma_close_id"); if (!file->close_wq) { kfree(file); return -ENOMEM; } INIT_LIST_HEAD(&file->event_list); INIT_LIST_HEAD(&file->ctx_list); init_waitqueue_head(&file->poll_wait); mutex_init(&file->mut); filp->private_data = file; file->filp = filp; return nonseekable_open(inode, filp); } static int ucma_close(struct inode *inode, struct file *filp) { struct ucma_file *file = filp->private_data; struct ucma_context *ctx, *tmp; mutex_lock(&file->mut); list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) { ctx->destroying = 1; mutex_unlock(&file->mut); mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); flush_workqueue(file->close_wq); /* At that step once ctx was marked as destroying and workqueue * was flushed we are safe from any inflights handlers that * might put other closing task. */ mutex_lock(&mut); if (!ctx->closing) { mutex_unlock(&mut); /* rdma_destroy_id ensures that no event handlers are * inflight for that id before releasing it. */ rdma_destroy_id(ctx->cm_id); } else { mutex_unlock(&mut); } ucma_free_ctx(ctx); mutex_lock(&file->mut); } mutex_unlock(&file->mut); destroy_workqueue(file->close_wq); kfree(file); return 0; } static const struct file_operations ucma_fops = { .owner = THIS_MODULE, .open = ucma_open, .release = ucma_close, .write = ucma_write, .poll = ucma_poll, .llseek = no_llseek, }; static struct miscdevice ucma_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "rdma_cm", .nodename = "infiniband/rdma_cm", .mode = 0666, .fops = &ucma_fops, }; static ssize_t show_abi_version(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION); } static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); static int __init ucma_init(void) { int ret; ret = misc_register(&ucma_misc); if (ret) return ret; ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version); if (ret) { pr_err("rdma_ucm: couldn't create abi_version attr\n"); goto err1; } ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table); if (!ucma_ctl_table_hdr) { pr_err("rdma_ucm: couldn't register sysctl paths\n"); ret = -ENOMEM; goto err2; } return 0; err2: device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); err1: misc_deregister(&ucma_misc); return ret; } static void __exit ucma_cleanup(void) { unregister_net_sysctl_table(ucma_ctl_table_hdr); device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); misc_deregister(&ucma_misc); idr_destroy(&ctx_idr); idr_destroy(&multicast_idr); } module_init(ucma_init); module_exit(ucma_cleanup);
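/*
 * Illustration: every request written to /dev/infiniband/rdma_cm reaches
 * ucma_write() above as a struct rdma_ucm_cmd_hdr followed by the command
 * payload; hdr.cmd indexes ucma_cmd_table[] while hdr.in and hdr.out carry
 * the payload and response sizes. A minimal user-space sketch of that
 * framing, assuming the header layout from <rdma/rdma_user_cm.h>; a sketch,
 * not a reference client.
 */
#if 0 /* user-space illustration, excluded from the kernel build */
#include <linux/types.h>
#include <rdma/rdma_user_cm.h>
#include <string.h>
#include <unistd.h>

static ssize_t ucma_send_cmd(int fd, __u32 cmd, const void *in, __u16 in_len,
			     __u16 out_len)
{
	char buf[sizeof(struct rdma_ucm_cmd_hdr) + 256];
	struct rdma_ucm_cmd_hdr *hdr = (struct rdma_ucm_cmd_hdr *) buf;

	if (in_len > 256)
		return -1;
	hdr->cmd = cmd;		/* index into ucma_cmd_table[] */
	hdr->in = in_len;	/* bytes that follow this header */
	hdr->out = out_len;	/* size of the user response buffer */
	memcpy(buf + sizeof(*hdr), in, in_len);
	/* ucma_write() returns -EINVAL unless sizeof(hdr) + hdr.in <= len */
	return write(fd, buf, sizeof(*hdr) + in_len);
}
#endif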
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5054_1
crossvul-cpp_data_bad_5861_3
/* * Glue code for the SHA512 Secure Hash Algorithm assembly implementation * using NEON instructions. * * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This file is based on sha512_ssse3_glue.c: * Copyright (C) 2013 Intel Corporation * Author: Tim Chen <tim.c.chen@linux.intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/cryptohash.h> #include <linux/types.h> #include <linux/string.h> #include <crypto/sha.h> #include <asm/byteorder.h> #include <asm/simd.h> #include <asm/neon.h> static const u64 sha512_k[] = { 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL }; asmlinkage void sha512_transform_neon(u64 *digest, const void *data, const u64 k[], unsigned int num_blks); static int sha512_neon_init(struct shash_desc *desc) { struct sha512_state *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA512_H0; sctx->state[1] = SHA512_H1; sctx->state[2] = SHA512_H2; sctx->state[3] = SHA512_H3; sctx->state[4] = SHA512_H4; sctx->state[5] = SHA512_H5; sctx->state[6] = SHA512_H6; sctx->state[7] = SHA512_H7; sctx->count[0] = sctx->count[1] = 0; return 0; } static int __sha512_neon_update(struct shash_desc *desc, const u8 *data, unsigned int len, unsigned int partial) { struct sha512_state *sctx = shash_desc_ctx(desc); unsigned int done = 0; sctx->count[0] += len; if (sctx->count[0] < len) sctx->count[1]++; if (partial) { done = SHA512_BLOCK_SIZE - partial; memcpy(sctx->buf + partial, data, done); 
sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1); } if (len - done >= SHA512_BLOCK_SIZE) { const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE; sha512_transform_neon(sctx->state, data + done, sha512_k, rounds); done += rounds * SHA512_BLOCK_SIZE; } memcpy(sctx->buf, data + done, len - done); return 0; } static int sha512_neon_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha512_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; int res; /* Handle the fast case right here */ if (partial + len < SHA512_BLOCK_SIZE) { sctx->count[0] += len; if (sctx->count[0] < len) sctx->count[1]++; memcpy(sctx->buf + partial, data, len); return 0; } if (!may_use_simd()) { res = crypto_sha512_update(desc, data, len); } else { kernel_neon_begin(); res = __sha512_neon_update(desc, data, len, partial); kernel_neon_end(); } return res; } /* Add padding and return the message digest. */ static int sha512_neon_final(struct shash_desc *desc, u8 *out) { struct sha512_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; __be64 *dst = (__be64 *)out; __be64 bits[2]; static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, }; /* save number of bits */ bits[1] = cpu_to_be64(sctx->count[0] << 3); bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); /* Pad out to 112 mod 128 and append length */ index = sctx->count[0] & 0x7f; padlen = (index < 112) ? (112 - index) : ((128+112) - index); if (!may_use_simd()) { crypto_sha512_update(desc, padding, padlen); crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits)); } else { kernel_neon_begin(); /* We need to fill a whole block for __sha512_neon_update() */ if (padlen <= 112) { sctx->count[0] += padlen; if (sctx->count[0] < padlen) sctx->count[1]++; memcpy(sctx->buf + index, padding, padlen); } else { __sha512_neon_update(desc, padding, padlen, index); } __sha512_neon_update(desc, (const u8 *)&bits, sizeof(bits), 112); kernel_neon_end(); } /* Store state in digest */ for (i = 0; i < 8; i++) dst[i] = cpu_to_be64(sctx->state[i]); /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int sha512_neon_export(struct shash_desc *desc, void *out) { struct sha512_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int sha512_neon_import(struct shash_desc *desc, const void *in) { struct sha512_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static int sha384_neon_init(struct shash_desc *desc) { struct sha512_state *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA384_H0; sctx->state[1] = SHA384_H1; sctx->state[2] = SHA384_H2; sctx->state[3] = SHA384_H3; sctx->state[4] = SHA384_H4; sctx->state[5] = SHA384_H5; sctx->state[6] = SHA384_H6; sctx->state[7] = SHA384_H7; sctx->count[0] = sctx->count[1] = 0; return 0; } static int sha384_neon_final(struct shash_desc *desc, u8 *hash) { u8 D[SHA512_DIGEST_SIZE]; sha512_neon_final(desc, D); memcpy(hash, D, SHA384_DIGEST_SIZE); memset(D, 0, SHA512_DIGEST_SIZE); return 0; } static struct shash_alg algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_neon_init, .update = sha512_neon_update, .final = sha512_neon_final, .export = sha512_neon_export, .import = sha512_neon_import, .descsize = sizeof(struct sha512_state), .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-neon", .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } 
}, { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_neon_init, .update = sha512_neon_update, .final = sha384_neon_final, .export = sha512_neon_export, .import = sha512_neon_import, .descsize = sizeof(struct sha512_state), .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name = "sha384-neon", .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } } }; static int __init sha512_neon_mod_init(void) { if (!cpu_has_neon()) return -ENODEV; return crypto_register_shashes(algs, ARRAY_SIZE(algs)); } static void __exit sha512_neon_mod_fini(void) { crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } module_init(sha512_neon_mod_init); module_exit(sha512_neon_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated"); MODULE_ALIAS("sha512"); MODULE_ALIAS("sha384");
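/*
 * Worked example: the finalization above pads so that, after the 0x80 byte
 * and zero fill, the buffered length is congruent to 112 mod 128, leaving
 * exactly 16 bytes for the big-endian 128-bit bit count. The same arithmetic
 * in isolation (illustrative sketch only):
 */
#if 0 /* standalone illustration, not built with the module */
#include <stdio.h>

/* index = bytes already buffered modulo the 128-byte SHA-512 block size */
static unsigned int sha512_padlen(unsigned int index)
{
	return (index < 112) ? (112 - index) : ((128 + 112) - index);
}

int main(void)
{
	/* index 0   -> 112 pad bytes: 0   + 112 + 16 = 128, one block  */
	/* index 111 ->   1 pad byte:  111 +   1 + 16 = 128, one block  */
	/* index 112 -> 128 pad bytes: 112 + 128 + 16 = 256, two blocks */
	printf("%u %u %u\n", sha512_padlen(0), sha512_padlen(111),
	       sha512_padlen(112));
	return 0;
}
#endif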
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_3
crossvul-cpp_data_good_5861_12
/* * Cryptographic API. * * s390 implementation of the SHA512 and SHA384 Secure Hash Algorithm. * * Copyright IBM Corp. 2007 * Author(s): Jan Glauber (jang@de.ibm.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include "sha.h" #include "crypt_s390.h" static int sha512_init(struct shash_desc *desc) { struct s390_sha_ctx *ctx = shash_desc_ctx(desc); *(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL; *(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL; *(__u64 *)&ctx->state[4] = 0x3c6ef372fe94f82bULL; *(__u64 *)&ctx->state[6] = 0xa54ff53a5f1d36f1ULL; *(__u64 *)&ctx->state[8] = 0x510e527fade682d1ULL; *(__u64 *)&ctx->state[10] = 0x9b05688c2b3e6c1fULL; *(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL; *(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL; ctx->count = 0; ctx->func = KIMD_SHA_512; return 0; } static int sha512_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); struct sha512_state *octx = out; octx->count[0] = sctx->count; octx->count[1] = 0; memcpy(octx->state, sctx->state, sizeof(octx->state)); memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); return 0; } static int sha512_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); const struct sha512_state *ictx = in; if (unlikely(ictx->count[1])) return -ERANGE; sctx->count = ictx->count[0]; memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sctx->func = KIMD_SHA_512; return 0; } static struct shash_alg sha512_alg = { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_init, .update = s390_sha_update, .final = s390_sha_final, .export = sha512_export, .import = sha512_import, .descsize = sizeof(struct s390_sha_ctx), .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name= "sha512-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; MODULE_ALIAS_CRYPTO("sha512"); static int sha384_init(struct shash_desc *desc) { struct s390_sha_ctx *ctx = shash_desc_ctx(desc); *(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL; *(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL; *(__u64 *)&ctx->state[4] = 0x9159015a3070dd17ULL; *(__u64 *)&ctx->state[6] = 0x152fecd8f70e5939ULL; *(__u64 *)&ctx->state[8] = 0x67332667ffc00b31ULL; *(__u64 *)&ctx->state[10] = 0x8eb44a8768581511ULL; *(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL; *(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL; ctx->count = 0; ctx->func = KIMD_SHA_512; return 0; } static struct shash_alg sha384_alg = { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_init, .update = s390_sha_update, .final = s390_sha_final, .export = sha512_export, .import = sha512_import, .descsize = sizeof(struct s390_sha_ctx), .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name= "sha384-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_sha_ctx), .cra_module = THIS_MODULE, } }; MODULE_ALIAS_CRYPTO("sha384"); static int __init init(void) { int ret; if
(!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA)) return -EOPNOTSUPP; if ((ret = crypto_register_shash(&sha512_alg)) < 0) goto out; if ((ret = crypto_register_shash(&sha384_alg)) < 0) crypto_unregister_shash(&sha512_alg); out: return ret; } static void __exit fini(void) { crypto_unregister_shash(&sha512_alg); crypto_unregister_shash(&sha384_alg); } module_init(init); module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA512 and SHA-384 Secure Hash Algorithm");
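/*
 * Note: sha512_export()/sha512_import() above let a partial hash state cross
 * descriptor boundaries. Because the s390 context keeps a single 64-bit byte
 * counter, sha512_import() refuses any state whose upper count word is set
 * (-ERANGE) instead of silently truncating it. A minimal sketch of a caller,
 * assuming the generic crypto_shash_export()/crypto_shash_import() helpers:
 */
#if 0 /* illustrative sketch only */
static int sha512_clone_state(struct shash_desc *src, struct shash_desc *dst)
{
	struct sha512_state state;
	int err;

	err = crypto_shash_export(src, &state);	/* fills count[2], state, buf */
	if (err)
		return err;
	/* on this driver, fails with -ERANGE if state.count[1] != 0 */
	return crypto_shash_import(dst, &state);
}
#endif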
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_12
crossvul-cpp_data_bad_3647_1
/* * linux/fs/hfsplus/dir.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handling of directories */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/random.h> #include "hfsplus_fs.h" #include "hfsplus_raw.h" static inline void hfsplus_instantiate(struct dentry *dentry, struct inode *inode, u32 cnid) { dentry->d_fsdata = (void *)(unsigned long)cnid; d_instantiate(dentry, inode); } /* Find the entry inside dir named dentry->d_name */ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; struct hfs_find_data fd; struct super_block *sb; hfsplus_cat_entry entry; int err; u32 cnid, linkid = 0; u16 type; sb = dir->i_sb; dentry->d_fsdata = NULL; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return ERR_PTR(err); hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); again: err = hfs_brec_read(&fd, &entry, sizeof(entry)); if (err) { if (err == -ENOENT) { hfs_find_exit(&fd); /* No such entry */ inode = NULL; goto out; } goto fail; } type = be16_to_cpu(entry.type); if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { err = -EIO; goto fail; } cnid = be32_to_cpu(entry.folder.id); dentry->d_fsdata = (void *)(unsigned long)cnid; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { err = -EIO; goto fail; } cnid = be32_to_cpu(entry.file.id); if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) && entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) && (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)-> create_date || entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode)-> create_date) && HFSPLUS_SB(sb)->hidden_dir) { struct qstr str; char name[32]; if (dentry->d_fsdata) { /* * We found a link pointing to another link, * so ignore it and treat it as regular file. */ cnid = (unsigned long)dentry->d_fsdata; linkid = 0; } else { dentry->d_fsdata = (void *)(unsigned long)cnid; linkid = be32_to_cpu(entry.file.permissions.dev); str.len = sprintf(name, "iNode%d", linkid); str.name = name; hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb)->hidden_dir->i_ino, &str); goto again; } } else if (!dentry->d_fsdata) dentry->d_fsdata = (void *)(unsigned long)cnid; } else { printk(KERN_ERR "hfs: invalid catalog entry type in lookup\n"); err = -EIO; goto fail; } hfs_find_exit(&fd); inode = hfsplus_iget(dir->i_sb, cnid); if (IS_ERR(inode)) return ERR_CAST(inode); if (S_ISREG(inode->i_mode)) HFSPLUS_I(inode)->linkid = linkid; out: d_add(dentry, inode); return NULL; fail: hfs_find_exit(&fd); return ERR_PTR(err); } static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; hfsplus_cat_entry entry; struct hfs_find_data fd; struct hfsplus_readdir_data *rd; u16 type; if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return err; hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd); if (err) goto out; switch ((u32)filp->f_pos) { case 0: /* This is completely artificial... 
*/ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; filp->f_pos++; /* fall through */ case 1: hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { printk(KERN_ERR "hfs: bad catalog folder thread\n"); err = -EIO; goto out; } if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { printk(KERN_ERR "hfs: truncated catalog thread\n"); err = -EIO; goto out; } if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; filp->f_pos++; /* fall through */ default: if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, filp->f_pos - 1); if (err) goto out; } for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { printk(KERN_ERR "hfs: walked past end of dir\n"); err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = be16_to_cpu(entry.type); len = HFSPLUS_MAX_STRLEN; err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len); if (err) goto out; if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { printk(KERN_ERR "hfs: small dir entry\n"); err = -EIO; goto out; } if (HFSPLUS_SB(sb)->hidden_dir && HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { printk(KERN_ERR "hfs: small file entry\n"); err = -EIO; goto out; } if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.id), DT_REG)) break; } else { printk(KERN_ERR "hfs: bad catalog entry type\n"); err = -EIO; goto out; } next: filp->f_pos++; if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } filp->private_data = rd; rd->file = filp; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); out: hfs_find_exit(&fd); return err; } static int hfsplus_dir_release(struct inode *inode, struct file *file) { struct hfsplus_readdir_data *rd = file->private_data; if (rd) { mutex_lock(&inode->i_mutex); list_del(&rd->list); mutex_unlock(&inode->i_mutex); kfree(rd); } return 0; } static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, struct dentry *dst_dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb); struct inode *inode = src_dentry->d_inode; struct inode *src_dir = src_dentry->d_parent->d_inode; struct qstr str; char name[32]; u32 cnid, id; int res; if (HFSPLUS_IS_RSRC(inode)) return -EPERM; if (!S_ISREG(inode->i_mode)) return -EPERM; mutex_lock(&sbi->vh_mutex); if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) { for (;;) { get_random_bytes(&id, sizeof(cnid)); id &= 0x3fffffff; str.name = name; str.len = sprintf(name, "iNode%d", id); res = hfsplus_rename_cat(inode->i_ino, src_dir, &src_dentry->d_name, sbi->hidden_dir, &str); if (!res) break; if (res != -EEXIST) goto out; } HFSPLUS_I(inode)->linkid = id; cnid = sbi->next_cnid++; src_dentry->d_fsdata = (void *)(unsigned long)cnid; res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode); if (res) /* panic? 
*/ goto out; sbi->file_count++; } cnid = sbi->next_cnid++; res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode); if (res) goto out; inc_nlink(inode); hfsplus_instantiate(dst_dentry, inode, cnid); ihold(inode); inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); sbi->file_count++; dst_dir->i_sb->s_dirt = 1; out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_unlink(struct inode *dir, struct dentry *dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode = dentry->d_inode; struct qstr str; char name[32]; u32 cnid; int res; if (HFSPLUS_IS_RSRC(inode)) return -EPERM; mutex_lock(&sbi->vh_mutex); cnid = (u32)(unsigned long)dentry->d_fsdata; if (inode->i_ino == cnid && atomic_read(&HFSPLUS_I(inode)->opencnt)) { str.name = name; str.len = sprintf(name, "temp%lu", inode->i_ino); res = hfsplus_rename_cat(inode->i_ino, dir, &dentry->d_name, sbi->hidden_dir, &str); if (!res) { inode->i_flags |= S_DEAD; drop_nlink(inode); } goto out; } res = hfsplus_delete_cat(cnid, dir, &dentry->d_name); if (res) goto out; if (inode->i_nlink > 0) drop_nlink(inode); if (inode->i_ino == cnid) clear_nlink(inode); if (!inode->i_nlink) { if (inode->i_ino != cnid) { sbi->file_count--; if (!atomic_read(&HFSPLUS_I(inode)->opencnt)) { res = hfsplus_delete_cat(inode->i_ino, sbi->hidden_dir, NULL); if (!res) hfsplus_delete_inode(inode); } else inode->i_flags |= S_DEAD; } else hfsplus_delete_inode(inode); } else sbi->file_count--; inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode = dentry->d_inode; int res; if (inode->i_size != 2) return -ENOTEMPTY; mutex_lock(&sbi->vh_mutex); res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name); if (res) goto out; clear_nlink(inode); inode->i_ctime = CURRENT_TIME_SEC; hfsplus_delete_inode(inode); mark_inode_dirty(inode); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode; int res = -ENOSPC; mutex_lock(&sbi->vh_mutex); inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO); if (!inode) goto out; res = page_symlink(inode, symname, strlen(symname) + 1); if (res) goto out_err; res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); if (res) goto out_err; hfsplus_instantiate(dentry, inode, inode->i_ino); mark_inode_dirty(inode); goto out; out_err: clear_nlink(inode); hfsplus_delete_inode(inode); iput(inode); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode; int res = -ENOSPC; mutex_lock(&sbi->vh_mutex); inode = hfsplus_new_inode(dir->i_sb, mode); if (!inode) goto out; if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) init_special_inode(inode, mode, rdev); res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); if (res) { clear_nlink(inode); hfsplus_delete_inode(inode); iput(inode); goto out; } hfsplus_instantiate(dentry, inode, inode->i_ino); mark_inode_dirty(inode); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { return hfsplus_mknod(dir, dentry, 
mode, 0); } static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0); } static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { int res; /* Unlink destination if it already exists */ if (new_dentry->d_inode) { if (S_ISDIR(new_dentry->d_inode->i_mode)) res = hfsplus_rmdir(new_dir, new_dentry); else res = hfsplus_unlink(new_dir, new_dentry); if (res) return res; } res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name); if (!res) new_dentry->d_fsdata = old_dentry->d_fsdata; return res; } const struct inode_operations hfsplus_dir_inode_operations = { .lookup = hfsplus_lookup, .create = hfsplus_create, .link = hfsplus_link, .unlink = hfsplus_unlink, .mkdir = hfsplus_mkdir, .rmdir = hfsplus_rmdir, .symlink = hfsplus_symlink, .mknod = hfsplus_mknod, .rename = hfsplus_rename, }; const struct file_operations hfsplus_dir_operations = { .fsync = hfsplus_file_fsync, .read = generic_read_dir, .readdir = hfsplus_readdir, .unlocked_ioctl = hfsplus_ioctl, .llseek = generic_file_llseek, .release = hfsplus_dir_release, };
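/*
 * Note: HFS+ has no native hard links, so hfsplus_link() above emulates them
 * by renaming the target into the hidden directory under an "iNode%d" name
 * keyed by a random 30-bit link id, retrying on -EEXIST until an unused id
 * is found; hfsplus_lookup() then follows such entries back to the hidden
 * file. The allocation idiom in isolation, with try_claim_id() as a
 * hypothetical stand-in for the catalog rename (sketch only):
 */
#if 0 /* illustrative sketch only */
static int alloc_unique_link_id(u32 *out)
{
	u32 id;
	int res;

	for (;;) {
		get_random_bytes(&id, sizeof(id));
		id &= 0x3fffffff;	/* 30-bit range, as in hfsplus_link() */
		res = try_claim_id(id);	/* hypothetical: -EEXIST if taken */
		if (!res) {
			*out = id;
			return 0;
		}
		if (res != -EEXIST)
			return res;	/* real failure, give up */
		/* collision: draw another id and retry */
	}
}
#endif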
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3647_1
crossvul-cpp_data_bad_2292_2
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mod_lua.h" #include <string.h> #include <stdlib.h> #include <ctype.h> #include <apr_thread_mutex.h> #include <apr_pools.h> #include "lua_apr.h" #include "lua_config.h" #include "apr_optional.h" #include "mod_ssl.h" #include "mod_auth.h" #include "util_mutex.h" #ifdef APR_HAS_THREADS #include "apr_thread_proc.h" #endif /* getpid for *NIX */ #if APR_HAVE_SYS_TYPES_H #include <sys/types.h> #endif #if APR_HAVE_UNISTD_H #include <unistd.h> #endif /* getpid for Windows */ #if APR_HAVE_PROCESS_H #include <process.h> #endif APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ap_lua, AP_LUA, int, lua_open, (lua_State *L, apr_pool_t *p), (L, p), OK, DECLINED) APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ap_lua, AP_LUA, int, lua_request, (lua_State *L, request_rec *r), (L, r), OK, DECLINED) static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *lua_ssl_val = NULL; static APR_OPTIONAL_FN_TYPE(ssl_is_https) *lua_ssl_is_https = NULL; module AP_MODULE_DECLARE_DATA lua_module; #define AP_LUA_HOOK_FIRST (APR_HOOK_FIRST - 1) #define AP_LUA_HOOK_LAST (APR_HOOK_LAST + 1) typedef struct { const char *name; const char *file_name; const char *function_name; ap_lua_vm_spec *spec; apr_array_header_t *args; } lua_authz_provider_spec; apr_hash_t *lua_authz_providers; typedef struct { apr_bucket_brigade *tmpBucket; lua_State *L; ap_lua_vm_spec *spec; int broken; } lua_filter_ctx; apr_global_mutex_t *lua_ivm_mutex; apr_shm_t *lua_ivm_shm; char *lua_ivm_shmfile; static apr_status_t shm_cleanup_wrapper(void *unused) { if (lua_ivm_shm) { return apr_shm_destroy(lua_ivm_shm); } return OK; } /** * error reporting if lua has an error. 
* Extracts the error from lua stack and prints */ static void report_lua_error(lua_State *L, request_rec *r) { const char *lua_response; r->status = HTTP_INTERNAL_SERVER_ERROR; r->content_type = "text/html"; ap_rputs("<h3>Error!</h3>\n", r); ap_rputs("<pre>", r); lua_response = lua_tostring(L, -1); ap_rputs(ap_escape_html(r->pool, lua_response), r); ap_rputs("</pre>\n", r); ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, r->pool, APLOGNO(01471) "Lua error: %s", lua_response); } static void lua_open_callback(lua_State *L, apr_pool_t *p, void *ctx) { ap_lua_init(L, p); ap_lua_load_apache2_lmodule(L); ap_lua_load_request_lmodule(L, p); ap_lua_load_config_lmodule(L); } static int lua_open_hook(lua_State *L, apr_pool_t *p) { lua_open_callback(L, p, NULL); return OK; } static const char *scope_to_string(unsigned int scope) { switch (scope) { case AP_LUA_SCOPE_ONCE: case AP_LUA_SCOPE_UNSET: return "once"; case AP_LUA_SCOPE_REQUEST: return "request"; case AP_LUA_SCOPE_CONN: return "conn"; #if APR_HAS_THREADS case AP_LUA_SCOPE_THREAD: return "thread"; case AP_LUA_SCOPE_SERVER: return "server"; #endif default: ap_assert(0); return 0; } } static void ap_lua_release_state(lua_State* L, ap_lua_vm_spec* spec, request_rec* r) { char *hash; apr_reslist_t* reslist = NULL; if (spec->scope == AP_LUA_SCOPE_SERVER) { ap_lua_server_spec* sspec = NULL; lua_settop(L, 0); lua_getfield(L, LUA_REGISTRYINDEX, "Apache2.Lua.server_spec"); sspec = (ap_lua_server_spec*) lua_touserdata(L, 1); hash = apr_psprintf(r->pool, "reslist:%s", spec->file); if (apr_pool_userdata_get((void **)&reslist, hash, r->server->process->pool) == APR_SUCCESS) { AP_DEBUG_ASSERT(sspec != NULL); if (reslist != NULL) { apr_reslist_release(reslist, sspec); } } } } static ap_lua_vm_spec *create_vm_spec(apr_pool_t **lifecycle_pool, request_rec *r, const ap_lua_dir_cfg *cfg, const ap_lua_server_cfg *server_cfg, const char *filename, const char *bytecode, apr_size_t bytecode_len, const char *function, const char *what) { apr_pool_t *pool; ap_lua_vm_spec *spec = apr_pcalloc(r->pool, sizeof(ap_lua_vm_spec)); spec->scope = cfg->vm_scope; spec->pool = r->pool; spec->package_paths = cfg->package_paths; spec->package_cpaths = cfg->package_cpaths; spec->cb = &lua_open_callback; spec->cb_arg = NULL; spec->bytecode = bytecode; spec->bytecode_len = bytecode_len; spec->codecache = (cfg->codecache == AP_LUA_CACHE_UNSET) ? AP_LUA_CACHE_STAT : cfg->codecache; spec->vm_min = cfg->vm_min ? cfg->vm_min : 1; spec->vm_max = cfg->vm_max ? cfg->vm_max : 1; if (filename) { char *file; apr_filepath_merge(&file, server_cfg->root_path, filename, APR_FILEPATH_NOTRELATIVE, r->pool); spec->file = file; } else { spec->file = r->filename; } ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, APLOGNO(02313) "%s details: scope: %s, file: %s, func: %s", what, scope_to_string(spec->scope), spec->file, function ? 
function : "-"); switch (spec->scope) { case AP_LUA_SCOPE_ONCE: case AP_LUA_SCOPE_UNSET: apr_pool_create(&pool, r->pool); break; case AP_LUA_SCOPE_REQUEST: pool = r->pool; break; case AP_LUA_SCOPE_CONN: pool = r->connection->pool; break; #if APR_HAS_THREADS case AP_LUA_SCOPE_THREAD: pool = apr_thread_pool_get(r->connection->current_thread); break; case AP_LUA_SCOPE_SERVER: pool = r->server->process->pool; break; #endif default: ap_assert(0); } *lifecycle_pool = pool; return spec; } static const char* ap_lua_interpolate_string(apr_pool_t* pool, const char* string, const char** values) { char *stringBetween; const char* ret; int srclen,x,y; srclen = strlen(string); ret = ""; y = 0; for (x=0; x < srclen; x++) { if (string[x] == '$' && x != srclen-1 && string[x+1] >= '0' && string[x+1] <= '9') { int v = *(string+x+1) - '0'; if (x-y > 0) { stringBetween = apr_pstrndup(pool, string+y, x-y); } else { stringBetween = ""; } ret = apr_pstrcat(pool, ret, stringBetween, values[v], NULL); y = ++x+1; } } if (x-y > 0 && y > 0) { stringBetween = apr_pstrndup(pool, string+y, x-y); ret = apr_pstrcat(pool, ret, stringBetween, NULL); } /* If no replacement was made, just return the original string */ else if (y == 0) { return string; } return ret; } /** * "main" */ static int lua_handler(request_rec *r) { int rc = OK; if (strcmp(r->handler, "lua-script")) { return DECLINED; } /* Decline the request if the script does not exist (or is a directory), * rather than just returning internal server error */ if ( (r->finfo.filetype == APR_NOFILE) || (r->finfo.filetype & APR_DIR) ) { return DECLINED; } ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(01472) "handling [%s] in mod_lua", r->filename); /* XXX: This seems wrong because it may generate wrong headers for HEAD requests */ if (!r->header_only) { lua_State *L; apr_pool_t *pool; const ap_lua_dir_cfg *cfg = ap_get_module_config(r->per_dir_config, &lua_module); ap_lua_vm_spec *spec = create_vm_spec(&pool, r, cfg, NULL, NULL, NULL, 0, "handle", "request handler"); L = ap_lua_get_lua_state(pool, spec, r); if (!L) { /* TODO annotate spec with failure reason */ r->status = HTTP_INTERNAL_SERVER_ERROR; ap_rputs("Unable to compile VM, see logs", r); ap_lua_release_state(L, spec, r); return HTTP_INTERNAL_SERVER_ERROR; } ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, APLOGNO(01474) "got a vm!"); lua_getglobal(L, "handle"); if (!lua_isfunction(L, -1)) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(01475) "lua: Unable to find entry function '%s' in %s (not a valid function)", "handle", spec->file); ap_lua_release_state(L, spec, r); return HTTP_INTERNAL_SERVER_ERROR; } ap_lua_run_lua_request(L, r); if (lua_pcall(L, 1, 1, 0)) { report_lua_error(L, r); } if (lua_isnumber(L, -1)) { rc = lua_tointeger(L, -1); } ap_lua_release_state(L, spec, r); } return rc; } /* ------------------- Input/output content filters ------------------- */ static apr_status_t lua_setup_filter_ctx(ap_filter_t* f, request_rec* r, lua_filter_ctx** c) { apr_pool_t *pool; ap_lua_vm_spec *spec; int n, rc; lua_State *L; lua_filter_ctx *ctx; ap_lua_server_cfg *server_cfg = ap_get_module_config(r->server->module_config, &lua_module); const ap_lua_dir_cfg *cfg = ap_get_module_config(r->per_dir_config, &lua_module); ctx = apr_pcalloc(r->pool, sizeof(lua_filter_ctx)); ctx->broken = 0; *c = ctx; /* Find the filter that was called. * XXX: If we were wired with mod_filter, the filter (mod_filters name) * and the provider (our underlying filters name) need to have matched. 
*/ for (n = 0; n < cfg->mapped_filters->nelts; n++) { ap_lua_filter_handler_spec *hook_spec = ((ap_lua_filter_handler_spec **) cfg->mapped_filters->elts)[n]; if (hook_spec == NULL) { continue; } if (!strcasecmp(hook_spec->filter_name, f->frec->name)) { spec = create_vm_spec(&pool, r, cfg, server_cfg, hook_spec->file_name, NULL, 0, hook_spec->function_name, "filter"); L = ap_lua_get_lua_state(pool, spec, r); if (L) { L = lua_newthread(L); } if (!L) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02328) "lua: Failed to obtain lua interpreter for %s %s", hook_spec->function_name, hook_spec->file_name); ap_lua_release_state(L, spec, r); return APR_EGENERAL; } if (hook_spec->function_name != NULL) { lua_getglobal(L, hook_spec->function_name); if (!lua_isfunction(L, -1)) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02329) "lua: Unable to find entry function '%s' in %s (not a valid function)", hook_spec->function_name, hook_spec->file_name); ap_lua_release_state(L, spec, r); return APR_EGENERAL; } ap_lua_run_lua_request(L, r); } else { int t; ap_lua_run_lua_request(L, r); t = lua_gettop(L); lua_setglobal(L, "r"); lua_settop(L, t); } ctx->L = L; ctx->spec = spec; /* If a Lua filter is interested in filtering a request, it must first do a yield, * otherwise we'll assume that it's not interested and pretend we didn't find it. */ rc = lua_resume(L, 1); if (rc == LUA_YIELD) { if (f->frec->providers == NULL) { /* Not wired by mod_filter */ apr_table_unset(r->headers_out, "Content-Length"); apr_table_unset(r->headers_out, "Content-MD5"); apr_table_unset(r->headers_out, "ETAG"); } return OK; } else { ap_lua_release_state(L, spec, r); return APR_ENOENT; } } } return APR_ENOENT; } static apr_status_t lua_output_filter_handle(ap_filter_t *f, apr_bucket_brigade *pbbIn) { request_rec *r = f->r; int rc; lua_State *L; lua_filter_ctx* ctx; conn_rec *c = r->connection; apr_bucket *pbktIn; apr_status_t rv; /* Set up the initial filter context and acquire the function. * The corresponding Lua function should yield here. 
*/ if (!f->ctx) { rc = lua_setup_filter_ctx(f,r,&ctx); if (rc == APR_EGENERAL) { return HTTP_INTERNAL_SERVER_ERROR; } if (rc == APR_ENOENT) { /* No filter entry found (or the script declined to filter), just pass on the buckets */ ap_remove_output_filter(f); return ap_pass_brigade(f->next,pbbIn); } else { /* We've got a willing lua filter, setup and check for a prefix */ size_t olen; apr_bucket *pbktOut; const char* output = lua_tolstring(ctx->L, 1, &olen); f->ctx = ctx; ctx->tmpBucket = apr_brigade_create(r->pool, c->bucket_alloc); if (olen > 0) { pbktOut = apr_bucket_heap_create(output, olen, NULL, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(ctx->tmpBucket, pbktOut); rv = ap_pass_brigade(f->next, ctx->tmpBucket); apr_brigade_cleanup(ctx->tmpBucket); if (rv != APR_SUCCESS) { return rv; } } } } ctx = (lua_filter_ctx*) f->ctx; L = ctx->L; /* While the Lua function is still yielding, pass in buckets to the coroutine */ if (!ctx->broken) { for (pbktIn = APR_BRIGADE_FIRST(pbbIn); pbktIn != APR_BRIGADE_SENTINEL(pbbIn); pbktIn = APR_BUCKET_NEXT(pbktIn)) { const char *data; apr_size_t len; apr_bucket *pbktOut; /* read the bucket */ apr_bucket_read(pbktIn,&data,&len,APR_BLOCK_READ); /* Push the bucket onto the Lua stack as a global var */ lua_pushlstring(L, data, len); lua_setglobal(L, "bucket"); /* If Lua yielded, it means we have something to pass on */ if (lua_resume(L, 0) == LUA_YIELD) { size_t olen; const char* output = lua_tolstring(L, 1, &olen); if (olen > 0) { pbktOut = apr_bucket_heap_create(output, olen, NULL, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(ctx->tmpBucket, pbktOut); rv = ap_pass_brigade(f->next, ctx->tmpBucket); apr_brigade_cleanup(ctx->tmpBucket); if (rv != APR_SUCCESS) { return rv; } } } else { ctx->broken = 1; ap_lua_release_state(L, ctx->spec, r); ap_remove_output_filter(f); apr_brigade_cleanup(pbbIn); apr_brigade_cleanup(ctx->tmpBucket); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02663) "lua: Error while executing filter: %s", lua_tostring(L, -1)); return HTTP_INTERNAL_SERVER_ERROR; } } /* If we've safely reached the end, do a final call to Lua to allow for any finishing moves by the script, such as appending a tail. */ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pbbIn))) { apr_bucket *pbktEOS; lua_pushnil(L); lua_setglobal(L, "bucket"); if (lua_resume(L, 0) == LUA_YIELD) { apr_bucket *pbktOut; size_t olen; const char* output = lua_tolstring(L, 1, &olen); if (olen > 0) { pbktOut = apr_bucket_heap_create(output, olen, NULL, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(ctx->tmpBucket, pbktOut); } } pbktEOS = apr_bucket_eos_create(c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(ctx->tmpBucket, pbktEOS); ap_lua_release_state(L, ctx->spec, r); rv = ap_pass_brigade(f->next, ctx->tmpBucket); apr_brigade_cleanup(ctx->tmpBucket); if (rv != APR_SUCCESS) { return rv; } } } /* Clean up */ apr_brigade_cleanup(pbbIn); return APR_SUCCESS; } static apr_status_t lua_input_filter_handle(ap_filter_t *f, apr_bucket_brigade *pbbOut, ap_input_mode_t eMode, apr_read_type_e eBlock, apr_off_t nBytes) { request_rec *r = f->r; int rc, lastCall = 0; lua_State *L; lua_filter_ctx* ctx; conn_rec *c = r->connection; apr_status_t ret; /* Set up the initial filter context and acquire the function. * The corresponding Lua function should yield here. 
*/ if (!f->ctx) { rc = lua_setup_filter_ctx(f,r,&ctx); f->ctx = ctx; if (rc == APR_EGENERAL) { ctx->broken = 1; ap_remove_input_filter(f); return HTTP_INTERNAL_SERVER_ERROR; } if (rc == APR_ENOENT ) { ap_remove_input_filter(f); ctx->broken = 1; } if (rc == APR_SUCCESS) { ctx->tmpBucket = apr_brigade_create(r->pool, c->bucket_alloc); } } ctx = (lua_filter_ctx*) f->ctx; L = ctx->L; /* If the Lua script broke or denied serving the request, just pass the buckets through */ if (ctx->broken) { return ap_get_brigade(f->next, pbbOut, eMode, eBlock, nBytes); } if (APR_BRIGADE_EMPTY(ctx->tmpBucket)) { ret = ap_get_brigade(f->next, ctx->tmpBucket, eMode, eBlock, nBytes); if (eMode == AP_MODE_EATCRLF || ret != APR_SUCCESS) return ret; } /* While the Lua function is still yielding, pass buckets to the coroutine */ if (!ctx->broken) { lastCall = 0; while(!APR_BRIGADE_EMPTY(ctx->tmpBucket)) { apr_bucket *pbktIn = APR_BRIGADE_FIRST(ctx->tmpBucket); apr_bucket *pbktOut; const char *data; apr_size_t len; if (APR_BUCKET_IS_EOS(pbktIn)) { APR_BUCKET_REMOVE(pbktIn); break; } /* read the bucket */ ret = apr_bucket_read(pbktIn, &data, &len, eBlock); if (ret != APR_SUCCESS) return ret; /* Push the bucket onto the Lua stack as a global var */ lastCall++; lua_pushlstring(L, data, len); lua_setglobal(L, "bucket"); /* If Lua yielded, it means we have something to pass on */ if (lua_resume(L, 0) == LUA_YIELD) { size_t olen; const char* output = lua_tolstring(L, 1, &olen); pbktOut = apr_bucket_heap_create(output, olen, 0, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut); apr_bucket_delete(pbktIn); return APR_SUCCESS; } else { ctx->broken = 1; ap_lua_release_state(L, ctx->spec, r); ap_remove_input_filter(f); apr_bucket_delete(pbktIn); return HTTP_INTERNAL_SERVER_ERROR; } } /* If we've safely reached the end, do a final call to Lua to allow for any finishing moves by the script, such as appending a tail. 
*/ if (lastCall == 0) { apr_bucket *pbktEOS = apr_bucket_eos_create(c->bucket_alloc); lua_pushnil(L); lua_setglobal(L, "bucket"); if (lua_resume(L, 0) == LUA_YIELD) { apr_bucket *pbktOut; size_t olen; const char* output = lua_tolstring(L, 1, &olen); pbktOut = apr_bucket_heap_create(output, olen, 0, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut); } APR_BRIGADE_INSERT_TAIL(pbbOut,pbktEOS); ap_lua_release_state(L, ctx->spec, r); } } return APR_SUCCESS; } /* ---------------- Configury stuff --------------- */ /** harnesses for magic hooks **/ static int lua_request_rec_hook_harness(request_rec *r, const char *name, int apr_hook_when) { int rc; apr_pool_t *pool; lua_State *L; ap_lua_vm_spec *spec; ap_lua_server_cfg *server_cfg = ap_get_module_config(r->server->module_config, &lua_module); const ap_lua_dir_cfg *cfg = ap_get_module_config(r->per_dir_config, &lua_module); const char *key = apr_psprintf(r->pool, "%s_%d", name, apr_hook_when); apr_array_header_t *hook_specs = apr_hash_get(cfg->hooks, key, APR_HASH_KEY_STRING); if (hook_specs) { int i; for (i = 0; i < hook_specs->nelts; i++) { ap_lua_mapped_handler_spec *hook_spec = ((ap_lua_mapped_handler_spec **) hook_specs->elts)[i]; if (hook_spec == NULL) { continue; } spec = create_vm_spec(&pool, r, cfg, server_cfg, hook_spec->file_name, hook_spec->bytecode, hook_spec->bytecode_len, hook_spec->function_name, "request hook"); L = ap_lua_get_lua_state(pool, spec, r); if (!L) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(01477) "lua: Failed to obtain lua interpreter for entry function '%s' in %s", hook_spec->function_name, hook_spec->file_name); return HTTP_INTERNAL_SERVER_ERROR; } if (hook_spec->function_name != NULL) { lua_getglobal(L, hook_spec->function_name); if (!lua_isfunction(L, -1)) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(01478) "lua: Unable to find entry function '%s' in %s (not a valid function)", hook_spec->function_name, hook_spec->file_name); ap_lua_release_state(L, spec, r); return HTTP_INTERNAL_SERVER_ERROR; } ap_lua_run_lua_request(L, r); } else { int t; ap_lua_run_lua_request(L, r); t = lua_gettop(L); lua_setglobal(L, "r"); lua_settop(L, t); } if (lua_pcall(L, 1, 1, 0)) { report_lua_error(L, r); ap_lua_release_state(L, spec, r); return HTTP_INTERNAL_SERVER_ERROR; } rc = DECLINED; if (lua_isnumber(L, -1)) { rc = lua_tointeger(L, -1); ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, "Lua hook %s:%s for phase %s returned %d", hook_spec->file_name, hook_spec->function_name, name, rc); } else { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, "Lua hook %s:%s for phase %s did not return a numeric value", hook_spec->file_name, hook_spec->function_name, name); return HTTP_INTERNAL_SERVER_ERROR; } if (rc != DECLINED) { ap_lua_release_state(L, spec, r); return rc; } ap_lua_release_state(L, spec, r); } } return DECLINED; } /* Fix for making sure that LuaMapHandler works when FallbackResource is set */ static int lua_map_handler_fixups(request_rec *r) { /* If there is no handler set yet, this might be a LuaMapHandler request */ if (r->handler == NULL) { int n = 0; ap_regmatch_t match[10]; const ap_lua_dir_cfg *cfg = ap_get_module_config(r->per_dir_config, &lua_module); for (n = 0; n < cfg->mapped_handlers->nelts; n++) { ap_lua_mapped_handler_spec *hook_spec = ((ap_lua_mapped_handler_spec **) cfg->mapped_handlers->elts)[n]; if (hook_spec == NULL) { continue; } if (!ap_regexec(hook_spec->uri_pattern, r->uri, 10, match, 0)) { r->handler = apr_pstrdup(r->pool, "lua-map-handler"); return OK; } } } return DECLINED; } 
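/*
 * Illustration: lua_map_handler() below re-runs the configured pattern
 * against r->uri and hands the capture groups to ap_lua_interpolate_string(),
 * so a LuaMapHandler pattern such as "/photos/(\d+)" mapped onto
 * "handlers/photo_$1.lua" resolves $1 from the first group. A self-contained
 * sketch of just the substitution step, with values[] pre-filled the way
 * ap_regexec() would fill it (illustrative only):
 */
#if 0 /* illustrative sketch only */
static const char *demo_interpolate(apr_pool_t *pool)
{
	/* values[0] is the whole match, values[1] the first capture group;
	 * unmatched groups are set to "" by lua_map_handler() */
	const char *values[10] = { "/photos/42", "42", "", "", "",
				   "", "", "", "", "" };

	/* returns "handlers/photo_42.lua" */
	return ap_lua_interpolate_string(pool, "handlers/photo_$1.lua", values);
}
#endif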
static int lua_map_handler(request_rec *r) { int rc, n = 0; apr_pool_t *pool; lua_State *L; const char *filename, *function_name; const char *values[10]; ap_lua_vm_spec *spec; ap_regmatch_t match[10]; ap_lua_server_cfg *server_cfg = ap_get_module_config(r->server->module_config, &lua_module); const ap_lua_dir_cfg *cfg = ap_get_module_config(r->per_dir_config, &lua_module); for (n = 0; n < cfg->mapped_handlers->nelts; n++) { ap_lua_mapped_handler_spec *hook_spec = ((ap_lua_mapped_handler_spec **) cfg->mapped_handlers->elts)[n]; if (hook_spec == NULL) { continue; } if (!ap_regexec(hook_spec->uri_pattern, r->uri, 10, match, 0)) { int i; for (i=0 ; i < 10; i++) { if (match[i].rm_eo >= 0) { values[i] = apr_pstrndup(r->pool, r->uri+match[i].rm_so, match[i].rm_eo - match[i].rm_so); } else values[i] = ""; } filename = ap_lua_interpolate_string(r->pool, hook_spec->file_name, values); function_name = ap_lua_interpolate_string(r->pool, hook_spec->function_name, values); spec = create_vm_spec(&pool, r, cfg, server_cfg, filename, hook_spec->bytecode, hook_spec->bytecode_len, function_name, "mapped handler"); L = ap_lua_get_lua_state(pool, spec, r); if (!L) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02330) "lua: Failed to obtain Lua interpreter for entry function '%s' in %s", function_name, filename); ap_lua_release_state(L, spec, r); return HTTP_INTERNAL_SERVER_ERROR; } if (function_name != NULL) { lua_getglobal(L, function_name); if (!lua_isfunction(L, -1)) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02331) "lua: Unable to find entry function '%s' in %s (not a valid function)", function_name, filename); ap_lua_release_state(L, spec, r); return HTTP_INTERNAL_SERVER_ERROR; } ap_lua_run_lua_request(L, r); } else { int t; ap_lua_run_lua_request(L, r); t = lua_gettop(L); lua_setglobal(L, "r"); lua_settop(L, t); } if (lua_pcall(L, 1, 1, 0)) { report_lua_error(L, r); ap_lua_release_state(L, spec, r); return HTTP_INTERNAL_SERVER_ERROR; } rc = DECLINED; if (lua_isnumber(L, -1)) { rc = lua_tointeger(L, -1); } else { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02483) "lua: Lua handler %s in %s did not return a value, assuming apache2.OK", function_name, filename); rc = OK; } ap_lua_release_state(L, spec, r); if (rc != DECLINED) { return rc; } } } return DECLINED; } static apr_size_t config_getstr(ap_configfile_t *cfg, char *buf, size_t bufsiz) { apr_size_t i = 0; if (cfg->getstr) { apr_status_t rc = (cfg->getstr) (buf, bufsiz, cfg->param); if (rc == APR_SUCCESS) { i = strlen(buf); if (i && buf[i - 1] == '\n') ++cfg->line_number; } else { buf[0] = '\0'; i = 0; } } else { while (i < bufsiz) { char ch; apr_status_t rc = (cfg->getch) (&ch, cfg->param); if (rc != APR_SUCCESS) break; buf[i++] = ch; if (ch == '\n') { ++cfg->line_number; break; } } } return i; } typedef struct cr_ctx { cmd_parms *cmd; ap_configfile_t *cfp; size_t startline; const char *endstr; char buf[HUGE_STRING_LEN]; } cr_ctx; /* Okay, this deserves a little explanation -- in order for the errors that lua * generates to be 'accurate', including line numbers, we basically inject * N line number new lines into the 'top' of the chunk reader..... * * be happy. this is cool. * */ static const char *lf = "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"; #define N_LF 32 static const char *direct_chunkreader(lua_State *lvm, void *udata, size_t *plen) { const char *p; struct cr_ctx *ctx = udata; if (ctx->startline) { *plen = ctx->startline > N_LF ?
N_LF : ctx->startline; ctx->startline -= *plen; return lf; } *plen = config_getstr(ctx->cfp, ctx->buf, HUGE_STRING_LEN); for (p = ctx->buf; isspace(*p); ++p); if (p[0] == '<' && p[1] == '/') { apr_size_t i = 0; while (i < strlen(ctx->endstr)) { if (tolower(p[i + 2]) != ctx->endstr[i]) return ctx->buf; ++i; } *plen = 0; return NULL; } /*fprintf(stderr, "buf read: %s\n", ctx->buf); */ return ctx->buf; } static int ldump_writer(lua_State *L, const void *b, size_t size, void *B) { (void) L; luaL_addlstring((luaL_Buffer *) B, (const char *) b, size); return 0; } typedef struct hack_section_baton { const char *name; ap_lua_mapped_handler_spec *spec; int apr_hook_when; } hack_section_baton; /* You can be unhappy now. * * This is uncool. * * When you create a <Section handler in httpd, the only 'easy' way to create * a directory context is to parse the section, and convert it into a 'normal' * Configuration option, and then collapse the entire section, in memory, * back into the parent section -- from which you can then get the new directive * invoked.... anyways. evil. Rici taught me how to do this hack :-) */ static const char *hack_section_handler(cmd_parms *cmd, void *_cfg, const char *arg) { ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; ap_directive_t *directive = cmd->directive; hack_section_baton *baton = directive->data; const char *key = apr_psprintf(cmd->pool, "%s_%d", baton->name, baton->apr_hook_when); apr_array_header_t *hook_specs = apr_hash_get(cfg->hooks, key, APR_HASH_KEY_STRING); if (!hook_specs) { hook_specs = apr_array_make(cmd->pool, 2, sizeof(ap_lua_mapped_handler_spec *)); apr_hash_set(cfg->hooks, key, APR_HASH_KEY_STRING, hook_specs); } baton->spec->scope = cfg->vm_scope; *(ap_lua_mapped_handler_spec **) apr_array_push(hook_specs) = baton->spec; return NULL; } static const char *register_named_block_function_hook(const char *name, cmd_parms *cmd, void *mconfig, const char *line) { const char *function = NULL; ap_lua_mapped_handler_spec *spec; int when = APR_HOOK_MIDDLE; const char *endp = ap_strrchr_c(line, '>'); if (endp == NULL) { return apr_pstrcat(cmd->pool, cmd->cmd->name, "> directive missing closing '>'", NULL); } line = apr_pstrndup(cmd->temp_pool, line, endp - line); if (line[0]) { const char *word; word = ap_getword_conf(cmd->temp_pool, &line); if (*word) { function = apr_pstrdup(cmd->pool, word); } word = ap_getword_conf(cmd->temp_pool, &line); if (*word) { if (!strcasecmp("early", word)) { when = AP_LUA_HOOK_FIRST; } else if (!strcasecmp("late", word)) { when = AP_LUA_HOOK_LAST; } else { return apr_pstrcat(cmd->pool, cmd->cmd->name, "> 2nd argument must be 'early' or 'late'", NULL); } } } spec = apr_pcalloc(cmd->pool, sizeof(ap_lua_mapped_handler_spec)); { cr_ctx ctx; lua_State *lvm; char *tmp; int rv; ap_directive_t **current; hack_section_baton *baton; spec->file_name = apr_psprintf(cmd->pool, "%s:%u", cmd->config_file->name, cmd->config_file->line_number); if (function) { spec->function_name = (char *) function; } else { function = NULL; } ctx.cmd = cmd; tmp = apr_pstrdup(cmd->pool, cmd->err_directive->directive + 1); ap_str_tolower(tmp); ctx.endstr = tmp; ctx.cfp = cmd->config_file; ctx.startline = cmd->config_file->line_number; /* This lua State is used only to compile the input strings -> bytecode, so we don't need anything extra.
*/ lvm = luaL_newstate(); lua_settop(lvm, 0); rv = lua_load(lvm, direct_chunkreader, &ctx, spec->file_name); if (rv != 0) { const char *errstr = apr_pstrcat(cmd->pool, "Lua Error:", lua_tostring(lvm, -1), NULL); lua_close(lvm); return errstr; } else { luaL_Buffer b; luaL_buffinit(lvm, &b); lua_dump(lvm, ldump_writer, &b); luaL_pushresult(&b); spec->bytecode_len = lua_strlen(lvm, -1); spec->bytecode = apr_pstrmemdup(cmd->pool, lua_tostring(lvm, -1), spec->bytecode_len); lua_close(lvm); } current = mconfig; /* Here, we have to replace our current config node for the next pass */ if (!*current) { *current = apr_pcalloc(cmd->pool, sizeof(**current)); } baton = apr_pcalloc(cmd->pool, sizeof(hack_section_baton)); baton->name = name; baton->spec = spec; baton->apr_hook_when = when; (*current)->filename = cmd->config_file->name; (*current)->line_num = cmd->config_file->line_number; (*current)->directive = apr_pstrdup(cmd->pool, "Lua_____ByteCodeHack"); (*current)->args = NULL; (*current)->data = baton; } return NULL; } static const char *register_named_file_function_hook(const char *name, cmd_parms *cmd, void *_cfg, const char *file, const char *function, int apr_hook_when) { ap_lua_mapped_handler_spec *spec; ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; const char *key = apr_psprintf(cmd->pool, "%s_%d", name, apr_hook_when); apr_array_header_t *hook_specs = apr_hash_get(cfg->hooks, key, APR_HASH_KEY_STRING); if (!hook_specs) { hook_specs = apr_array_make(cmd->pool, 2, sizeof(ap_lua_mapped_handler_spec *)); apr_hash_set(cfg->hooks, key, APR_HASH_KEY_STRING, hook_specs); } spec = apr_pcalloc(cmd->pool, sizeof(ap_lua_mapped_handler_spec)); spec->file_name = apr_pstrdup(cmd->pool, file); spec->function_name = apr_pstrdup(cmd->pool, function); spec->scope = cfg->vm_scope; *(ap_lua_mapped_handler_spec **) apr_array_push(hook_specs) = spec; return NULL; } static const char *register_mapped_file_function_hook(const char *pattern, cmd_parms *cmd, void *_cfg, const char *file, const char *function) { ap_lua_mapped_handler_spec *spec; ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; ap_regex_t *regex = apr_pcalloc(cmd->pool, sizeof(ap_regex_t)); if (ap_regcomp(regex, pattern,0)) { return "Invalid regex pattern!"; } spec = apr_pcalloc(cmd->pool, sizeof(ap_lua_mapped_handler_spec)); spec->file_name = apr_pstrdup(cmd->pool, file); spec->function_name = apr_pstrdup(cmd->pool, function); spec->scope = cfg->vm_scope; spec->uri_pattern = regex; *(ap_lua_mapped_handler_spec **) apr_array_push(cfg->mapped_handlers) = spec; return NULL; } static const char *register_filter_function_hook(const char *filter, cmd_parms *cmd, void *_cfg, const char *file, const char *function, int direction) { ap_lua_filter_handler_spec *spec; ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; spec = apr_pcalloc(cmd->pool, sizeof(ap_lua_filter_handler_spec)); spec->file_name = apr_pstrdup(cmd->pool, file); spec->function_name = apr_pstrdup(cmd->pool, function); spec->filter_name = filter; *(ap_lua_filter_handler_spec **) apr_array_push(cfg->mapped_filters) = spec; /* TODO: Make it work on other types than just AP_FTYPE_RESOURCE? 
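 * (util_filter.h defines the other filter types -- AP_FTYPE_CONTENT_SET,
 * AP_FTYPE_PROTOCOL, AP_FTYPE_TRANSCODE, AP_FTYPE_CONNECTION and
 * AP_FTYPE_NETWORK -- so one possible approach, untested here, would be an
 * optional directive argument selecting the type passed to the
 * ap_register_*_filter calls below.)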
*/ if (direction == AP_LUA_FILTER_OUTPUT) { spec->direction = AP_LUA_FILTER_OUTPUT; ap_register_output_filter_protocol(filter, lua_output_filter_handle, NULL, AP_FTYPE_RESOURCE, AP_FILTER_PROTO_CHANGE|AP_FILTER_PROTO_CHANGE_LENGTH); } else { spec->direction = AP_LUA_FILTER_INPUT; ap_register_input_filter(filter, lua_input_filter_handle, NULL, AP_FTYPE_RESOURCE); } return NULL; } /* disabled (see reference below) static int lua_check_user_id_harness_first(request_rec *r) { return lua_request_rec_hook_harness(r, "check_user_id", AP_LUA_HOOK_FIRST); } */ static int lua_check_user_id_harness(request_rec *r) { return lua_request_rec_hook_harness(r, "check_user_id", APR_HOOK_MIDDLE); } /* disabled (see reference below) static int lua_check_user_id_harness_last(request_rec *r) { return lua_request_rec_hook_harness(r, "check_user_id", AP_LUA_HOOK_LAST); } */ static int lua_translate_name_harness_first(request_rec *r) { return lua_request_rec_hook_harness(r, "translate_name", AP_LUA_HOOK_FIRST); } static int lua_translate_name_harness(request_rec *r) { return lua_request_rec_hook_harness(r, "translate_name", APR_HOOK_MIDDLE); } static int lua_translate_name_harness_last(request_rec *r) { return lua_request_rec_hook_harness(r, "translate_name", AP_LUA_HOOK_LAST); } static int lua_fixup_harness(request_rec *r) { return lua_request_rec_hook_harness(r, "fixups", APR_HOOK_MIDDLE); } static int lua_map_to_storage_harness(request_rec *r) { return lua_request_rec_hook_harness(r, "map_to_storage", APR_HOOK_MIDDLE); } static int lua_type_checker_harness(request_rec *r) { return lua_request_rec_hook_harness(r, "type_checker", APR_HOOK_MIDDLE); } static int lua_access_checker_harness_first(request_rec *r) { return lua_request_rec_hook_harness(r, "access_checker", AP_LUA_HOOK_FIRST); } static int lua_access_checker_harness(request_rec *r) { return lua_request_rec_hook_harness(r, "access_checker", APR_HOOK_MIDDLE); } static int lua_access_checker_harness_last(request_rec *r) { return lua_request_rec_hook_harness(r, "access_checker", AP_LUA_HOOK_LAST); } static int lua_auth_checker_harness_first(request_rec *r) { return lua_request_rec_hook_harness(r, "auth_checker", AP_LUA_HOOK_FIRST); } static int lua_auth_checker_harness(request_rec *r) { return lua_request_rec_hook_harness(r, "auth_checker", APR_HOOK_MIDDLE); } static int lua_auth_checker_harness_last(request_rec *r) { return lua_request_rec_hook_harness(r, "auth_checker", AP_LUA_HOOK_LAST); } static void lua_insert_filter_harness(request_rec *r) { /* ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "LuaHookInsertFilter not yet implemented"); */ } static int lua_log_transaction_harness(request_rec *r) { return lua_request_rec_hook_harness(r, "log_transaction", APR_HOOK_FIRST); } static int lua_quick_harness(request_rec *r, int lookup) { if (lookup) { return DECLINED; } return lua_request_rec_hook_harness(r, "quick", APR_HOOK_MIDDLE); } static const char *register_translate_name_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function, const char *when) { const char *err = ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_FILES| NOT_IN_HTACCESS); int apr_hook_when = APR_HOOK_MIDDLE; if (err) { return err; } if (when) { if (!strcasecmp(when, "early")) { apr_hook_when = AP_LUA_HOOK_FIRST; } else if (!strcasecmp(when, "late")) { apr_hook_when = AP_LUA_HOOK_LAST; } else { return "Third argument must be 'early' or 'late'"; } } return register_named_file_function_hook("translate_name", cmd, _cfg, file, function, apr_hook_when); } static const char 
*register_translate_name_block(cmd_parms *cmd, void *_cfg, const char *line) { return register_named_block_function_hook("translate_name", cmd, _cfg, line); } static const char *register_fixups_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function) { return register_named_file_function_hook("fixups", cmd, _cfg, file, function, APR_HOOK_MIDDLE); } static const char *register_fixups_block(cmd_parms *cmd, void *_cfg, const char *line) { return register_named_block_function_hook("fixups", cmd, _cfg, line); } static const char *register_map_to_storage_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function) { return register_named_file_function_hook("map_to_storage", cmd, _cfg, file, function, APR_HOOK_MIDDLE); } static const char *register_log_transaction_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function) { return register_named_file_function_hook("log_transaction", cmd, _cfg, file, function, APR_HOOK_FIRST); } static const char *register_map_to_storage_block(cmd_parms *cmd, void *_cfg, const char *line) { return register_named_block_function_hook("map_to_storage", cmd, _cfg, line); } static const char *register_check_user_id_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function, const char *when) { int apr_hook_when = APR_HOOK_MIDDLE; /* XXX: This does not currently work!! if (when) { if (!strcasecmp(when, "early")) { apr_hook_when = AP_LUA_HOOK_FIRST; } else if (!strcasecmp(when, "late")) { apr_hook_when = AP_LUA_HOOK_LAST; } else { return "Third argument must be 'early' or 'late'"; } } */ return register_named_file_function_hook("check_user_id", cmd, _cfg, file, function, apr_hook_when); } static const char *register_check_user_id_block(cmd_parms *cmd, void *_cfg, const char *line) { return register_named_block_function_hook("check_user_id", cmd, _cfg, line); } static const char *register_type_checker_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function) { return register_named_file_function_hook("type_checker", cmd, _cfg, file, function, APR_HOOK_MIDDLE); } static const char *register_type_checker_block(cmd_parms *cmd, void *_cfg, const char *line) { return register_named_block_function_hook("type_checker", cmd, _cfg, line); } static const char *register_access_checker_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function, const char *when) { int apr_hook_when = APR_HOOK_MIDDLE; if (when) { if (!strcasecmp(when, "early")) { apr_hook_when = AP_LUA_HOOK_FIRST; } else if (!strcasecmp(when, "late")) { apr_hook_when = AP_LUA_HOOK_LAST; } else { return "Third argument must be 'early' or 'late'"; } } return register_named_file_function_hook("access_checker", cmd, _cfg, file, function, apr_hook_when); } static const char *register_access_checker_block(cmd_parms *cmd, void *_cfg, const char *line) { return register_named_block_function_hook("access_checker", cmd, _cfg, line); } static const char *register_auth_checker_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function, const char *when) { int apr_hook_when = APR_HOOK_MIDDLE; if (when) { if (!strcasecmp(when, "early")) { apr_hook_when = AP_LUA_HOOK_FIRST; } else if (!strcasecmp(when, "late")) { apr_hook_when = AP_LUA_HOOK_LAST; } else { return "Third argument must be 'early' or 'late'"; } } return register_named_file_function_hook("auth_checker", cmd, _cfg, file, function, apr_hook_when); } static const char *register_auth_checker_block(cmd_parms *cmd, void *_cfg, const char *line) { return 
register_named_block_function_hook("auth_checker", cmd, _cfg, line); } static const char *register_insert_filter_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function) { return "LuaHookInsertFilter not yet implemented"; } static const char *register_quick_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function) { const char *err = ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_FILES| NOT_IN_HTACCESS); if (err) { return err; } return register_named_file_function_hook("quick", cmd, _cfg, file, function, APR_HOOK_MIDDLE); } static const char *register_map_handler(cmd_parms *cmd, void *_cfg, const char* match, const char *file, const char *function) { const char *err = ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_FILES| NOT_IN_HTACCESS); if (err) { return err; } if (!function) function = "handle"; return register_mapped_file_function_hook(match, cmd, _cfg, file, function); } static const char *register_output_filter(cmd_parms *cmd, void *_cfg, const char* filter, const char *file, const char *function) { const char *err = ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_FILES| NOT_IN_HTACCESS); if (err) { return err; } if (!function) function = "handle"; return register_filter_function_hook(filter, cmd, _cfg, file, function, AP_LUA_FILTER_OUTPUT); } static const char *register_input_filter(cmd_parms *cmd, void *_cfg, const char* filter, const char *file, const char *function) { const char *err = ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_FILES| NOT_IN_HTACCESS); if (err) { return err; } if (!function) function = "handle"; return register_filter_function_hook(filter, cmd, _cfg, file, function, AP_LUA_FILTER_INPUT); } static const char *register_quick_block(cmd_parms *cmd, void *_cfg, const char *line) { return register_named_block_function_hook("quick", cmd, _cfg, line); } static const char *register_package_helper(cmd_parms *cmd, const char *arg, apr_array_header_t *dir_array) { apr_status_t rv; ap_lua_server_cfg *server_cfg = ap_get_module_config(cmd->server->module_config, &lua_module); char *fixed_filename; rv = apr_filepath_merge(&fixed_filename, server_cfg->root_path, arg, APR_FILEPATH_NOTRELATIVE, cmd->pool); if (rv != APR_SUCCESS) { return apr_psprintf(cmd->pool, "Unable to build full path to file, %s", arg); } *(const char **) apr_array_push(dir_array) = fixed_filename; return NULL; } /** * Called for config directive which looks like * LuaPackagePath /lua/package/path/mapped/thing/like/this/?.lua */ static const char *register_package_dir(cmd_parms *cmd, void *_cfg, const char *arg) { ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; return register_package_helper(cmd, arg, cfg->package_paths); } /** * Called for config directive which looks like * LuaPackageCPath /lua/package/path/mapped/thing/like/this/?.so */ static const char *register_package_cdir(cmd_parms *cmd, void *_cfg, const char *arg) { ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; return register_package_helper(cmd, arg, cfg->package_cpaths); } static const char *register_lua_inherit(cmd_parms *cmd, void *_cfg, const char *arg) { ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; if (strcasecmp("none", arg) == 0) { cfg->inherit = AP_LUA_INHERIT_NONE; } else if (strcasecmp("parent-first", arg) == 0) { cfg->inherit = AP_LUA_INHERIT_PARENT_FIRST; } else if (strcasecmp("parent-last", arg) == 0) { cfg->inherit = AP_LUA_INHERIT_PARENT_LAST; } else { return apr_psprintf(cmd->pool, "LuaInherit type of '%s' not recognized, valid " "options are 'none', 'parent-first', and 'parent-last'", 
arg); } return NULL; } static const char *register_lua_codecache(cmd_parms *cmd, void *_cfg, const char *arg) { ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; if (strcasecmp("never", arg) == 0) { cfg->codecache = AP_LUA_CACHE_NEVER; } else if (strcasecmp("stat", arg) == 0) { cfg->codecache = AP_LUA_CACHE_STAT; } else if (strcasecmp("forever", arg) == 0) { cfg->codecache = AP_LUA_CACHE_FOREVER; } else { return apr_psprintf(cmd->pool, "LuaCodeCache type of '%s' not recognized, valid " "options are 'never', 'stat', and 'forever'", arg); } return NULL; } static const char *register_lua_scope(cmd_parms *cmd, void *_cfg, const char *scope, const char *min, const char *max) { ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg; if (strcmp("once", scope) == 0) { cfg->vm_scope = AP_LUA_SCOPE_ONCE; } else if (strcmp("request", scope) == 0) { cfg->vm_scope = AP_LUA_SCOPE_REQUEST; } else if (strcmp("conn", scope) == 0) { cfg->vm_scope = AP_LUA_SCOPE_CONN; } else if (strcmp("thread", scope) == 0) { #if !APR_HAS_THREADS return apr_psprintf(cmd->pool, "Scope type of '%s' cannot be used because this " "server does not have threading support " "(APR_HAS_THREADS)", scope); #endif cfg->vm_scope = AP_LUA_SCOPE_THREAD; } else if (strcmp("server", scope) == 0) { unsigned int vmin, vmax; #if !APR_HAS_THREADS return apr_psprintf(cmd->pool, "Scope type of '%s' cannot be used because this " "server does not have threading support " "(APR_HAS_THREADS)", scope); #endif cfg->vm_scope = AP_LUA_SCOPE_SERVER; vmin = min ? atoi(min) : 1; vmax = max ? atoi(max) : 1; if (vmin == 0) { vmin = 1; } if (vmax < vmin) { vmax = vmin; } cfg->vm_min = vmin; cfg->vm_max = vmax; } else { return apr_psprintf(cmd->pool, "Invalid value for LuaScope, '%s', acceptable " "values are: 'once', 'request', 'conn'" #if APR_HAS_THREADS ", 'thread', 'server'" #endif ,scope); } return NULL; } static const char *register_lua_root(cmd_parms *cmd, void *_cfg, const char *root) { /* ap_lua_dir_cfg* cfg = (ap_lua_dir_cfg*)_cfg; */ ap_lua_server_cfg *cfg = ap_get_module_config(cmd->server->module_config, &lua_module); cfg->root_path = root; return NULL; } const char *ap_lua_ssl_val(apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, const char *var) { if (lua_ssl_val) { return (const char *)lua_ssl_val(p, s, c, r, (char *)var); } return NULL; } int ap_lua_ssl_is_https(conn_rec *c) { return lua_ssl_is_https ?
lua_ssl_is_https(c) : 0; } /*******************************/ static const char *lua_authz_parse(cmd_parms *cmd, const char *require_line, const void **parsed_require_line) { const char *provider_name; lua_authz_provider_spec *spec; apr_pool_userdata_get((void**)&provider_name, AUTHZ_PROVIDER_NAME_NOTE, cmd->temp_pool); ap_assert(provider_name != NULL); spec = apr_hash_get(lua_authz_providers, provider_name, APR_HASH_KEY_STRING); ap_assert(spec != NULL); if (require_line && *require_line) { const char *arg; spec->args = apr_array_make(cmd->pool, 2, sizeof(const char *)); while ((arg = ap_getword_conf(cmd->pool, &require_line)) && *arg) { APR_ARRAY_PUSH(spec->args, const char *) = arg; } } *parsed_require_line = spec; return NULL; } static authz_status lua_authz_check(request_rec *r, const char *require_line, const void *parsed_require_line) { apr_pool_t *pool; ap_lua_vm_spec *spec; lua_State *L; ap_lua_server_cfg *server_cfg = ap_get_module_config(r->server->module_config, &lua_module); const ap_lua_dir_cfg *cfg = ap_get_module_config(r->per_dir_config, &lua_module); const lua_authz_provider_spec *prov_spec = parsed_require_line; int result; int nargs = 0; spec = create_vm_spec(&pool, r, cfg, server_cfg, prov_spec->file_name, NULL, 0, prov_spec->function_name, "authz provider"); L = ap_lua_get_lua_state(pool, spec, r); if (L == NULL) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02314) "Unable to compile VM for authz provider %s", prov_spec->name); return AUTHZ_GENERAL_ERROR; } lua_getglobal(L, prov_spec->function_name); if (!lua_isfunction(L, -1)) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02319) "Unable to find entry function '%s' in %s (not a valid function)", prov_spec->function_name, prov_spec->file_name); ap_lua_release_state(L, spec, r); return AUTHZ_GENERAL_ERROR; } ap_lua_run_lua_request(L, r); if (prov_spec->args) { int i; if (!lua_checkstack(L, prov_spec->args->nelts)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02315) "Error: authz provider %s: too many arguments", prov_spec->name); ap_lua_release_state(L, spec, r); return AUTHZ_GENERAL_ERROR; } for (i = 0; i < prov_spec->args->nelts; i++) { const char *arg = APR_ARRAY_IDX(prov_spec->args, i, const char *); lua_pushstring(L, arg); } nargs = prov_spec->args->nelts; } if (lua_pcall(L, 1 + nargs, 1, 0)) { const char *err = lua_tostring(L, -1); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02316) "Error executing authz provider %s: %s", prov_spec->name, err); ap_lua_release_state(L, spec, r); return AUTHZ_GENERAL_ERROR; } if (!lua_isnumber(L, -1)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02317) "Error: authz provider %s did not return integer", prov_spec->name); ap_lua_release_state(L, spec, r); return AUTHZ_GENERAL_ERROR; } result = lua_tointeger(L, -1); ap_lua_release_state(L, spec, r); switch (result) { case AUTHZ_DENIED: case AUTHZ_GRANTED: case AUTHZ_NEUTRAL: case AUTHZ_GENERAL_ERROR: case AUTHZ_DENIED_NO_USER: return result; default: ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02318) "Error: authz provider %s: invalid return value %d", prov_spec->name, result); } return AUTHZ_GENERAL_ERROR; } static const authz_provider lua_authz_provider = { &lua_authz_check, &lua_authz_parse, }; static const char *register_authz_provider(cmd_parms *cmd, void *_cfg, const char *name, const char *file, const char *function) { lua_authz_provider_spec *spec; const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (err) return err; spec = apr_pcalloc(cmd->pool, sizeof(*spec)); spec->name = 
name; spec->file_name = file; spec->function_name = function; apr_hash_set(lua_authz_providers, name, APR_HASH_KEY_STRING, spec); ap_register_auth_provider(cmd->pool, AUTHZ_PROVIDER_GROUP, name, AUTHZ_PROVIDER_VERSION, &lua_authz_provider, AP_AUTH_INTERNAL_PER_CONF); return NULL; } command_rec lua_commands[] = { AP_INIT_TAKE1("LuaRoot", register_lua_root, NULL, OR_ALL, "Specify the base path for resolving relative paths for mod_lua directives"), AP_INIT_TAKE1("LuaPackagePath", register_package_dir, NULL, OR_ALL, "Add a directory to lua's package.path"), AP_INIT_TAKE1("LuaPackageCPath", register_package_cdir, NULL, OR_ALL, "Add a directory to lua's package.cpath"), AP_INIT_TAKE3("LuaAuthzProvider", register_authz_provider, NULL, RSRC_CONF|EXEC_ON_READ, "Provide an authorization provider"), AP_INIT_TAKE23("LuaHookTranslateName", register_translate_name_hook, NULL, OR_ALL, "Provide a hook for the translate name phase of request processing"), AP_INIT_RAW_ARGS("<LuaHookTranslateName", register_translate_name_block, NULL, EXEC_ON_READ | OR_ALL, "Provide a hook for the translate name phase of request processing"), AP_INIT_TAKE2("LuaHookFixups", register_fixups_hook, NULL, OR_ALL, "Provide a hook for the fixups phase of request processing"), AP_INIT_RAW_ARGS("<LuaHookFixups", register_fixups_block, NULL, EXEC_ON_READ | OR_ALL, "Provide a inline hook for the fixups phase of request processing"), /* todo: test */ AP_INIT_TAKE2("LuaHookMapToStorage", register_map_to_storage_hook, NULL, OR_ALL, "Provide a hook for the map_to_storage phase of request processing"), AP_INIT_RAW_ARGS("<LuaHookMapToStorage", register_map_to_storage_block, NULL, EXEC_ON_READ | OR_ALL, "Provide a hook for the map_to_storage phase of request processing"), /* todo: test */ AP_INIT_TAKE23("LuaHookCheckUserID", register_check_user_id_hook, NULL, OR_ALL, "Provide a hook for the check_user_id phase of request processing"), AP_INIT_RAW_ARGS("<LuaHookCheckUserID", register_check_user_id_block, NULL, EXEC_ON_READ | OR_ALL, "Provide a hook for the check_user_id phase of request processing"), /* todo: test */ AP_INIT_TAKE2("LuaHookTypeChecker", register_type_checker_hook, NULL, OR_ALL, "Provide a hook for the type_checker phase of request processing"), AP_INIT_RAW_ARGS("<LuaHookTypeChecker", register_type_checker_block, NULL, EXEC_ON_READ | OR_ALL, "Provide a hook for the type_checker phase of request processing"), /* todo: test */ AP_INIT_TAKE23("LuaHookAccessChecker", register_access_checker_hook, NULL, OR_ALL, "Provide a hook for the access_checker phase of request processing"), AP_INIT_RAW_ARGS("<LuaHookAccessChecker", register_access_checker_block, NULL, EXEC_ON_READ | OR_ALL, "Provide a hook for the access_checker phase of request processing"), /* todo: test */ AP_INIT_TAKE23("LuaHookAuthChecker", register_auth_checker_hook, NULL, OR_ALL, "Provide a hook for the auth_checker phase of request processing"), AP_INIT_RAW_ARGS("<LuaHookAuthChecker", register_auth_checker_block, NULL, EXEC_ON_READ | OR_ALL, "Provide a hook for the auth_checker phase of request processing"), /* todo: test */ AP_INIT_TAKE2("LuaHookInsertFilter", register_insert_filter_hook, NULL, OR_ALL, "Provide a hook for the insert_filter phase of request processing"), AP_INIT_TAKE2("LuaHookLog", register_log_transaction_hook, NULL, OR_ALL, "Provide a hook for the logging phase of request processing"), AP_INIT_TAKE123("LuaScope", register_lua_scope, NULL, OR_ALL, "One of once, request, conn, server -- default is once"), AP_INIT_TAKE1("LuaInherit", 
register_lua_inherit, NULL, OR_ALL, "Controls how Lua scripts in parent contexts are merged with the current " " context: none|parent-last|parent-first (default: parent-first) "), AP_INIT_TAKE1("LuaCodeCache", register_lua_codecache, NULL, OR_ALL, "Controls the behavior of the in-memory code cache " " context: stat|forever|never (default: stat) "), AP_INIT_TAKE2("LuaQuickHandler", register_quick_hook, NULL, OR_ALL, "Provide a hook for the quick handler of request processing"), AP_INIT_RAW_ARGS("<LuaQuickHandler", register_quick_block, NULL, EXEC_ON_READ | OR_ALL, "Provide a hook for the quick handler of request processing"), AP_INIT_RAW_ARGS("Lua_____ByteCodeHack", hack_section_handler, NULL, OR_ALL, "(internal) Byte code handler"), AP_INIT_TAKE23("LuaMapHandler", register_map_handler, NULL, OR_ALL, "Maps a path to a lua handler"), AP_INIT_TAKE3("LuaOutputFilter", register_output_filter, NULL, OR_ALL, "Registers a Lua function as an output filter"), AP_INIT_TAKE3("LuaInputFilter", register_input_filter, NULL, OR_ALL, "Registers a Lua function as an input filter"), {NULL} }; static void *create_dir_config(apr_pool_t *p, char *dir) { ap_lua_dir_cfg *cfg = apr_pcalloc(p, sizeof(ap_lua_dir_cfg)); cfg->package_paths = apr_array_make(p, 2, sizeof(char *)); cfg->package_cpaths = apr_array_make(p, 2, sizeof(char *)); cfg->mapped_handlers = apr_array_make(p, 1, sizeof(ap_lua_mapped_handler_spec *)); cfg->mapped_filters = apr_array_make(p, 1, sizeof(ap_lua_filter_handler_spec *)); cfg->pool = p; cfg->hooks = apr_hash_make(p); cfg->dir = apr_pstrdup(p, dir); cfg->vm_scope = AP_LUA_SCOPE_UNSET; cfg->codecache = AP_LUA_CACHE_UNSET; cfg->vm_min = 0; cfg->vm_max = 0; return cfg; } static int create_request_config(request_rec *r) { ap_lua_request_cfg *cfg = apr_palloc(r->pool, sizeof(ap_lua_request_cfg)); cfg->mapped_request_details = NULL; cfg->request_scoped_vms = apr_hash_make(r->pool); ap_set_module_config(r->request_config, &lua_module, cfg); return OK; } static void *create_server_config(apr_pool_t *p, server_rec *s) { ap_lua_server_cfg *cfg = apr_pcalloc(p, sizeof(ap_lua_server_cfg)); cfg->vm_reslists = apr_hash_make(p); apr_thread_rwlock_create(&cfg->vm_reslists_lock, p); cfg->root_path = NULL; return cfg; } static int lua_request_hook(lua_State *L, request_rec *r) { ap_lua_push_request(L, r); return OK; } static int lua_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp) { ap_mutex_register(pconf, "lua-ivm-shm", NULL, APR_LOCK_DEFAULT, 0); return OK; } static int lua_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s) { apr_pool_t **pool; const char *tempdir; apr_status_t rs; lua_ssl_val = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); lua_ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) return OK; /* Create ivm mutex */ rs = ap_global_mutex_create(&lua_ivm_mutex, NULL, "lua-ivm-shm", NULL, s, pconf, 0); if (APR_SUCCESS != rs) { return HTTP_INTERNAL_SERVER_ERROR; } /* Create shared memory space */ rs = apr_temp_dir_get(&tempdir, pconf); if (rs != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rs, s, APLOGNO(02664) "mod_lua IVM: Failed to find temporary directory"); return HTTP_INTERNAL_SERVER_ERROR; } lua_ivm_shmfile = apr_psprintf(pconf, "%s/httpd_lua_shm.%ld", tempdir, (long int)getpid()); rs = apr_shm_create(&lua_ivm_shm, sizeof(apr_pool_t**), (const char *) lua_ivm_shmfile, pconf); if (rs != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rs, s, APLOGNO(02665) "mod_lua: 
Failed to create shared memory segment on file %s", lua_ivm_shmfile); return HTTP_INTERNAL_SERVER_ERROR; } pool = (apr_pool_t **)apr_shm_baseaddr_get(lua_ivm_shm); apr_pool_create(pool, pconf); apr_pool_cleanup_register(pconf, NULL, shm_cleanup_wrapper, apr_pool_cleanup_null); return OK; } static void *overlay_hook_specs(apr_pool_t *p, const void *key, apr_ssize_t klen, const void *overlay_val, const void *base_val, const void *data) { const apr_array_header_t *overlay_info = (const apr_array_header_t*)overlay_val; const apr_array_header_t *base_info = (const apr_array_header_t*)base_val; return apr_array_append(p, base_info, overlay_info); } static void *merge_dir_config(apr_pool_t *p, void *basev, void *overridesv) { ap_lua_dir_cfg *a, *base, *overrides; a = (ap_lua_dir_cfg *)apr_pcalloc(p, sizeof(ap_lua_dir_cfg)); base = (ap_lua_dir_cfg*)basev; overrides = (ap_lua_dir_cfg*)overridesv; a->pool = overrides->pool; a->dir = apr_pstrdup(p, overrides->dir); a->vm_scope = (overrides->vm_scope == AP_LUA_SCOPE_UNSET) ? base->vm_scope: overrides->vm_scope; a->inherit = (overrides->inherit == AP_LUA_INHERIT_UNSET) ? base->inherit : overrides->inherit; a->codecache = (overrides->codecache == AP_LUA_CACHE_UNSET) ? base->codecache : overrides->codecache; a->vm_min = (overrides->vm_min == 0) ? base->vm_min : overrides->vm_min; a->vm_max = (overrides->vm_max == 0) ? base->vm_max : overrides->vm_max; if (a->inherit == AP_LUA_INHERIT_UNSET || a->inherit == AP_LUA_INHERIT_PARENT_FIRST) { a->package_paths = apr_array_append(p, base->package_paths, overrides->package_paths); a->package_cpaths = apr_array_append(p, base->package_cpaths, overrides->package_cpaths); a->mapped_handlers = apr_array_append(p, base->mapped_handlers, overrides->mapped_handlers); a->mapped_filters = apr_array_append(p, base->mapped_filters, overrides->mapped_filters); a->hooks = apr_hash_merge(p, overrides->hooks, base->hooks, overlay_hook_specs, NULL); } else if (a->inherit == AP_LUA_INHERIT_PARENT_LAST) { a->package_paths = apr_array_append(p, overrides->package_paths, base->package_paths); a->package_cpaths = apr_array_append(p, overrides->package_cpaths, base->package_cpaths); a->mapped_handlers = apr_array_append(p, overrides->mapped_handlers, base->mapped_handlers); a->mapped_filters = apr_array_append(p, overrides->mapped_filters, base->mapped_filters); a->hooks = apr_hash_merge(p, base->hooks, overrides->hooks, overlay_hook_specs, NULL); } else { a->package_paths = overrides->package_paths; a->package_cpaths = overrides->package_cpaths; a->mapped_handlers= overrides->mapped_handlers; a->mapped_filters= overrides->mapped_filters; a->hooks= overrides->hooks; } return a; } static void lua_register_hooks(apr_pool_t *p) { /* ap_register_output_filter("luahood", luahood, NULL, AP_FTYPE_RESOURCE); */ ap_hook_handler(lua_handler, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_create_request(create_request_config, NULL, NULL, APR_HOOK_MIDDLE); /* http_request.h hooks */ ap_hook_translate_name(lua_translate_name_harness_first, NULL, NULL, AP_LUA_HOOK_FIRST); ap_hook_translate_name(lua_translate_name_harness, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_translate_name(lua_translate_name_harness_last, NULL, NULL, AP_LUA_HOOK_LAST); ap_hook_fixups(lua_fixup_harness, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_map_to_storage(lua_map_to_storage_harness, NULL, NULL, APR_HOOK_MIDDLE); /* XXX: Does not work :( * ap_hook_check_user_id(lua_check_user_id_harness_first, NULL, NULL, AP_LUA_HOOK_FIRST); */ ap_hook_check_user_id(lua_check_user_id_harness, NULL, NULL, 
APR_HOOK_MIDDLE); /* XXX: Does not work :( * ap_hook_check_user_id(lua_check_user_id_harness_last, NULL, NULL, AP_LUA_HOOK_LAST); */ ap_hook_type_checker(lua_type_checker_harness, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_access_checker(lua_access_checker_harness_first, NULL, NULL, AP_LUA_HOOK_FIRST); ap_hook_access_checker(lua_access_checker_harness, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_access_checker(lua_access_checker_harness_last, NULL, NULL, AP_LUA_HOOK_LAST); ap_hook_auth_checker(lua_auth_checker_harness_first, NULL, NULL, AP_LUA_HOOK_FIRST); ap_hook_auth_checker(lua_auth_checker_harness, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_auth_checker(lua_auth_checker_harness_last, NULL, NULL, AP_LUA_HOOK_LAST); ap_hook_insert_filter(lua_insert_filter_harness, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_quick_handler(lua_quick_harness, NULL, NULL, APR_HOOK_FIRST); ap_hook_post_config(lua_post_config, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_pre_config(lua_pre_config, NULL, NULL, APR_HOOK_MIDDLE); APR_OPTIONAL_HOOK(ap_lua, lua_open, lua_open_hook, NULL, NULL, APR_HOOK_REALLY_FIRST); APR_OPTIONAL_HOOK(ap_lua, lua_request, lua_request_hook, NULL, NULL, APR_HOOK_REALLY_FIRST); ap_hook_handler(lua_map_handler, NULL, NULL, AP_LUA_HOOK_FIRST); /* Hook this right before FallbackResource kicks in */ ap_hook_fixups(lua_map_handler_fixups, NULL, NULL, AP_LUA_HOOK_LAST-2); #if APR_HAS_THREADS ap_hook_child_init(ap_lua_init_mutex, NULL, NULL, APR_HOOK_MIDDLE); #endif /* providers */ lua_authz_providers = apr_hash_make(p); /* Logging catcher */ ap_hook_log_transaction(lua_log_transaction_harness,NULL,NULL, APR_HOOK_FIRST); } AP_DECLARE_MODULE(lua) = { STANDARD20_MODULE_STUFF, create_dir_config, /* create per-dir config structures */ merge_dir_config, /* merge per-dir config structures */ create_server_config, /* create per-server config structures */ NULL, /* merge per-server config structures */ lua_commands, /* table of config file commands */ lua_register_hooks /* register hooks */ };
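/*
 * Illustrative configuration exercising the directives registered in
 * lua_commands[] above. Every path, pattern and function name below is a
 * hypothetical placeholder, not a default shipped by this module:
 *
 *     LuaRoot        /var/www/lua
 *     LuaScope       thread
 *     LuaCodeCache   stat
 *     LuaInherit     parent-first
 *     LuaPackagePath /var/www/lua/lib/?.lua
 *     LuaHookTranslateName /var/www/lua/hooks.lua translate late
 *     LuaMapHandler  /photos/(\w+) /var/www/lua/photos.lua handle_$1
 *
 * In the LuaMapHandler line, the regex capture is interpolated into the file
 * and function names through the values[] array built in lua_map_handler(),
 * so a request for /photos/archive would invoke handle_archive() in
 * photos.lua.
 */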
/* * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <crm_internal.h> #include <sys/param.h> #include <stdio.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <time.h> #include <string.h> #include <dirent.h> #include <stdlib.h> #include <errno.h> #include <fcntl.h> #include <ctype.h> #include <math.h> #include <crm/crm.h> #include <crm/msg_xml.h> #include <crm/common/xml.h> #include <libxml/xmlreader.h> #if HAVE_BZLIB_H # include <bzlib.h> #endif #if HAVE_LIBXML2 # include <libxml/parser.h> # include <libxml/tree.h> # include <libxml/relaxng.h> #endif #if HAVE_LIBXSLT # include <libxslt/xslt.h> # include <libxslt/transform.h> #endif #define XML_BUFFER_SIZE 4096 #define XML_PARSER_DEBUG 0 void xml_log(int priority, const char *fmt, ...) G_GNUC_PRINTF(2, 3); static inline int __get_prefix(const char *prefix, xmlNode *xml, char *buffer, int offset); void xml_log(int priority, const char *fmt, ...) { va_list ap; va_start(ap, fmt); qb_log_from_external_source_va(__FUNCTION__, __FILE__, fmt, priority, __LINE__, 0, ap); va_end(ap); } typedef struct { xmlRelaxNGPtr rng; xmlRelaxNGValidCtxtPtr valid; xmlRelaxNGParserCtxtPtr parser; } relaxng_ctx_cache_t; struct schema_s { int type; float version; char *name; char *location; char *transform; int after_transform; void *cache; }; typedef struct { int found; const char *string; } filter_t; enum xml_private_flags { xpf_none = 0x0000, xpf_dirty = 0x0001, xpf_deleted = 0x0002, xpf_created = 0x0004, xpf_modified = 0x0008, xpf_tracking = 0x0010, xpf_processed = 0x0020, xpf_skip = 0x0040, xpf_moved = 0x0080, xpf_acl_enabled = 0x0100, xpf_acl_read = 0x0200, xpf_acl_write = 0x0400, xpf_acl_deny = 0x0800, xpf_acl_create = 0x1000, xpf_acl_denied = 0x2000, }; typedef struct xml_private_s { long check; uint32_t flags; char *user; GListPtr acls; GListPtr deleted_paths; } xml_private_t; typedef struct xml_acl_s { enum xml_private_flags mode; char *xpath; } xml_acl_t; /* *INDENT-OFF* */ static filter_t filter[] = { { 0, XML_ATTR_ORIGIN }, { 0, XML_CIB_ATTR_WRITTEN }, { 0, XML_ATTR_UPDATE_ORIG }, { 0, XML_ATTR_UPDATE_CLIENT }, { 0, XML_ATTR_UPDATE_USER }, }; /* *INDENT-ON* */ static struct schema_s *known_schemas = NULL; static int xml_schema_max = 0; static xmlNode *subtract_xml_comment(xmlNode * parent, xmlNode * left, xmlNode * right, gboolean * changed); static xmlNode *find_xml_comment(xmlNode * root, xmlNode * search_comment); static int add_xml_comment(xmlNode * parent, xmlNode * target, xmlNode * update); static bool __xml_acl_check(xmlNode *xml, const char *name, enum xml_private_flags mode); const char *__xml_acl_to_text(enum xml_private_flags flags); static int xml_latest_schema_index(void) { return xml_schema_max - 4; } static int xml_minimum_schema_index(void) { static int best = 0; if(best == 0) { 
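/* Walk backwards from the newest schema: 'best' ends up as the oldest
 * schema whose version is still >= the newest schema's major number
 * (e.g. the index of pacemaker-2.0 when the newest is pacemaker-2.4;
 * version numbers here are purely illustrative). Computed once, then
 * cached in the static variable. */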
int lpc = 0; float target = 0.0; best = xml_latest_schema_index(); target = floor(known_schemas[best].version); for(lpc = best; lpc > 0; lpc--) { if(known_schemas[lpc].version < target) { return best; } else { best = lpc; } } best = xml_latest_schema_index(); } return best; } const char * xml_latest_schema(void) { return get_schema_name(xml_latest_schema_index()); } #define CHUNK_SIZE 1024 static inline bool TRACKING_CHANGES(xmlNode *xml) { if(xml == NULL || xml->doc == NULL || xml->doc->_private == NULL) { return FALSE; } else if(is_set(((xml_private_t *)xml->doc->_private)->flags, xpf_tracking)) { return TRUE; } return FALSE; } #define buffer_print(buffer, max, offset, fmt, args...) do { \ int rc = (max); \ if(buffer) { \ rc = snprintf((buffer) + (offset), (max) - (offset), fmt, ##args); \ } \ if(buffer && rc < 0) { \ crm_perror(LOG_ERR, "snprintf failed at offset %d", offset); \ (buffer)[(offset)] = 0; \ } else if(rc >= ((max) - (offset))) { \ char *tmp = NULL; \ (max) = QB_MAX(CHUNK_SIZE, (max) * 2); \ tmp = realloc_safe((buffer), (max) + 1); \ CRM_ASSERT(tmp); \ (buffer) = tmp; \ } else { \ offset += rc; \ break; \ } \ } while(1); static void insert_prefix(int options, char **buffer, int *offset, int *max, int depth) { if (options & xml_log_option_formatted) { size_t spaces = 2 * depth; if ((*buffer) == NULL || spaces >= ((*max) - (*offset))) { (*max) = QB_MAX(CHUNK_SIZE, (*max) * 2); (*buffer) = realloc_safe((*buffer), (*max) + 1); } memset((*buffer) + (*offset), ' ', spaces); (*offset) += spaces; } } static const char * get_schema_root(void) { static const char *base = NULL; if (base == NULL) { base = getenv("PCMK_schema_directory"); } if (base == NULL || strlen(base) == 0) { base = CRM_DTD_DIRECTORY; } return base; } static char * get_schema_path(const char *name, const char *file) { const char *base = get_schema_root(); if(file) { return crm_strdup_printf("%s/%s", base, file); } return crm_strdup_printf("%s/%s.rng", base, name); } static int schema_filter(const struct dirent * a) { int rc = 0; float version = 0; if(strstr(a->d_name, "pacemaker-") != a->d_name) { /* crm_trace("%s - wrong prefix", a->d_name); */ } else if(strstr(a->d_name, ".rng") == NULL) { /* crm_trace("%s - wrong suffix", a->d_name); */ } else if(sscanf(a->d_name, "pacemaker-%f.rng", &version) == 0) { /* crm_trace("%s - wrong format", a->d_name); */ } else if(strcmp(a->d_name, "pacemaker-1.1.rng") == 0) { /* crm_trace("%s - hack", a->d_name); */ } else { /* crm_debug("%s - candidate", a->d_name); */ rc = 1; } return rc; } static int schema_sort(const struct dirent ** a, const struct dirent **b) { int rc = 0; float a_version = 0.0; float b_version = 0.0; sscanf(a[0]->d_name, "pacemaker-%f.rng", &a_version); sscanf(b[0]->d_name, "pacemaker-%f.rng", &b_version); if(a_version > b_version) { rc = 1; } else if(a_version < b_version) { rc = -1; } /* crm_trace("%s (%f) vs. 
%s (%f) : %d", a[0]->d_name, a_version, b[0]->d_name, b_version, rc); */ return rc; } static void __xml_schema_add( int type, float version, const char *name, const char *location, const char *transform, int after_transform) { int last = xml_schema_max; xml_schema_max++; known_schemas = realloc_safe(known_schemas, xml_schema_max*sizeof(struct schema_s)); CRM_ASSERT(known_schemas != NULL); memset(known_schemas+last, 0, sizeof(struct schema_s)); known_schemas[last].type = type; known_schemas[last].after_transform = after_transform; if(version > 0.0) { known_schemas[last].version = version; known_schemas[last].name = crm_strdup_printf("pacemaker-%.1f", version); known_schemas[last].location = crm_strdup_printf("%s.rng", known_schemas[last].name); } else { char dummy[1024]; CRM_ASSERT(name); CRM_ASSERT(location); sscanf(name, "%[^-]-%f", dummy, &version); known_schemas[last].version = version; known_schemas[last].name = strdup(name); known_schemas[last].location = strdup(location); } if(transform) { known_schemas[last].transform = strdup(transform); } if(after_transform == 0) { after_transform = xml_schema_max; } known_schemas[last].after_transform = after_transform; if(known_schemas[last].after_transform < 0) { crm_debug("Added supported schema %d: %s (%s)", last, known_schemas[last].name, known_schemas[last].location); } else if(known_schemas[last].transform) { crm_debug("Added supported schema %d: %s (%s upgrades to %d with %s)", last, known_schemas[last].name, known_schemas[last].location, known_schemas[last].after_transform, known_schemas[last].transform); } else { crm_debug("Added supported schema %d: %s (%s upgrades to %d)", last, known_schemas[last].name, known_schemas[last].location, known_schemas[last].after_transform); } } static int __xml_build_schema_list(void) { int lpc, max; const char *base = get_schema_root(); struct dirent **namelist = NULL; max = scandir(base, &namelist, schema_filter, schema_sort); __xml_schema_add(1, 0.0, "pacemaker-0.6", "crm.dtd", "upgrade06.xsl", 3); __xml_schema_add(1, 0.0, "transitional-0.6", "crm-transitional.dtd", "upgrade06.xsl", 3); __xml_schema_add(2, 0.0, "pacemaker-0.7", "pacemaker-1.0.rng", NULL, 0); if (max < 0) { crm_notice("scandir(%s) failed: %s (%d)", base, strerror(errno), errno); } else { for (lpc = 0; lpc < max; lpc++) { int next = 0; float version = 0.0; char *transform = NULL; sscanf(namelist[lpc]->d_name, "pacemaker-%f.rng", &version); if((lpc + 1) < max) { float next_version = 0.0; sscanf(namelist[lpc+1]->d_name, "pacemaker-%f.rng", &next_version); if(floor(version) < floor(next_version)) { struct stat s; char *xslt = NULL; transform = crm_strdup_printf("upgrade-%.1f.xsl", version); xslt = get_schema_path(NULL, transform); if(stat(xslt, &s) != 0) { crm_err("Transform %s not found", xslt); free(xslt); __xml_schema_add(2, version, NULL, NULL, NULL, -1); break; } else { free(xslt); } } } else { next = -1; } __xml_schema_add(2, version, NULL, NULL, transform, next); free(namelist[lpc]); free(transform); } } /* 1.1 was the old name for -next */ __xml_schema_add(2, 0.0, "pacemaker-1.1", "pacemaker-next.rng", NULL, 0); __xml_schema_add(2, 0.0, "pacemaker-next", "pacemaker-next.rng", NULL, -1); __xml_schema_add(0, 0.0, "none", "N/A", NULL, -1); free(namelist); return TRUE; } static void set_parent_flag(xmlNode *xml, long flag) { for(; xml; xml = xml->parent) { xml_private_t *p = xml->_private; if(p == NULL) { /* During calls to xmlDocCopyNode(), _private will be unset for parent nodes */ } else { p->flags |= flag; /* crm_trace("Setting 
flag %x due to %s[@id=%s]", flag, xml->name, ID(xml)); */ } } } static void set_doc_flag(xmlNode *xml, long flag) { if(xml && xml->doc && xml->doc->_private){ /* During calls to xmlDocCopyNode(), xml->doc may be unset */ xml_private_t *p = xml->doc->_private; p->flags |= flag; /* crm_trace("Setting flag %x due to %s[@id=%s]", flag, xml->name, ID(xml)); */ } } static void __xml_node_dirty(xmlNode *xml) { set_doc_flag(xml, xpf_dirty); set_parent_flag(xml, xpf_dirty); } static void __xml_node_clean(xmlNode *xml) { xmlNode *cIter = NULL; xml_private_t *p = xml->_private; if(p) { p->flags = 0; } for (cIter = __xml_first_child(xml); cIter != NULL; cIter = __xml_next(cIter)) { __xml_node_clean(cIter); } } static void crm_node_created(xmlNode *xml) { xmlNode *cIter = NULL; xml_private_t *p = xml->_private; if(p && TRACKING_CHANGES(xml)) { if(is_not_set(p->flags, xpf_created)) { p->flags |= xpf_created; __xml_node_dirty(xml); } for (cIter = __xml_first_child(xml); cIter != NULL; cIter = __xml_next(cIter)) { crm_node_created(cIter); } } } static void crm_attr_dirty(xmlAttr *a) { xmlNode *parent = a->parent; xml_private_t *p = NULL; p = a->_private; p->flags |= (xpf_dirty|xpf_modified); p->flags = (p->flags & ~xpf_deleted); /* crm_trace("Setting flag %x due to %s[@id=%s, @%s=%s]", */ /* xpf_dirty, parent?parent->name:NULL, ID(parent), a->name, a->children->content); */ __xml_node_dirty(parent); } int get_tag_name(const char *input, size_t offset, size_t max); int get_attr_name(const char *input, size_t offset, size_t max); int get_attr_value(const char *input, size_t offset, size_t max); gboolean can_prune_leaf(xmlNode * xml_node); void diff_filter_context(int context, int upper_bound, int lower_bound, xmlNode * xml_node, xmlNode * parent); int in_upper_context(int depth, int context, xmlNode * xml_node); int add_xml_object(xmlNode * parent, xmlNode * target, xmlNode * update, gboolean as_diff); static inline const char * crm_attr_value(xmlAttr * attr) { if (attr == NULL || attr->children == NULL) { return NULL; } return (const char *)attr->children->content; } static inline xmlAttr * crm_first_attr(xmlNode * xml) { if (xml == NULL) { return NULL; } return xml->properties; } #define XML_PRIVATE_MAGIC (long) 0x81726354 static void __xml_acl_free(void *data) { if(data) { xml_acl_t *acl = data; free(acl->xpath); free(acl); } } static void __xml_private_clean(xml_private_t *p) { if(p) { CRM_ASSERT(p->check == XML_PRIVATE_MAGIC); free(p->user); p->user = NULL; if(p->acls) { g_list_free_full(p->acls, __xml_acl_free); p->acls = NULL; } if(p->deleted_paths) { g_list_free_full(p->deleted_paths, free); p->deleted_paths = NULL; } } } static void __xml_private_free(xml_private_t *p) { __xml_private_clean(p); free(p); } static void pcmkDeregisterNode(xmlNodePtr node) { __xml_private_free(node->_private); } static void pcmkRegisterNode(xmlNodePtr node) { xml_private_t *p = NULL; switch(node->type) { case XML_ELEMENT_NODE: case XML_DOCUMENT_NODE: case XML_ATTRIBUTE_NODE: case XML_COMMENT_NODE: p = calloc(1, sizeof(xml_private_t)); p->check = XML_PRIVATE_MAGIC; /* Flags will be reset if necessary when tracking is enabled */ p->flags |= (xpf_dirty|xpf_created); node->_private = p; break; case XML_TEXT_NODE: case XML_DTD_NODE: break; default: /* Ignore */ crm_trace("Ignoring %p %d", node, node->type); CRM_LOG_ASSERT(node->type == XML_ELEMENT_NODE); break; } if(p && TRACKING_CHANGES(node)) { /* XML_ELEMENT_NODE doesn't get picked up here, node->doc is * not hooked up at the point we are called */ set_doc_flag(node, 
xpf_dirty); __xml_node_dirty(node); } } static xml_acl_t * __xml_acl_create(xmlNode * xml, xmlNode *target, enum xml_private_flags mode) { xml_acl_t *acl = NULL; xml_private_t *p = NULL; const char *tag = crm_element_value(xml, XML_ACL_ATTR_TAG); const char *ref = crm_element_value(xml, XML_ACL_ATTR_REF); const char *xpath = crm_element_value(xml, XML_ACL_ATTR_XPATH); if(tag == NULL) { /* Compatability handling for pacemaker < 1.1.12 */ tag = crm_element_value(xml, XML_ACL_ATTR_TAGv1); } if(ref == NULL) { /* Compatability handling for pacemaker < 1.1.12 */ ref = crm_element_value(xml, XML_ACL_ATTR_REFv1); } if(target == NULL || target->doc == NULL || target->doc->_private == NULL){ CRM_ASSERT(target); CRM_ASSERT(target->doc); CRM_ASSERT(target->doc->_private); return NULL; } else if (tag == NULL && ref == NULL && xpath == NULL) { crm_trace("No criteria %p", xml); return NULL; } p = target->doc->_private; acl = calloc(1, sizeof(xml_acl_t)); if (acl) { const char *attr = crm_element_value(xml, XML_ACL_ATTR_ATTRIBUTE); acl->mode = mode; if(xpath) { acl->xpath = strdup(xpath); crm_trace("Using xpath: %s", acl->xpath); } else { int offset = 0; char buffer[XML_BUFFER_SIZE]; if(tag) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "//%s", tag); } else { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "//*"); } if(ref || attr) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "["); } if(ref) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "@id='%s'", ref); } if(ref && attr) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, " and "); } if(attr) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "@%s", attr); } if(ref || attr) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "]"); } CRM_LOG_ASSERT(offset > 0); acl->xpath = strdup(buffer); crm_trace("Built xpath: %s", acl->xpath); } p->acls = g_list_append(p->acls, acl); } return acl; } static gboolean __xml_acl_parse_entry(xmlNode * acl_top, xmlNode * acl_entry, xmlNode *target) { xmlNode *child = NULL; for (child = __xml_first_child(acl_entry); child; child = __xml_next(child)) { const char *tag = crm_element_name(child); const char *kind = crm_element_value(child, XML_ACL_ATTR_KIND); if (strcmp(XML_ACL_TAG_PERMISSION, tag) == 0){ tag = kind; } crm_trace("Processing %s %p", tag, child); if(tag == NULL) { CRM_ASSERT(tag != NULL); } else if (strcmp(XML_ACL_TAG_ROLE_REF, tag) == 0 || strcmp(XML_ACL_TAG_ROLE_REFv1, tag) == 0) { const char *ref_role = crm_element_value(child, XML_ATTR_ID); if (ref_role) { xmlNode *role = NULL; for (role = __xml_first_child(acl_top); role; role = __xml_next(role)) { if (strcmp(XML_ACL_TAG_ROLE, (const char *)role->name) == 0) { const char *role_id = crm_element_value(role, XML_ATTR_ID); if (role_id && strcmp(ref_role, role_id) == 0) { crm_debug("Unpacking referenced role: %s", role_id); __xml_acl_parse_entry(acl_top, role, target); break; } } } } } else if (strcmp(XML_ACL_TAG_READ, tag) == 0) { __xml_acl_create(child, target, xpf_acl_read); } else if (strcmp(XML_ACL_TAG_WRITE, tag) == 0) { __xml_acl_create(child, target, xpf_acl_write); } else if (strcmp(XML_ACL_TAG_DENY, tag) == 0) { __xml_acl_create(child, target, xpf_acl_deny); } else { crm_warn("Unknown ACL entry: %s/%s", tag, kind); } } return TRUE; } /* <acls> <acl_target id="l33t-haxor"><role id="auto-l33t-haxor"/></acl_target> <acl_role id="auto-l33t-haxor"> <acl_permission id="crook-nothing" kind="deny" xpath="/cib"/> </acl_role> <acl_target id="niceguy"> <role 
id="observer"/> </acl_target> <acl_role id="observer"> <acl_permission id="observer-read-1" kind="read" xpath="/cib"/> <acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name='stonith-enabled']"/> <acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name='target-role']"/> </acl_role> <acl_target id="badidea"><role id="auto-badidea"/></acl_target> <acl_role id="auto-badidea"> <acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/> <acl_permission id="badidea-resources-2" kind="deny" reference="dummy-meta_attributes"/> </acl_role> </acls> */ const char * __xml_acl_to_text(enum xml_private_flags flags) { if(is_set(flags, xpf_acl_deny)) { return "deny"; } if(is_set(flags, xpf_acl_write)) { return "read/write"; } if(is_set(flags, xpf_acl_read)) { return "read"; } return "none"; } static void __xml_acl_apply(xmlNode *xml) { GListPtr aIter = NULL; xml_private_t *p = NULL; xmlXPathObjectPtr xpathObj = NULL; if(xml_acl_enabled(xml) == FALSE) { p = xml->doc->_private; crm_trace("Not applying ACLs for %s", p->user); return; } p = xml->doc->_private; for(aIter = p->acls; aIter != NULL; aIter = aIter->next) { int max = 0, lpc = 0; xml_acl_t *acl = aIter->data; xpathObj = xpath_search(xml, acl->xpath); max = numXpathResults(xpathObj); for(lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); char *path = xml_get_path(match); p = match->_private; crm_trace("Applying %x to %s for %s", acl->mode, path, acl->xpath); #ifdef SUSE_ACL_COMPAT if(is_not_set(p->flags, acl->mode)) { if(is_set(p->flags, xpf_acl_read) || is_set(p->flags, xpf_acl_write) || is_set(p->flags, xpf_acl_deny)) { crm_config_warn("Configuration element %s is matched by multiple ACL rules, only the first applies ('%s' wins over '%s')", path, __xml_acl_to_text(p->flags), __xml_acl_to_text(acl->mode)); free(path); continue; } } #endif p->flags |= acl->mode; free(path); } crm_trace("Now enforcing ACL: %s (%d matches)", acl->xpath, max); freeXpathObject(xpathObj); } p = xml->_private; if(is_not_set(p->flags, xpf_acl_read) && is_not_set(p->flags, xpf_acl_write)) { p->flags |= xpf_acl_deny; p = xml->doc->_private; crm_info("Enforcing default ACL for %s to %s", p->user, crm_element_name(xml)); } } static void __xml_acl_unpack(xmlNode *source, xmlNode *target, const char *user) { #if ENABLE_ACL xml_private_t *p = NULL; if(target == NULL || target->doc == NULL || target->doc->_private == NULL) { return; } p = target->doc->_private; if(pcmk_acl_required(user) == FALSE) { crm_trace("no acls needed for '%s'", user); } else if(p->acls == NULL) { xmlNode *acls = get_xpath_object("//"XML_CIB_TAG_ACLS, source, LOG_TRACE); free(p->user); p->user = strdup(user); if(acls) { xmlNode *child = NULL; for (child = __xml_first_child(acls); child; child = __xml_next(child)) { const char *tag = crm_element_name(child); if (strcmp(tag, XML_ACL_TAG_USER) == 0 || strcmp(tag, XML_ACL_TAG_USERv1) == 0) { const char *id = crm_element_value(child, XML_ATTR_ID); if(id && strcmp(id, user) == 0) { crm_debug("Unpacking ACLs for %s", id); __xml_acl_parse_entry(acls, child, target); } } } } } #endif } static inline bool __xml_acl_mode_test(enum xml_private_flags allowed, enum xml_private_flags requested) { if(is_set(allowed, xpf_acl_deny)) { return FALSE; } else if(is_set(allowed, requested)) { return TRUE; } else if(is_set(requested, xpf_acl_read) && is_set(allowed, xpf_acl_write)) { return TRUE; } else if(is_set(requested, xpf_acl_create) && is_set(allowed, xpf_acl_write)) { return TRUE; } else 
if(is_set(requested, xpf_acl_create) && is_set(allowed, xpf_created)) { return TRUE; } return FALSE; } /* rc = TRUE if orig_cib has been filtered * That means '*result' rather than 'xml' should be exploited afterwards */ static bool __xml_purge_attributes(xmlNode *xml) { xmlNode *child = NULL; xmlAttr *xIter = NULL; bool readable_children = FALSE; xml_private_t *p = xml->_private; if(__xml_acl_mode_test(p->flags, xpf_acl_read)) { crm_trace("%s is readable", crm_element_name(xml), ID(xml)); return TRUE; } xIter = crm_first_attr(xml); while(xIter != NULL) { xmlAttr *tmp = xIter; const char *prop_name = (const char *)xIter->name; xIter = xIter->next; if (strcmp(prop_name, XML_ATTR_ID) == 0) { continue; } xmlUnsetProp(xml, tmp->name); } child = __xml_first_child(xml); while ( child != NULL ) { xmlNode *tmp = child; child = __xml_next(child); readable_children |= __xml_purge_attributes(tmp); } if(readable_children == FALSE) { free_xml(xml); /* Nothing readable under here, purge completely */ } return readable_children; } bool xml_acl_filtered_copy(const char *user, xmlNode* acl_source, xmlNode *xml, xmlNode ** result) { GListPtr aIter = NULL; xmlNode *target = NULL; xml_private_t *p = NULL; xml_private_t *doc = NULL; *result = NULL; if(xml == NULL || pcmk_acl_required(user) == FALSE) { crm_trace("no acls needed for '%s'", user); return FALSE; } crm_trace("filtering copy of %p for '%s'", xml, user); target = copy_xml(xml); if(target == NULL) { return TRUE; } __xml_acl_unpack(acl_source, target, user); set_doc_flag(target, xpf_acl_enabled); __xml_acl_apply(target); doc = target->doc->_private; for(aIter = doc->acls; aIter != NULL && target; aIter = aIter->next) { int max = 0; xml_acl_t *acl = aIter->data; if(acl->mode != xpf_acl_deny) { /* Nothing to do */ } else if(acl->xpath) { int lpc = 0; xmlXPathObjectPtr xpathObj = xpath_search(target, acl->xpath); max = numXpathResults(xpathObj); for(lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); crm_trace("Purging attributes from %s", acl->xpath); if(__xml_purge_attributes(match) == FALSE && match == target) { crm_trace("No access to the entire document for %s", user); freeXpathObject(xpathObj); return TRUE; } } crm_trace("Enforced ACL %s (%d matches)", acl->xpath, max); freeXpathObject(xpathObj); } } p = target->_private; if(is_set(p->flags, xpf_acl_deny) && __xml_purge_attributes(target) == FALSE) { crm_trace("No access to the entire document for %s", user); return TRUE; } if(doc->acls) { g_list_free_full(doc->acls, __xml_acl_free); doc->acls = NULL; } else { crm_trace("Ordinary user '%s' cannot access the CIB without any defined ACLs", doc->user); free_xml(target); target = NULL; } if(target) { *result = target; } return TRUE; } static void __xml_acl_post_process(xmlNode * xml) { xmlNode *cIter = __xml_first_child(xml); xml_private_t *p = xml->_private; if(is_set(p->flags, xpf_created)) { xmlAttr *xIter = NULL; /* Always allow new scaffolding, ie. 
node with no attributes or only an 'id' */ for (xIter = crm_first_attr(xml); xIter != NULL; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; if (strcmp(prop_name, XML_ATTR_ID) == 0) { /* Delay the acl check */ continue; } else if(__xml_acl_check(xml, NULL, xpf_acl_write)) { crm_trace("Creation of %s=%s is allowed", crm_element_name(xml), ID(xml)); break; } else { char *path = xml_get_path(xml); crm_trace("Cannot add new node %s at %s", crm_element_name(xml), path); if(xml != xmlDocGetRootElement(xml->doc)) { xmlUnlinkNode(xml); xmlFreeNode(xml); } free(path); return; } } } while (cIter != NULL) { xmlNode *child = cIter; cIter = __xml_next(cIter); /* In case it is free'd */ __xml_acl_post_process(child); } } bool xml_acl_denied(xmlNode *xml) { if(xml && xml->doc && xml->doc->_private){ xml_private_t *p = xml->doc->_private; return is_set(p->flags, xpf_acl_denied); } return FALSE; } void xml_acl_disable(xmlNode *xml) { if(xml_acl_enabled(xml)) { xml_private_t *p = xml->doc->_private; /* Catch anything that was created but shouldn't have been */ __xml_acl_apply(xml); __xml_acl_post_process(xml); clear_bit(p->flags, xpf_acl_enabled); } } bool xml_acl_enabled(xmlNode *xml) { if(xml && xml->doc && xml->doc->_private){ xml_private_t *p = xml->doc->_private; return is_set(p->flags, xpf_acl_enabled); } return FALSE; } void xml_track_changes(xmlNode * xml, const char *user, xmlNode *acl_source, bool enforce_acls) { xml_accept_changes(xml); crm_trace("Tracking changes%s to %p", enforce_acls?" with ACLs":"", xml); set_doc_flag(xml, xpf_tracking); if(enforce_acls) { if(acl_source == NULL) { acl_source = xml; } set_doc_flag(xml, xpf_acl_enabled); __xml_acl_unpack(acl_source, xml, user); __xml_acl_apply(xml); } } bool xml_tracking_changes(xmlNode * xml) { if(xml == NULL) { return FALSE; } else if(is_set(((xml_private_t *)xml->doc->_private)->flags, xpf_tracking)) { return TRUE; } return FALSE; } bool xml_document_dirty(xmlNode *xml) { if(xml != NULL && xml->doc && xml->doc->_private) { xml_private_t *doc = xml->doc->_private; return is_set(doc->flags, xpf_dirty); } return FALSE; } /* <diff format="2.0"> <version> <source admin_epoch="1" epoch="2" num_updates="3"/> <target admin_epoch="1" epoch="3" num_updates="0"/> </version> <change operation="add" xpath="/cib/configuration/nodes"> <node id="node2" uname="node2" description="foo"/> </change> <change operation="add" xpath="/cib/configuration/nodes/node[node2]"> <instance_attributes id="nodes-node"><!-- NOTE: can be a full tree --> <nvpair id="nodes-node2-ram" name="ram" value="1024M"/> </instance_attributes> </change> <change operation="update" xpath="/cib/configuration/nodes[@id='node2']"> <change-list> <change-attr operation="set" name="type" value="member"/> <change-attr operation="unset" name="description"/> </change-list> <change-result> <node id="node2" uname="node2" type="member"/><!-- NOTE: not recursive --> </change-result> </change> <change operation="delete" xpath="/cib/configuration/nodes/node[@id='node3'] /"> <change operation="update" xpath="/cib/configuration/resources/group[@id='g1']"> <change-list> <change-attr operation="set" name="description" value="some grabage here"/> </change-list> <change-result> <group id="g1" description="some grabage here"/><!-- NOTE: not recursive --> </change-result> </change> <change operation="update" xpath="/cib/status/node_state[@id='node2]/lrm[@id='node2']/lrm_resources/lrm_resource[@id='Fence']"> <change-list> <change-attr operation="set" name="oper" value="member"/> 
<change-attr operation="set" name="operation_key" value="Fence_start_0"/> <change-attr operation="set" name="operation" value="start"/> <change-attr operation="set" name="transition-key" value="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"/> <change-attr operation="set" name="transition-magic" value="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"/> <change-attr operation="set" name="call-id" value="2"/> <change-attr operation="set" name="rc-code" value="0"/> </change-list> <change-result> <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </change-result> </change> </diff> */ static int __xml_offset(xmlNode *xml) { int position = 0; xmlNode *cIter = NULL; for(cIter = xml; cIter->prev; cIter = cIter->prev) { xml_private_t *p = ((xmlNode*)cIter->prev)->_private; if(is_not_set(p->flags, xpf_skip)) { position++; } } return position; } static int __xml_offset_no_deletions(xmlNode *xml) { int position = 0; xmlNode *cIter = NULL; for(cIter = xml; cIter->prev; cIter = cIter->prev) { xml_private_t *p = ((xmlNode*)cIter->prev)->_private; if(is_not_set(p->flags, xpf_deleted)) { position++; } } return position; } static void __xml_build_changes(xmlNode * xml, xmlNode *patchset) { xmlNode *cIter = NULL; xmlAttr *pIter = NULL; xmlNode *change = NULL; xml_private_t *p = xml->_private; if(patchset && is_set(p->flags, xpf_created)) { int offset = 0; char buffer[XML_BUFFER_SIZE]; if(__get_prefix(NULL, xml->parent, buffer, offset) > 0) { int position = __xml_offset_no_deletions(xml); change = create_xml_node(patchset, XML_DIFF_CHANGE); crm_xml_add(change, XML_DIFF_OP, "create"); crm_xml_add(change, XML_DIFF_PATH, buffer); crm_xml_add_int(change, XML_DIFF_POSITION, position); add_node_copy(change, xml); } return; } for (pIter = crm_first_attr(xml); pIter != NULL; pIter = pIter->next) { xmlNode *attr = NULL; p = pIter->_private; if(is_not_set(p->flags, xpf_deleted) && is_not_set(p->flags, xpf_dirty)) { continue; } if(change == NULL) { int offset = 0; char buffer[XML_BUFFER_SIZE]; if(__get_prefix(NULL, xml, buffer, offset) > 0) { change = create_xml_node(patchset, XML_DIFF_CHANGE); crm_xml_add(change, XML_DIFF_OP, "modify"); crm_xml_add(change, XML_DIFF_PATH, buffer); change = create_xml_node(change, XML_DIFF_LIST); } } attr = create_xml_node(change, XML_DIFF_ATTR); crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, (const char *)pIter->name); if(p->flags & xpf_deleted) { crm_xml_add(attr, XML_DIFF_OP, "unset"); } else { const char *value = crm_element_value(xml, (const char *)pIter->name); crm_xml_add(attr, XML_DIFF_OP, "set"); crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, value); } } if(change) { xmlNode *result = NULL; change = create_xml_node(change->parent, XML_DIFF_RESULT); result = create_xml_node(change, (const char *)xml->name); for (pIter = crm_first_attr(xml); pIter != NULL; pIter = pIter->next) { const char *value = crm_element_value(xml, (const char *)pIter->name); p = pIter->_private; if (is_not_set(p->flags, xpf_deleted)) { crm_xml_add(result, (const char *)pIter->name, value); } } } for (cIter = __xml_first_child(xml); cIter != NULL; cIter = __xml_next(cIter)) { __xml_build_changes(cIter, patchset); } p = xml->_private; if(patchset && is_set(p->flags, xpf_moved)) { int offset = 0; char 
buffer[XML_BUFFER_SIZE]; crm_trace("%s.%s moved to position %d", xml->name, ID(xml), __xml_offset(xml)); if(__get_prefix(NULL, xml, buffer, offset) > 0) { change = create_xml_node(patchset, XML_DIFF_CHANGE); crm_xml_add(change, XML_DIFF_OP, "move"); crm_xml_add(change, XML_DIFF_PATH, buffer); crm_xml_add_int(change, XML_DIFF_POSITION, __xml_offset_no_deletions(xml)); } } } static void __xml_accept_changes(xmlNode * xml) { xmlNode *cIter = NULL; xmlAttr *pIter = NULL; xml_private_t *p = xml->_private; p->flags = xpf_none; pIter = crm_first_attr(xml); while (pIter != NULL) { const xmlChar *name = pIter->name; p = pIter->_private; pIter = pIter->next; if(p->flags & xpf_deleted) { xml_remove_prop(xml, (const char *)name); } else { p->flags = xpf_none; } } for (cIter = __xml_first_child(xml); cIter != NULL; cIter = __xml_next(cIter)) { __xml_accept_changes(cIter); } } static bool is_config_change(xmlNode *xml) { GListPtr gIter = NULL; xml_private_t *p = NULL; xmlNode *config = first_named_child(xml, XML_CIB_TAG_CONFIGURATION); if(config) { p = config->_private; } if(p && is_set(p->flags, xpf_dirty)) { return TRUE; } if(xml->doc && xml->doc->_private) { p = xml->doc->_private; for(gIter = p->deleted_paths; gIter; gIter = gIter->next) { char *path = gIter->data; if(strstr(path, "/"XML_TAG_CIB"/"XML_CIB_TAG_CONFIGURATION) != NULL) { return TRUE; } } } return FALSE; } static void xml_repair_v1_diff(xmlNode * last, xmlNode * next, xmlNode * local_diff, gboolean changed) { int lpc = 0; xmlNode *cib = NULL; xmlNode *diff_child = NULL; const char *tag = NULL; const char *vfields[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; if (local_diff == NULL) { crm_trace("Nothing to do"); return; } tag = "diff-removed"; diff_child = find_xml_node(local_diff, tag, FALSE); if (diff_child == NULL) { diff_child = create_xml_node(local_diff, tag); } tag = XML_TAG_CIB; cib = find_xml_node(diff_child, tag, FALSE); if (cib == NULL) { cib = create_xml_node(diff_child, tag); } for(lpc = 0; last && lpc < DIMOF(vfields); lpc++){ const char *value = crm_element_value(last, vfields[lpc]); crm_xml_add(diff_child, vfields[lpc], value); if(changed || lpc == 2) { crm_xml_add(cib, vfields[lpc], value); } } tag = "diff-added"; diff_child = find_xml_node(local_diff, tag, FALSE); if (diff_child == NULL) { diff_child = create_xml_node(local_diff, tag); } tag = XML_TAG_CIB; cib = find_xml_node(diff_child, tag, FALSE); if (cib == NULL) { cib = create_xml_node(diff_child, tag); } for(lpc = 0; next && lpc < DIMOF(vfields); lpc++){ const char *value = crm_element_value(next, vfields[lpc]); crm_xml_add(diff_child, vfields[lpc], value); } if (next) { xmlAttrPtr xIter = NULL; for (xIter = next->properties; xIter; xIter = xIter->next) { const char *p_name = (const char *)xIter->name; const char *p_value = crm_element_value(next, p_name); xmlSetProp(cib, (const xmlChar *)p_name, (const xmlChar *)p_value); } } crm_log_xml_explicit(local_diff, "Repaired-diff"); } static xmlNode * xml_create_patchset_v1(xmlNode *source, xmlNode *target, bool config, bool with_digest) { xmlNode *patchset = diff_xml_object(source, target, !with_digest); if(patchset) { CRM_LOG_ASSERT(xml_document_dirty(target)); xml_repair_v1_diff(source, target, patchset, config); crm_xml_add(patchset, "format", "1"); } return patchset; } static xmlNode * xml_create_patchset_v2(xmlNode *source, xmlNode *target) { int lpc = 0; GListPtr gIter = NULL; xml_private_t *doc = NULL; xmlNode *v = NULL; xmlNode *version = NULL; xmlNode *patchset = NULL; const 
char *vfields[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; CRM_ASSERT(target); if(xml_document_dirty(target) == FALSE) { return NULL; } CRM_ASSERT(target->doc); doc = target->doc->_private; patchset = create_xml_node(NULL, XML_TAG_DIFF); crm_xml_add_int(patchset, "format", 2); version = create_xml_node(patchset, XML_DIFF_VERSION); v = create_xml_node(version, XML_DIFF_VSOURCE); for(lpc = 0; lpc < DIMOF(vfields); lpc++){ const char *value = crm_element_value(source, vfields[lpc]); if(value == NULL) { value = "1"; } crm_xml_add(v, vfields[lpc], value); } v = create_xml_node(version, XML_DIFF_VTARGET); for(lpc = 0; lpc < DIMOF(vfields); lpc++){ const char *value = crm_element_value(target, vfields[lpc]); if(value == NULL) { value = "1"; } crm_xml_add(v, vfields[lpc], value); } for(gIter = doc->deleted_paths; gIter; gIter = gIter->next) { xmlNode *change = create_xml_node(patchset, XML_DIFF_CHANGE); crm_xml_add(change, XML_DIFF_OP, "delete"); crm_xml_add(change, XML_DIFF_PATH, gIter->data); } __xml_build_changes(target, patchset); return patchset; } static gboolean patch_legacy_mode(void) { static gboolean init = TRUE; static gboolean legacy = FALSE; if(init) { init = FALSE; legacy = daemon_option_enabled("cib", "legacy"); if(legacy) { crm_notice("Enabled legacy mode"); } } return legacy; } xmlNode * xml_create_patchset(int format, xmlNode *source, xmlNode *target, bool *config_changed, bool manage_version, bool with_digest) { int counter = 0; bool config = FALSE; xmlNode *patch = NULL; const char *version = crm_element_value(source, XML_ATTR_CRM_VERSION); xml_acl_disable(target); if(xml_document_dirty(target) == FALSE) { crm_trace("No change %d", format); return NULL; /* No change */ } config = is_config_change(target); if(config_changed) { *config_changed = config; } if(manage_version && config) { crm_trace("Config changed %d", format); crm_xml_add(target, XML_ATTR_NUMUPDATES, "0"); crm_element_value_int(target, XML_ATTR_GENERATION, &counter); crm_xml_add_int(target, XML_ATTR_GENERATION, counter+1); } else if(manage_version) { crm_trace("Status changed %d", format); crm_element_value_int(target, XML_ATTR_NUMUPDATES, &counter); crm_xml_add_int(target, XML_ATTR_NUMUPDATES, counter+1); } if(format == 0) { if(patch_legacy_mode()) { format = 1; } else if(compare_version("3.0.8", version) < 0) { format = 2; } else { format = 1; } crm_trace("Using patch format %d for version: %s", format, version); } switch(format) { case 1: with_digest = TRUE; patch = xml_create_patchset_v1(source, target, config, with_digest); break; case 2: patch = xml_create_patchset_v2(source, target); break; default: crm_err("Unknown patch format: %d", format); return NULL; } if(patch && with_digest) { char *digest = calculate_xml_versioned_digest(target, FALSE, TRUE, version); crm_xml_add(patch, XML_ATTR_DIGEST, digest); free(digest); } return patch; } static void __xml_log_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, int options); void xml_log_patchset(uint8_t log_level, const char *function, xmlNode * patchset) { int format = 1; xmlNode *child = NULL; xmlNode *added = NULL; xmlNode *removed = NULL; gboolean is_first = TRUE; int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; const char *fmt = NULL; const char *digest = NULL; int options = xml_log_option_formatted; static struct qb_log_callsite *patchset_cs = NULL; if (patchset_cs == NULL) { patchset_cs = qb_log_callsite_get(function, __FILE__, "xml-patchset", 
log_level, __LINE__, 0); } if (patchset == NULL) { crm_trace("Empty patch"); return; } else if (log_level == 0) { /* Log to stdout */ } else if (crm_is_callsite_active(patchset_cs, log_level, 0) == FALSE) { return; } xml_patch_versions(patchset, add, del); fmt = crm_element_value(patchset, "format"); digest = crm_element_value(patchset, XML_ATTR_DIGEST); if (add[2] != del[2] || add[1] != del[1] || add[0] != del[0]) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "Diff: --- %d.%d.%d %s", del[0], del[1], del[2], fmt); do_crm_log_alias(log_level, __FILE__, function, __LINE__, "Diff: +++ %d.%d.%d %s", add[0], add[1], add[2], digest); } else if (patchset != NULL && (add[0] || add[1] || add[2])) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "%s: Local-only Change: %d.%d.%d", function ? function : "", add[0], add[1], add[2]); } crm_element_value_int(patchset, "format", &format); if(format == 2) { xmlNode *change = NULL; for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) { const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); if(op == NULL) { } else if(strcmp(op, "create") == 0) { int lpc = 0, max = 0; char *prefix = crm_strdup_printf("++ %s: ", xpath); max = strlen(prefix); __xml_log_element(log_level, __FILE__, function, __LINE__, prefix, change->children, 0, xml_log_option_formatted|xml_log_option_open); for(lpc = 2; lpc < max; lpc++) { prefix[lpc] = ' '; } __xml_log_element(log_level, __FILE__, function, __LINE__, prefix, change->children, 0, xml_log_option_formatted|xml_log_option_close|xml_log_option_children); free(prefix); } else if(strcmp(op, "move") == 0) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "+~ %s moved to offset %s", xpath, crm_element_value(change, XML_DIFF_POSITION)); } else if(strcmp(op, "modify") == 0) { xmlNode *clist = first_named_child(change, XML_DIFF_LIST); char buffer_set[XML_BUFFER_SIZE]; char buffer_unset[XML_BUFFER_SIZE]; int o_set = 0; int o_unset = 0; buffer_set[0] = 0; buffer_unset[0] = 0; for (child = __xml_first_child(clist); child != NULL; child = __xml_next(child)) { const char *name = crm_element_value(child, "name"); op = crm_element_value(child, XML_DIFF_OP); if(op == NULL) { } else if(strcmp(op, "set") == 0) { const char *value = crm_element_value(child, "value"); if(o_set > 0) { o_set += snprintf(buffer_set + o_set, XML_BUFFER_SIZE - o_set, ", "); } o_set += snprintf(buffer_set + o_set, XML_BUFFER_SIZE - o_set, "@%s=%s", name, value); } else if(strcmp(op, "unset") == 0) { if(o_unset > 0) { o_unset += snprintf(buffer_unset + o_unset, XML_BUFFER_SIZE - o_unset, ", "); } o_unset += snprintf(buffer_unset + o_unset, XML_BUFFER_SIZE - o_unset, "@%s", name); } } if(o_set) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "+ %s: %s", xpath, buffer_set); } if(o_unset) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s: %s", xpath, buffer_unset); } } else if(strcmp(op, "delete") == 0) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s", xpath); } } return; } if (log_level < LOG_DEBUG || function == NULL) { options |= xml_log_option_diff_short; } removed = find_xml_node(patchset, "diff-removed", FALSE); for (child = __xml_first_child(removed); child != NULL; child = __xml_next(child)) { log_data_element(log_level, __FILE__, function, __LINE__, "- ", child, 0, options | xml_log_option_diff_minus); if (is_first) { is_first = FALSE; } else { do_crm_log_alias(log_level, __FILE__, 
                             function, __LINE__, " --- ");
        }
    }

    is_first = TRUE;
    added = find_xml_node(patchset, "diff-added", FALSE);
    for (child = __xml_first_child(added); child != NULL; child = __xml_next(child)) {
        log_data_element(log_level, __FILE__, function, __LINE__, "+ ", child, 0,
                         options | xml_log_option_diff_plus);
        if (is_first) {
            is_first = FALSE;
        } else {
            do_crm_log_alias(log_level, __FILE__, function, __LINE__, " +++ ");
        }
    }
}

void
xml_log_changes(uint8_t log_level, const char *function, xmlNode * xml)
{
    GListPtr gIter = NULL;
    xml_private_t *doc = NULL;

    CRM_ASSERT(xml);
    CRM_ASSERT(xml->doc);

    doc = xml->doc->_private;
    if(is_not_set(doc->flags, xpf_dirty)) {
        return;
    }

    for(gIter = doc->deleted_paths; gIter; gIter = gIter->next) {
        do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s", (char*)gIter->data);
    }

    log_data_element(log_level, __FILE__, function, __LINE__, "+ ", xml, 0,
                     xml_log_option_formatted|xml_log_option_dirty_add);
}

void
xml_accept_changes(xmlNode * xml)
{
    xmlNode *top = NULL;
    xml_private_t *doc = NULL;

    if(xml == NULL) {
        return;
    }

    crm_trace("Accepting changes to %p", xml);
    doc = xml->doc->_private;
    top = xmlDocGetRootElement(xml->doc);
    __xml_private_clean(xml->doc->_private);

    if(is_not_set(doc->flags, xpf_dirty)) {
        doc->flags = xpf_none;
        return;
    }

    doc->flags = xpf_none;
    __xml_accept_changes(top);
}

/* Simplified version for applying v1-style XML patches */
static void
__subtract_xml_object(xmlNode * target, xmlNode * patch)
{
    xmlNode *patch_child = NULL;
    xmlNode *cIter = NULL;
    xmlAttrPtr xIter = NULL;
    char *id = NULL;
    const char *name = NULL;
    const char *value = NULL;

    if (target == NULL || patch == NULL) {
        return;
    }

    if (target->type == XML_COMMENT_NODE) {
        gboolean dummy;

        subtract_xml_comment(target->parent, target, patch, &dummy);
    }

    name = crm_element_name(target);
    CRM_CHECK(name != NULL, return);
    CRM_CHECK(safe_str_eq(crm_element_name(target), crm_element_name(patch)), return);
    CRM_CHECK(safe_str_eq(ID(target), ID(patch)), return);

    /* check for XML_DIFF_MARKER in a child */
    id = crm_element_value_copy(target, XML_ATTR_ID);
    value = crm_element_value(patch, XML_DIFF_MARKER);
    if (value != NULL && strcmp(value, "removed:top") == 0) {
        crm_trace("We are the root of the deletion: %s.id=%s", name, id);
        free_xml(target);
        free(id);
        return;
    }

    for (xIter = crm_first_attr(patch); xIter != NULL; xIter = xIter->next) {
        const char *p_name = (const char *)xIter->name;

        /* Removing and then restoring the id field would change the ordering of properties */
        if (safe_str_neq(p_name, XML_ATTR_ID)) {
            xml_remove_prop(target, p_name);
        }
    }

    /* changes to child objects */
    cIter = __xml_first_child(target);
    while (cIter) {
        xmlNode *target_child = cIter;

        cIter = __xml_next(cIter);
        if (target_child->type == XML_COMMENT_NODE) {
            patch_child = find_xml_comment(patch, target_child);

        } else {
            patch_child = find_entity(patch, crm_element_name(target_child), ID(target_child));
        }

        __subtract_xml_object(target_child, patch_child);
    }
    free(id);
}

static void
__add_xml_object(xmlNode * parent, xmlNode * target, xmlNode * patch)
{
    xmlNode *patch_child = NULL;
    xmlNode *target_child = NULL;
    xmlAttrPtr xIter = NULL;
    const char *id = NULL;
    const char *name = NULL;
    const char *value = NULL;

    if (patch == NULL) {
        return;
    } else if (parent == NULL && target == NULL) {
        return;
    }

    /* check for XML_DIFF_MARKER in a child */
    value = crm_element_value(patch, XML_DIFF_MARKER);
    if (target == NULL && value != NULL && strcmp(value, "added:top") == 0) {
        id = ID(patch);
        name = crm_element_name(patch);
        crm_trace("We are the root of the addition: %s.id=%s", name, id);
        add_node_copy(parent, patch);
        return;

    } else if(target == NULL) {
        id = ID(patch);
        name = crm_element_name(patch);
        crm_err("Could not locate: %s.id=%s", name, id);
        return;
    }

    if (target->type == XML_COMMENT_NODE) {
        add_xml_comment(parent, target, patch);
    }

    name = crm_element_name(target);
    CRM_CHECK(name != NULL, return);
    CRM_CHECK(safe_str_eq(crm_element_name(target), crm_element_name(patch)), return);
    CRM_CHECK(safe_str_eq(ID(target), ID(patch)), return);

    for (xIter = crm_first_attr(patch); xIter != NULL; xIter = xIter->next) {
        const char *p_name = (const char *)xIter->name;
        const char *p_value = crm_element_value(patch, p_name);

        xml_remove_prop(target, p_name); /* Preserve the patch order */
        crm_xml_add(target, p_name, p_value);
    }

    /* changes to child objects */
    for (patch_child = __xml_first_child(patch); patch_child != NULL;
         patch_child = __xml_next(patch_child)) {
        if (patch_child->type == XML_COMMENT_NODE) {
            target_child = find_xml_comment(target, patch_child);

        } else {
            target_child = find_entity(target, crm_element_name(patch_child), ID(patch_child));
        }

        __add_xml_object(target, target_child, patch_child);
    }
}

bool
xml_patch_versions(xmlNode *patchset, int add[3], int del[3])
{
    int lpc = 0;
    int format = 1;
    xmlNode *tmp = NULL;

    const char *vfields[] = {
        XML_ATTR_GENERATION_ADMIN,
        XML_ATTR_GENERATION,
        XML_ATTR_NUMUPDATES,
    };

    crm_element_value_int(patchset, "format", &format);
    switch(format) {
        case 1:
            tmp = find_xml_node(patchset, "diff-removed", FALSE);
            tmp = find_xml_node(tmp, "cib", FALSE);
            if(tmp == NULL) {
                /* Revert to the diff-removed line */
                tmp = find_xml_node(patchset, "diff-removed", FALSE);
            }
            break;
        case 2:
            tmp = find_xml_node(patchset, "version", FALSE);
            tmp = find_xml_node(tmp, "source", FALSE);
            break;
        default:
            crm_warn("Unknown patch format: %d", format);
            return -EINVAL;
    }

    if (tmp) {
        for(lpc = 0; lpc < DIMOF(vfields); lpc++) {
            crm_element_value_int(tmp, vfields[lpc], &(del[lpc]));
            crm_trace("Got %d for del[%s]", del[lpc], vfields[lpc]);
        }
    }

    switch(format) {
        case 1:
            tmp = find_xml_node(patchset, "diff-added", FALSE);
            tmp = find_xml_node(tmp, "cib", FALSE);
            if(tmp == NULL) {
                /* Revert to the diff-added line */
                tmp = find_xml_node(patchset, "diff-added", FALSE);
            }
            break;
        case 2:
            tmp = find_xml_node(patchset, "version", FALSE);
            tmp = find_xml_node(tmp, "target", FALSE);
            break;
        default:
            crm_warn("Unknown patch format: %d", format);
            return -EINVAL;
    }

    if (tmp) {
        for(lpc = 0; lpc < DIMOF(vfields); lpc++) {
            crm_element_value_int(tmp, vfields[lpc], &(add[lpc]));
            crm_trace("Got %d for add[%s]", add[lpc], vfields[lpc]);
        }
    }

    return pcmk_ok;
}

static int
xml_patch_version_check(xmlNode *xml, xmlNode *patchset, int format)
{
    int lpc = 0;
    bool changed = FALSE;

    int this[] = { 0, 0, 0 };
    int add[] = { 0, 0, 0 };
    int del[] = { 0, 0, 0 };

    const char *vfields[] = {
        XML_ATTR_GENERATION_ADMIN,
        XML_ATTR_GENERATION,
        XML_ATTR_NUMUPDATES,
    };

    for(lpc = 0; lpc < DIMOF(vfields); lpc++) {
        crm_element_value_int(xml, vfields[lpc], &(this[lpc]));
        crm_trace("Got %d for this[%s]", this[lpc], vfields[lpc]);
        if (this[lpc] < 0) {
            this[lpc] = 0;
        }
    }

    /* Set some defaults in case nothing is present */
    add[0] = this[0];
    add[1] = this[1];
    add[2] = this[2] + 1;
    for(lpc = 0; lpc < DIMOF(vfields); lpc++) {
        del[lpc] = this[lpc];
    }

    xml_patch_versions(patchset, add, del);

    for(lpc = 0; lpc < DIMOF(vfields); lpc++) {
        if(this[lpc] < del[lpc]) {
            crm_debug("Current %s is too low (%d < %d)", vfields[lpc], this[lpc], del[lpc]);
            return -pcmk_err_diff_resync;

        } else if(this[lpc] > del[lpc]) {
            crm_info("Current %s is too high (%d > %d)", vfields[lpc], this[lpc], del[lpc]);
            return -pcmk_err_old_data;
        }
    }

    for(lpc = 0; lpc < DIMOF(vfields); lpc++) {
        if(add[lpc] > del[lpc]) {
            changed = TRUE;
        }
    }

    if(changed == FALSE) {
        crm_notice("Versions did not change in patch %d.%d.%d", add[0], add[1], add[2]);
        return -pcmk_err_old_data;
    }

    crm_debug("Can apply patch %d.%d.%d to %d.%d.%d",
              add[0], add[1], add[2], this[0], this[1], this[2]);
    return pcmk_ok;
}

static int
xml_apply_patchset_v1(xmlNode *xml, xmlNode *patchset, bool check_version)
{
    int rc = pcmk_ok;
    int root_nodes_seen = 0;
    char *version = crm_element_value_copy(xml, XML_ATTR_CRM_VERSION);

    xmlNode *child_diff = NULL;
    xmlNode *added = find_xml_node(patchset, "diff-added", FALSE);
    xmlNode *removed = find_xml_node(patchset, "diff-removed", FALSE);
    xmlNode *old = copy_xml(xml);

    crm_trace("Subtraction Phase");
    for (child_diff = __xml_first_child(removed); child_diff != NULL;
         child_diff = __xml_next(child_diff)) {
        CRM_CHECK(root_nodes_seen == 0, rc = FALSE);
        if (root_nodes_seen == 0) {
            __subtract_xml_object(xml, child_diff);
        }
        root_nodes_seen++;
    }

    if (root_nodes_seen > 1) {
        crm_err("(-) Diffs cannot contain more than one change set... saw %d", root_nodes_seen);
        rc = -ENOTUNIQ;
    }

    root_nodes_seen = 0;

    crm_trace("Addition Phase");
    if (rc == pcmk_ok) {
        xmlNode *child_diff = NULL;

        for (child_diff = __xml_first_child(added); child_diff != NULL;
             child_diff = __xml_next(child_diff)) {
            CRM_CHECK(root_nodes_seen == 0, rc = FALSE);
            if (root_nodes_seen == 0) {
                __add_xml_object(NULL, xml, child_diff);
            }
            root_nodes_seen++;
        }
    }

    if (root_nodes_seen > 1) {
        crm_err("(+) Diffs cannot contain more than one change set... saw %d", root_nodes_seen);
        rc = -ENOTUNIQ;
    }

    purge_diff_markers(xml); /* Purge prior to checking the digest */

    free_xml(old);
    free(version);
    return rc;
}

static xmlNode *
__first_xml_child_match(xmlNode *parent, const char *name, const char *id)
{
    xmlNode *cIter = NULL;

    for (cIter = __xml_first_child(parent); cIter != NULL; cIter = __xml_next(cIter)) {
        if(strcmp((const char *)cIter->name, name) != 0) {
            continue;
        } else if(id) {
            const char *cid = ID(cIter);

            if(cid == NULL || strcmp(cid, id) != 0) {
                continue;
            }
        }
        return cIter;
    }
    return NULL;
}

static xmlNode *
__xml_find_path(xmlNode *top, const char *key)
{
    xmlNode *target = (xmlNode*)top->doc;
    /* Size each scratch buffer to hold the whole key: every sscanf()
     * conversion below extracts a substring of 'key', so these cannot
     * overflow (fixed XML_BUFFER_SIZE allocations could, for a long key) */
    size_t key_len = strlen(key);
    char *id = malloc(key_len + 1);
    char *tag = malloc(key_len + 1);
    char *section = malloc(key_len + 1);
    char *current = strdup(key);
    char *remainder = malloc(key_len + 1);
    int rc = 0;

    while(current) {
        rc = sscanf (current, "/%[^/]%s", section, remainder);
        if(rc <= 0) {
            crm_trace("Done");
            break;

        } else if(rc > 2) {
            crm_trace("Aborting on %s", current);
            target = NULL;
            break;

        } else if(tag && section) {
            int f = sscanf (section, "%[^[][@id='%[^']", tag, id);

            switch(f) {
                case 1:
                    target = __first_xml_child_match(target, tag, NULL);
                    break;
                case 2:
                    target = __first_xml_child_match(target, tag, id);
                    break;
                default:
                    crm_trace("Aborting on %s", section);
                    target = NULL;
                    break;
            }

            if(rc == 1 || target == NULL) {
                crm_trace("Done");
                break;

            } else {
                char *tmp = current;
                current = remainder;
                remainder = tmp;
            }
        }
    }

    if(target) {
        char *path = (char *)xmlGetNodePath(target);

        crm_trace("Found %s for %s", path, key);
        free(path);
    } else {
        crm_debug("No match for %s", key);
    }

    free(remainder);
    free(current);
    free(section);
    free(tag);
    free(id);
    return target;
}

static int
xml_apply_patchset_v2(xmlNode *xml, xmlNode *patchset, bool check_version)
{
    int rc = pcmk_ok;
    xmlNode *change = NULL;

    for (change = __xml_first_child(patchset); change
!= NULL; change = __xml_next(change)) { xmlNode *match = NULL; const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); crm_trace("Processing %s %s", change->name, op); if(op == NULL) { continue; } #if 0 match = get_xpath_object(xpath, xml, LOG_TRACE); #else match = __xml_find_path(xml, xpath); #endif crm_trace("Performing %s on %s with %p", op, xpath, match); if(match == NULL && strcmp(op, "delete") == 0) { crm_debug("No %s match for %s in %p", op, xpath, xml->doc); continue; } else if(match == NULL) { crm_err("No %s match for %s in %p", op, xpath, xml->doc); rc = -pcmk_err_diff_failed; continue; } else if(strcmp(op, "create") == 0) { int position = 0; xmlNode *child = NULL; xmlNode *match_child = NULL; match_child = match->children; crm_element_value_int(change, XML_DIFF_POSITION, &position); while(match_child && position != __xml_offset(match_child)) { match_child = match_child->next; } child = xmlDocCopyNode(change->children, match->doc, 1); if(match_child) { crm_trace("Adding %s at position %d", child->name, position); xmlAddPrevSibling(match_child, child); } else if(match->last) { /* Add to the end */ crm_trace("Adding %s at position %d (end)", child->name, position); xmlAddNextSibling(match->last, child); } else { crm_trace("Adding %s at position %d (first)", child->name, position); CRM_LOG_ASSERT(position == 0); xmlAddChild(match, child); } crm_node_created(child); } else if(strcmp(op, "move") == 0) { int position = 0; crm_element_value_int(change, XML_DIFF_POSITION, &position); if(position != __xml_offset(match)) { xmlNode *match_child = NULL; int p = position; if(p > __xml_offset(match)) { p++; /* Skip ourselves */ } CRM_ASSERT(match->parent != NULL); match_child = match->parent->children; while(match_child && p != __xml_offset(match_child)) { match_child = match_child->next; } crm_trace("Moving %s to position %d (was %d, prev %p, %s %p)", match->name, position, __xml_offset(match), match->prev, match_child?"next":"last", match_child?match_child:match->parent->last); if(match_child) { xmlAddPrevSibling(match_child, match); } else { CRM_ASSERT(match->parent->last != NULL); xmlAddNextSibling(match->parent->last, match); } } else { crm_trace("%s is already in position %d", match->name, position); } if(position != __xml_offset(match)) { crm_err("Moved %s.%d to position %d instead of %d (%p)", match->name, ID(match), __xml_offset(match), position, match->prev); rc = -pcmk_err_diff_failed; } } else if(strcmp(op, "delete") == 0) { free_xml(match); } else if(strcmp(op, "modify") == 0) { xmlAttr *pIter = crm_first_attr(match); xmlNode *attrs = __xml_first_child(first_named_child(change, XML_DIFF_RESULT)); if(attrs == NULL) { rc = -ENOMSG; continue; } while(pIter != NULL) { const char *name = (const char *)pIter->name; pIter = pIter->next; xml_remove_prop(match, name); } for (pIter = crm_first_attr(attrs); pIter != NULL; pIter = pIter->next) { const char *name = (const char *)pIter->name; const char *value = crm_element_value(attrs, name); crm_xml_add(match, name, value); } } else { crm_err("Unknown operation: %s", op); } } return rc; } int xml_apply_patchset(xmlNode *xml, xmlNode *patchset, bool check_version) { int format = 1; int rc = pcmk_ok; xmlNode *old = NULL; const char *digest = crm_element_value(patchset, XML_ATTR_DIGEST); if(patchset == NULL) { return rc; } xml_log_patchset(LOG_TRACE, __FUNCTION__, patchset); crm_element_value_int(patchset, "format", &format); if(check_version) { rc = xml_patch_version_check(xml, 
patchset, format); if(rc != pcmk_ok) { return rc; } } if(digest) { /* Make it available for logging if the result doesn't have the expected digest */ old = copy_xml(xml); } if(rc == pcmk_ok) { switch(format) { case 1: rc = xml_apply_patchset_v1(xml, patchset, check_version); break; case 2: rc = xml_apply_patchset_v2(xml, patchset, check_version); break; default: crm_err("Unknown patch format: %d", format); rc = -EINVAL; } } if(rc == pcmk_ok && digest) { static struct qb_log_callsite *digest_cs = NULL; char *new_digest = NULL; char *version = crm_element_value_copy(xml, XML_ATTR_CRM_VERSION); if (digest_cs == NULL) { digest_cs = qb_log_callsite_get(__func__, __FILE__, "diff-digest", LOG_TRACE, __LINE__, crm_trace_nonlog); } new_digest = calculate_xml_versioned_digest(xml, FALSE, TRUE, version); if (safe_str_neq(new_digest, digest)) { crm_info("v%d digest mis-match: expected %s, calculated %s", format, digest, new_digest); rc = -pcmk_err_diff_failed; if (digest_cs && digest_cs->targets) { save_xml_to_file(old, "PatchDigest:input", NULL); save_xml_to_file(xml, "PatchDigest:result", NULL); save_xml_to_file(patchset,"PatchDigest:diff", NULL); } else { crm_trace("%p %0.6x", digest_cs, digest_cs ? digest_cs->targets : 0); } } else { crm_trace("v%d digest matched: expected %s, calculated %s", format, digest, new_digest); } free(new_digest); free(version); } free_xml(old); return rc; } xmlNode * find_xml_node(xmlNode * root, const char *search_path, gboolean must_find) { xmlNode *a_child = NULL; const char *name = "NULL"; if (root != NULL) { name = crm_element_name(root); } if (search_path == NULL) { crm_warn("Will never find <NULL>"); return NULL; } for (a_child = __xml_first_child(root); a_child != NULL; a_child = __xml_next(a_child)) { if (strcmp((const char *)a_child->name, search_path) == 0) { /* crm_trace("returning node (%s).", crm_element_name(a_child)); */ return a_child; } } if (must_find) { crm_warn("Could not find %s in %s.", search_path, name); } else if (root != NULL) { crm_trace("Could not find %s in %s.", search_path, name); } else { crm_trace("Could not find %s in <NULL>.", search_path); } return NULL; } xmlNode * find_entity(xmlNode * parent, const char *node_name, const char *id) { xmlNode *a_child = NULL; for (a_child = __xml_first_child(parent); a_child != NULL; a_child = __xml_next(a_child)) { /* Uncertain if node_name == NULL check is strictly necessary here */ if (node_name == NULL || strcmp((const char *)a_child->name, node_name) == 0) { const char *cid = ID(a_child); if (id == NULL || (cid != NULL && strcmp(id, cid) == 0)) { return a_child; } } } crm_trace("node <%s id=%s> not found in %s.", node_name, id, crm_element_name(parent)); return NULL; } void copy_in_properties(xmlNode * target, xmlNode * src) { if (src == NULL) { crm_warn("No node to copy properties from"); } else if (target == NULL) { crm_err("No node to copy properties into"); } else { xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(src); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); expand_plus_plus(target, p_name, p_value); } } return; } void fix_plus_plus_recursive(xmlNode * target) { /* TODO: Remove recursion and use xpath searches for value++ */ xmlNode *child = NULL; xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(target); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); expand_plus_plus(target, p_name, p_value); } for (child = 
__xml_first_child(target); child != NULL; child = __xml_next(child)) { fix_plus_plus_recursive(child); } } void expand_plus_plus(xmlNode * target, const char *name, const char *value) { int offset = 1; int name_len = 0; int int_value = 0; int value_len = 0; const char *old_value = NULL; if (value == NULL || name == NULL) { return; } old_value = crm_element_value(target, name); if (old_value == NULL) { /* if no previous value, set unexpanded */ goto set_unexpanded; } else if (strstr(value, name) != value) { goto set_unexpanded; } name_len = strlen(name); value_len = strlen(value); if (value_len < (name_len + 2) || value[name_len] != '+' || (value[name_len + 1] != '+' && value[name_len + 1] != '=')) { goto set_unexpanded; } /* if we are expanding ourselves, * then no previous value was set and leave int_value as 0 */ if (old_value != value) { int_value = char2score(old_value); } if (value[name_len + 1] != '+') { const char *offset_s = value + (name_len + 2); offset = char2score(offset_s); } int_value += offset; if (int_value > INFINITY) { int_value = (int)INFINITY; } crm_xml_add_int(target, name, int_value); return; set_unexpanded: if (old_value == value) { /* the old value is already set, nothing to do */ return; } crm_xml_add(target, name, value); return; } xmlDoc * getDocPtr(xmlNode * node) { xmlDoc *doc = NULL; CRM_CHECK(node != NULL, return NULL); doc = node->doc; if (doc == NULL) { doc = xmlNewDoc((const xmlChar *)"1.0"); xmlDocSetRootElement(doc, node); xmlSetTreeDoc(node, doc); } return doc; } xmlNode * add_node_copy(xmlNode * parent, xmlNode * src_node) { xmlNode *child = NULL; xmlDoc *doc = getDocPtr(parent); CRM_CHECK(src_node != NULL, return NULL); child = xmlDocCopyNode(src_node, doc, 1); xmlAddChild(parent, child); crm_node_created(child); return child; } int add_node_nocopy(xmlNode * parent, const char *name, xmlNode * child) { add_node_copy(parent, child); free_xml(child); return 1; } static bool __xml_acl_check(xmlNode *xml, const char *name, enum xml_private_flags mode) { CRM_ASSERT(xml); CRM_ASSERT(xml->doc); CRM_ASSERT(xml->doc->_private); #if ENABLE_ACL { if(TRACKING_CHANGES(xml) && xml_acl_enabled(xml)) { int offset = 0; xmlNode *parent = xml; char buffer[XML_BUFFER_SIZE]; xml_private_t *docp = xml->doc->_private; if(docp->acls == NULL) { crm_trace("Ordinary user %s cannot access the CIB without any defined ACLs", docp->user); set_doc_flag(xml, xpf_acl_denied); return FALSE; } offset = __get_prefix(NULL, xml, buffer, offset); if(name) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "[@%s]", name); } CRM_LOG_ASSERT(offset > 0); /* Walk the tree upwards looking for xml_acl_* flags * - Creating an attribute requires write permissions for the node * - Creating a child requires write permissions for the parent */ if(name) { xmlAttr *attr = xmlHasProp(xml, (const xmlChar *)name); if(attr && mode == xpf_acl_create) { mode = xpf_acl_write; } } while(parent && parent->_private) { xml_private_t *p = parent->_private; if(__xml_acl_mode_test(p->flags, mode)) { return TRUE; } else if(is_set(p->flags, xpf_acl_deny)) { crm_trace("%x access denied to %s: parent", mode, buffer); set_doc_flag(xml, xpf_acl_denied); return FALSE; } parent = parent->parent; } crm_trace("%x access denied to %s: default", mode, buffer); set_doc_flag(xml, xpf_acl_denied); return FALSE; } } #endif return TRUE; } const char * crm_xml_add(xmlNode * node, const char *name, const char *value) { bool dirty = FALSE; xmlAttr *attr = NULL; CRM_CHECK(node != NULL, return NULL); CRM_CHECK(name != NULL, 
return NULL); if (value == NULL) { return NULL; } #if XML_PARANOIA_CHECKS { const char *old_value = NULL; old_value = crm_element_value(node, name); /* Could be re-setting the same value */ CRM_CHECK(old_value != value, crm_err("Cannot reset %s with crm_xml_add(%s)", name, value); return value); } #endif if(TRACKING_CHANGES(node)) { const char *old = crm_element_value(node, name); if(old == NULL || value == NULL || strcmp(old, value) != 0) { dirty = TRUE; } } if(dirty && __xml_acl_check(node, name, xpf_acl_create) == FALSE) { crm_trace("Cannot add %s=%s to %s", name, value, node->name); return NULL; } attr = xmlSetProp(node, (const xmlChar *)name, (const xmlChar *)value); if(dirty) { crm_attr_dirty(attr); } CRM_CHECK(attr && attr->children && attr->children->content, return NULL); return (char *)attr->children->content; } const char * crm_xml_replace(xmlNode * node, const char *name, const char *value) { bool dirty = FALSE; xmlAttr *attr = NULL; const char *old_value = NULL; CRM_CHECK(node != NULL, return NULL); CRM_CHECK(name != NULL && name[0] != 0, return NULL); old_value = crm_element_value(node, name); /* Could be re-setting the same value */ CRM_CHECK(old_value != value, return value); if(__xml_acl_check(node, name, xpf_acl_write) == FALSE) { /* Create a fake object linked to doc->_private instead? */ crm_trace("Cannot replace %s=%s to %s", name, value, node->name); return NULL; } else if (old_value != NULL && value == NULL) { xml_remove_prop(node, name); return NULL; } else if (value == NULL) { return NULL; } if(TRACKING_CHANGES(node)) { if(old_value == NULL || value == NULL || strcmp(old_value, value) != 0) { dirty = TRUE; } } attr = xmlSetProp(node, (const xmlChar *)name, (const xmlChar *)value); if(dirty) { crm_attr_dirty(attr); } CRM_CHECK(attr && attr->children && attr->children->content, return NULL); return (char *)attr->children->content; } const char * crm_xml_add_int(xmlNode * node, const char *name, int value) { char *number = crm_itoa(value); const char *added = crm_xml_add(node, name, number); free(number); return added; } xmlNode * create_xml_node(xmlNode * parent, const char *name) { xmlDoc *doc = NULL; xmlNode *node = NULL; if (name == NULL || name[0] == 0) { CRM_CHECK(name != NULL && name[0] == 0, return NULL); return NULL; } if (parent == NULL) { doc = xmlNewDoc((const xmlChar *)"1.0"); node = xmlNewDocRawNode(doc, NULL, (const xmlChar *)name, NULL); xmlDocSetRootElement(doc, node); } else { doc = getDocPtr(parent); node = xmlNewDocRawNode(doc, NULL, (const xmlChar *)name, NULL); xmlAddChild(parent, node); } crm_node_created(node); return node; } static inline int __get_prefix(const char *prefix, xmlNode *xml, char *buffer, int offset) { const char *id = ID(xml); if(offset == 0 && prefix == NULL && xml->parent) { offset = __get_prefix(NULL, xml->parent, buffer, offset); } if(id) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "/%s[@id='%s']", (const char *)xml->name, id); } else if(xml->name) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "/%s", (const char *)xml->name); } return offset; } char * xml_get_path(xmlNode *xml) { int offset = 0; char buffer[XML_BUFFER_SIZE]; if(__get_prefix(NULL, xml, buffer, offset) > 0) { return strdup(buffer); } return NULL; } void free_xml(xmlNode * child) { if (child != NULL) { xmlNode *top = NULL; xmlDoc *doc = child->doc; xml_private_t *p = child->_private; if (doc != NULL) { top = xmlDocGetRootElement(doc); } if (doc != NULL && top == child) { /* Free everything */ xmlFreeDoc(doc); } else 
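            /* An explicit removal counts as a write: the ACL check below
             * blocks the deletion for users without write access, and when
             * change tracking is enabled the subtree's path is recorded in
             * doc->deleted_paths before the node is unlinked and freed.
             */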
        if(__xml_acl_check(child, NULL, xpf_acl_write) == FALSE) {
            int offset = 0;
            char buffer[XML_BUFFER_SIZE];

            __get_prefix(NULL, child, buffer, offset);
            crm_trace("Cannot remove %s %x", buffer, p->flags);
            return;

        } else {
            if(doc && TRACKING_CHANGES(child) && is_not_set(p->flags, xpf_created)) {
                int offset = 0;
                char buffer[XML_BUFFER_SIZE];

                if(__get_prefix(NULL, child, buffer, offset) > 0) {
                    crm_trace("Deleting %s %p from %p", buffer, child, doc);
                    p = doc->_private;
                    p->deleted_paths = g_list_append(p->deleted_paths, strdup(buffer));
                    set_doc_flag(child, xpf_dirty);
                }
            }

            /* Free this particular subtree
             * Make sure to unlink it from the parent first
             */
            xmlUnlinkNode(child);
            xmlFreeNode(child);
        }
    }
}

xmlNode *
copy_xml(xmlNode * src)
{
    xmlDoc *doc = xmlNewDoc((const xmlChar *)"1.0");
    xmlNode *copy = xmlDocCopyNode(src, doc, 1);

    xmlDocSetRootElement(doc, copy);
    xmlSetTreeDoc(copy, doc);
    return copy;
}

static void crm_xml_err(void *ctx, const char *msg, ...) G_GNUC_PRINTF(2, 3);

static void
crm_xml_err(void *ctx, const char *msg, ...)
{
    int len = 0;
    va_list args;
    char *buf = NULL;
    static int buffer_len = 0;
    static char *buffer = NULL;
    static struct qb_log_callsite *xml_error_cs = NULL;

    va_start(args, msg);
    len = vasprintf(&buf, msg, args);

    if(xml_error_cs == NULL) {
        xml_error_cs = qb_log_callsite_get(
            __func__, __FILE__, "xml library error", LOG_TRACE, __LINE__, crm_trace_nonlog);
    }

    if (strchr(buf, '\n')) {
        buf[len - 1] = 0;
        if (buffer) {
            crm_err("XML Error: %s%s", buffer, buf);
            free(buffer);
        } else {
            crm_err("XML Error: %s", buf);
        }
        if (xml_error_cs && xml_error_cs->targets) {
            crm_abort(__FILE__, __PRETTY_FUNCTION__, __LINE__, "xml library error", TRUE, TRUE);
        }
        buffer = NULL;
        buffer_len = 0;

    } else if (buffer == NULL) {
        buffer_len = len;
        buffer = buf;
        buf = NULL;

    } else {
        buffer = realloc_safe(buffer, 1 + buffer_len + len);
        memcpy(buffer + buffer_len, buf, len);
        buffer_len += len;
        buffer[buffer_len] = 0;
    }

    va_end(args);
    free(buf);
}

xmlNode *
string2xml(const char *input)
{
    xmlNode *xml = NULL;
    xmlDocPtr output = NULL;
    xmlParserCtxtPtr ctxt = NULL;
    xmlErrorPtr last_error = NULL;

    if (input == NULL) {
        crm_err("Can't parse NULL input");
        return NULL;
    }

    /* create a parser context */
    ctxt = xmlNewParserCtxt();
    CRM_CHECK(ctxt != NULL, return NULL);

    /* xmlCtxtUseOptions(ctxt, XML_PARSE_NOBLANKS|XML_PARSE_RECOVER); */

    xmlCtxtResetLastError(ctxt);
    xmlSetGenericErrorFunc(ctxt, crm_xml_err);
    /* initGenericErrorDefaultFunc(crm_xml_err); */
    output = xmlCtxtReadDoc(ctxt, (const xmlChar *)input, NULL, NULL,
                            XML_PARSE_NOBLANKS | XML_PARSE_RECOVER);
    if (output) {
        xml = xmlDocGetRootElement(output);
    }

    last_error = xmlCtxtGetLastError(ctxt);
    if (last_error && last_error->code != XML_ERR_OK) {
        /* crm_abort(__FILE__,__FUNCTION__,__LINE__, "last_error->code != XML_ERR_OK", TRUE, TRUE); */
        /*
         * http://xmlsoft.org/html/libxml-xmlerror.html#xmlErrorLevel
         * http://xmlsoft.org/html/libxml-xmlerror.html#xmlParserErrors
         */
        crm_warn("Parsing failed (domain=%d, level=%d, code=%d): %s",
                 last_error->domain, last_error->level, last_error->code, last_error->message);

        if (last_error->code == XML_ERR_DOCUMENT_EMPTY) {
            CRM_LOG_ASSERT("Cannot parse an empty string");

        } else if (last_error->code != XML_ERR_DOCUMENT_END) {
            crm_err("Couldn't%s parse %d chars: %s", xml ? " fully" : "",
                    (int)strlen(input), input);
            if (xml != NULL) {
                crm_log_xml_err(xml, "Partial");
            }

        } else {
            int len = strlen(input);
            int lpc = 0;

            while(lpc < len) {
                crm_warn("Parse error[+%.3d]: %.80s", lpc, input+lpc);
                lpc += 80;
            }

            CRM_LOG_ASSERT("String parsing error");
        }
    }

    xmlFreeParserCtxt(ctxt);
    return xml;
}

xmlNode *
stdin2xml(void)
{
    size_t data_length = 0;
    size_t read_chars = 0;

    char *xml_buffer = NULL;
    xmlNode *xml_obj = NULL;

    do {
        size_t next = XML_BUFFER_SIZE + data_length + 1;

        /* size_t is unsigned, so detect wrap-around rather than testing <= 0 */
        if(next <= data_length) {
            crm_err("Buffer size exceeded at: %llu + %d",
                    (unsigned long long)data_length, XML_BUFFER_SIZE);
            break;
        }

        xml_buffer = realloc_safe(xml_buffer, next);
        read_chars = fread(xml_buffer + data_length, 1, XML_BUFFER_SIZE, stdin);
        data_length += read_chars;
    } while (read_chars > 0);

    if (data_length == 0) {
        crm_warn("No XML supplied on stdin");
        free(xml_buffer);
        return NULL;
    }

    xml_buffer[data_length] = '\0';

    xml_obj = string2xml(xml_buffer);
    free(xml_buffer);

    crm_log_xml_trace(xml_obj, "Created fragment");
    return xml_obj;
}

static char *
decompress_file(const char *filename)
{
    char *buffer = NULL;

#if HAVE_BZLIB_H
    int rc = 0;
    size_t length = 0, read_len = 0;

    BZFILE *bz_file = NULL;
    FILE *input = fopen(filename, "r");

    if (input == NULL) {
        crm_perror(LOG_ERR, "Could not open %s for reading", filename);
        return NULL;
    }

    bz_file = BZ2_bzReadOpen(&rc, input, 0, 0, NULL, 0);
    if (rc != BZ_OK) {
        BZ2_bzReadClose(&rc, bz_file);
        fclose(input); /* don't leak the stream when the bzip2 open fails */
        return NULL;
    }

    rc = BZ_OK;
    while (rc == BZ_OK) {
        buffer = realloc_safe(buffer, XML_BUFFER_SIZE + length + 1);
        read_len = BZ2_bzRead(&rc, bz_file, buffer + length, XML_BUFFER_SIZE);

        crm_trace("Read %ld bytes from file: %d", (long)read_len, rc);

        if (rc == BZ_OK || rc == BZ_STREAM_END) {
            length += read_len;
        }
    }

    buffer[length] = '\0';

    if (rc != BZ_STREAM_END) {
        crm_err("Couldn't read compressed XML from file");
        free(buffer);
        buffer = NULL;
    }

    BZ2_bzReadClose(&rc, bz_file);
    fclose(input);

#else
    crm_err("Cannot read compressed files:" " bzlib was not available at compile time");
#endif
    return buffer;
}

void
strip_text_nodes(xmlNode * xml)
{
    xmlNode *iter = xml->children;

    while (iter) {
        xmlNode *next = iter->next;

        switch (iter->type) {
            case XML_TEXT_NODE:
                /* Remove it */
                xmlUnlinkNode(iter);
                xmlFreeNode(iter);
                break;

            case XML_ELEMENT_NODE:
                /* Search it */
                strip_text_nodes(iter);
                break;

            default:
                /* Leave it */
                break;
        }

        iter = next;
    }
}

xmlNode *
filename2xml(const char *filename)
{
    xmlNode *xml = NULL;
    xmlDocPtr output = NULL;
    const char *match = NULL;
    xmlParserCtxtPtr ctxt = NULL;
    xmlErrorPtr last_error = NULL;
    static int xml_options = XML_PARSE_NOBLANKS | XML_PARSE_RECOVER;

    /* create a parser context */
    ctxt = xmlNewParserCtxt();
    CRM_CHECK(ctxt != NULL, return NULL);

    /* xmlCtxtUseOptions(ctxt, XML_PARSE_NOBLANKS|XML_PARSE_RECOVER); */

    xmlCtxtResetLastError(ctxt);
    xmlSetGenericErrorFunc(ctxt, crm_xml_err);
    /* initGenericErrorDefaultFunc(crm_xml_err); */

    if (filename) {
        match = strstr(filename, ".bz2");
    }

    if (filename == NULL) {
        /* STDIN_FILENO == fileno(stdin) */
        output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL, xml_options);

    } else if (match == NULL || match[4] != 0) {
        output = xmlCtxtReadFile(ctxt, filename, NULL, xml_options);

    } else {
        char *input = decompress_file(filename);

        output = xmlCtxtReadDoc(ctxt, (const xmlChar *)input, NULL, NULL, xml_options);
        free(input);
    }

    if (output && (xml = xmlDocGetRootElement(output))) {
        strip_text_nodes(xml);
    }

    last_error = xmlCtxtGetLastError(ctxt);
    if (last_error && last_error->code != XML_ERR_OK) {
        /* crm_abort(__FILE__,__FUNCTION__,__LINE__, "last_error->code
!= XML_ERR_OK", TRUE, TRUE); */ /* * http://xmlsoft.org/html/libxml-xmlerror.html#xmlErrorLevel * http://xmlsoft.org/html/libxml-xmlerror.html#xmlParserErrors */ crm_err("Parsing failed (domain=%d, level=%d, code=%d): %s", last_error->domain, last_error->level, last_error->code, last_error->message); if (last_error && last_error->code != XML_ERR_OK) { crm_err("Couldn't%s parse %s", xml ? " fully" : "", filename); if (xml != NULL) { crm_log_xml_err(xml, "Partial"); } } } xmlFreeParserCtxt(ctxt); return xml; } /*! * \internal * \brief Add a "last written" attribute to an XML node, set to current time * * \param[in] xml_node XML node to get attribute * * \return Value that was set, or NULL on error */ const char * crm_xml_add_last_written(xmlNode *xml_node) { time_t now = time(NULL); char *now_str = ctime(&now); now_str[24] = EOS; /* replace the newline */ return crm_xml_add(xml_node, XML_CIB_ATTR_WRITTEN, now_str); } static int write_xml_stream(xmlNode * xml_node, const char *filename, FILE * stream, gboolean compress) { int res = 0; char *buffer = NULL; unsigned int out = 0; CRM_CHECK(stream != NULL, return -1); crm_trace("Writing XML out to %s", filename); if (xml_node == NULL) { crm_err("Cannot write NULL to %s", filename); fclose(stream); return -1; } crm_log_xml_trace(xml_node, "Writing out"); buffer = dump_xml_formatted(xml_node); CRM_CHECK(buffer != NULL && strlen(buffer) > 0, crm_log_xml_warn(xml_node, "dump:failed"); goto bail); if (compress) { #if HAVE_BZLIB_H int rc = BZ_OK; unsigned int in = 0; BZFILE *bz_file = NULL; bz_file = BZ2_bzWriteOpen(&rc, stream, 5, 0, 30); if (rc != BZ_OK) { crm_err("bzWriteOpen failed: %d", rc); } else { BZ2_bzWrite(&rc, bz_file, buffer, strlen(buffer)); if (rc != BZ_OK) { crm_err("bzWrite() failed: %d", rc); } } if (rc == BZ_OK) { BZ2_bzWriteClose(&rc, bz_file, 0, &in, &out); if (rc != BZ_OK) { crm_err("bzWriteClose() failed: %d", rc); out = -1; } else { crm_trace("%s: In: %d, out: %d", filename, in, out); } } #else crm_err("Cannot write compressed files:" " bzlib was not available at compile time"); #endif } if (out <= 0) { res = fprintf(stream, "%s", buffer); if (res < 0) { crm_perror(LOG_ERR, "Cannot write output to %s", filename); goto bail; } } bail: if (fflush(stream) != 0) { crm_perror(LOG_ERR, "fflush for %s failed:", filename); res = -1; } if (fsync(fileno(stream)) < 0) { crm_perror(LOG_ERR, "fsync for %s failed:", filename); res = -1; } fclose(stream); crm_trace("Saved %d bytes to the Cib as XML", res); free(buffer); return res; } int write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress) { FILE *stream = NULL; CRM_CHECK(fd > 0, return -1); stream = fdopen(fd, "w"); return write_xml_stream(xml_node, filename, stream, compress); } int write_xml_file(xmlNode * xml_node, const char *filename, gboolean compress) { FILE *stream = NULL; stream = fopen(filename, "w"); return write_xml_stream(xml_node, filename, stream, compress); } xmlNode * get_message_xml(xmlNode * msg, const char *field) { xmlNode *tmp = first_named_child(msg, field); return __xml_first_child(tmp); } gboolean add_message_xml(xmlNode * msg, const char *field, xmlNode * xml) { xmlNode *holder = create_xml_node(msg, field); add_node_copy(holder, xml); return TRUE; } static char * crm_xml_escape_shuffle(char *text, int start, int *length, const char *replace) { int lpc; int offset = strlen(replace) - 1; /* We have space for 1 char already */ *length += offset; text = realloc_safe(text, *length); for (lpc = (*length) - 1; lpc > (start + offset); lpc--) { 
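        /* Shift the tail of the string right by 'offset' bytes, copying
         * backwards so that overlapping positions are read before they are
         * overwritten (a hand-rolled memmove()), which leaves room for the
         * replacement sequence at 'start'. */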
text[lpc] = text[lpc - offset]; } memcpy(text + start, replace, offset + 1); return text; } char * crm_xml_escape(const char *text) { int index; int changes = 0; int length = 1 + strlen(text); char *copy = strdup(text); /* * When xmlCtxtReadDoc() parses &lt; and friends in a * value, it converts them to their human readable * form. * * If one uses xmlNodeDump() to convert it back to a * string, all is well, because special characters are * converted back to their escape sequences. * * However xmlNodeDump() is randomly dog slow, even with the same * input. So we need to replicate the escapeing in our custom * version so that the result can be re-parsed by xmlCtxtReadDoc() * when necessary. */ for (index = 0; index < length; index++) { switch (copy[index]) { case 0: break; case '<': copy = crm_xml_escape_shuffle(copy, index, &length, "&lt;"); changes++; break; case '>': copy = crm_xml_escape_shuffle(copy, index, &length, "&gt;"); changes++; break; case '"': copy = crm_xml_escape_shuffle(copy, index, &length, "&quot;"); changes++; break; case '\'': copy = crm_xml_escape_shuffle(copy, index, &length, "&apos;"); changes++; break; case '&': copy = crm_xml_escape_shuffle(copy, index, &length, "&amp;"); changes++; break; case '\t': /* Might as well just expand to a few spaces... */ copy = crm_xml_escape_shuffle(copy, index, &length, " "); changes++; break; case '\n': /* crm_trace("Convert: \\%.3o", copy[index]); */ copy = crm_xml_escape_shuffle(copy, index, &length, "\\n"); changes++; break; case '\r': copy = crm_xml_escape_shuffle(copy, index, &length, "\\r"); changes++; break; /* For debugging... case '\\': crm_trace("Passthrough: \\%c", copy[index+1]); break; */ default: /* Check for and replace non-printing characters with their octal equivalent */ if(copy[index] < ' ' || copy[index] > '~') { char *replace = crm_strdup_printf("\\%.3o", copy[index]); /* crm_trace("Convert to octal: \\%.3o", copy[index]); */ copy = crm_xml_escape_shuffle(copy, index, &length, replace); free(replace); changes++; } } } if (changes) { crm_trace("Dumped '%s'", copy); } return copy; } static inline void dump_xml_attr(xmlAttrPtr attr, int options, char **buffer, int *offset, int *max) { char *p_value = NULL; const char *p_name = NULL; CRM_ASSERT(buffer != NULL); if (attr == NULL || attr->children == NULL) { return; } p_name = (const char *)attr->name; p_value = crm_xml_escape((const char *)attr->children->content); buffer_print(*buffer, *max, *offset, " %s=\"%s\"", p_name, p_value); free(p_value); } static void __xml_log_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, int options) { int max = 0; int offset = 0; const char *name = NULL; const char *hidden = NULL; xmlNode *child = NULL; xmlAttrPtr pIter = NULL; if(data == NULL) { return; } name = crm_element_name(data); if(is_set(options, xml_log_option_open)) { char *buffer = NULL; insert_prefix(options, &buffer, &offset, &max, depth); if(data->type == XML_COMMENT_NODE) { buffer_print(buffer, max, offset, "<!--"); buffer_print(buffer, max, offset, "%s", data->content); buffer_print(buffer, max, offset, "-->"); } else { buffer_print(buffer, max, offset, "<%s", name); } hidden = crm_element_value(data, "hidden"); for (pIter = crm_first_attr(data); pIter != NULL; pIter = pIter->next) { xml_private_t *p = pIter->_private; const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); char *p_copy = NULL; if(is_set(p->flags, xpf_deleted)) { continue; } else if 
((is_set(options, xml_log_option_diff_plus) || is_set(options, xml_log_option_diff_minus)) && strcmp(XML_DIFF_MARKER, p_name) == 0) { continue; } else if (hidden != NULL && p_name[0] != 0 && strstr(hidden, p_name) != NULL) { p_copy = strdup("*****"); } else { p_copy = crm_xml_escape(p_value); } buffer_print(buffer, max, offset, " %s=\"%s\"", p_name, p_copy); free(p_copy); } if(xml_has_children(data) == FALSE) { buffer_print(buffer, max, offset, "/>"); } else if(is_set(options, xml_log_option_children)) { buffer_print(buffer, max, offset, ">"); } else { buffer_print(buffer, max, offset, "/>"); } do_crm_log_alias(log_level, file, function, line, "%s %s", prefix, buffer); free(buffer); } if(data->type == XML_COMMENT_NODE) { return; } else if(xml_has_children(data) == FALSE) { return; } else if(is_set(options, xml_log_option_children)) { offset = 0; max = 0; for (child = __xml_first_child(data); child != NULL; child = __xml_next(child)) { __xml_log_element(log_level, file, function, line, prefix, child, depth + 1, options|xml_log_option_open|xml_log_option_close); } } if(is_set(options, xml_log_option_close)) { char *buffer = NULL; insert_prefix(options, &buffer, &offset, &max, depth); buffer_print(buffer, max, offset, "</%s>", name); do_crm_log_alias(log_level, file, function, line, "%s %s", prefix, buffer); free(buffer); } } static void __xml_log_change_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, int options) { xml_private_t *p; char *prefix_m = NULL; xmlNode *child = NULL; xmlAttrPtr pIter = NULL; if(data == NULL) { return; } p = data->_private; prefix_m = strdup(prefix); prefix_m[1] = '+'; if(is_set(p->flags, xpf_dirty) && is_set(p->flags, xpf_created)) { /* Continue and log full subtree */ __xml_log_element(log_level, file, function, line, prefix_m, data, depth, options|xml_log_option_open|xml_log_option_close|xml_log_option_children); } else if(is_set(p->flags, xpf_dirty)) { char *spaces = calloc(80, 1); int s_count = 0, s_max = 80; char *prefix_del = NULL; char *prefix_moved = NULL; const char *flags = prefix; insert_prefix(options, &spaces, &s_count, &s_max, depth); prefix_del = strdup(prefix); prefix_del[0] = '-'; prefix_del[1] = '-'; prefix_moved = strdup(prefix); prefix_moved[1] = '~'; if(is_set(p->flags, xpf_moved)) { flags = prefix_moved; } else { flags = prefix; } __xml_log_element(log_level, file, function, line, flags, data, depth, options|xml_log_option_open); for (pIter = crm_first_attr(data); pIter != NULL; pIter = pIter->next) { const char *aname = (const char*)pIter->name; p = pIter->_private; if(is_set(p->flags, xpf_deleted)) { const char *value = crm_element_value(data, aname); flags = prefix_del; do_crm_log_alias(log_level, file, function, line, "%s %s @%s=%s", flags, spaces, aname, value); } else if(is_set(p->flags, xpf_dirty)) { const char *value = crm_element_value(data, aname); if(is_set(p->flags, xpf_created)) { flags = prefix_m; } else if(is_set(p->flags, xpf_modified)) { flags = prefix; } else if(is_set(p->flags, xpf_moved)) { flags = prefix_moved; } else { flags = prefix; } do_crm_log_alias(log_level, file, function, line, "%s %s @%s=%s", flags, spaces, aname, value); } } free(prefix_moved); free(prefix_del); free(spaces); for (child = __xml_first_child(data); child != NULL; child = __xml_next(child)) { __xml_log_change_element(log_level, file, function, line, prefix, child, depth + 1, options); } __xml_log_element(log_level, file, function, line, prefix, data, depth, 
                          options|xml_log_option_close);

    } else {
        for (child = __xml_first_child(data); child != NULL; child = __xml_next(child)) {
            __xml_log_change_element(log_level, file, function, line, prefix, child, depth + 1, options);
        }
    }

    free(prefix_m);
}

void
log_data_element(int log_level, const char *file, const char *function, int line,
                 const char *prefix, xmlNode * data, int depth, int options)
{
    xmlNode *a_child = NULL;
    char *prefix_m = NULL;

    if (prefix == NULL) {
        prefix = "";
    }

    /* Since we use the same file and line, to avoid confusing libqb, we need to use the same format strings */
    if (data == NULL) {
        do_crm_log_alias(log_level, file, function, line, "%s: %s", prefix, "No data to dump as XML");
        return;
    }

    if(is_set(options, xml_log_option_dirty_add)) {
        __xml_log_change_element(log_level, file, function, line, prefix, data, depth, options);
        return;
    }

    if (is_set(options, xml_log_option_formatted)) {
        if (is_set(options, xml_log_option_diff_plus)
            && (data->children == NULL || crm_element_value(data, XML_DIFF_MARKER))) {
            options |= xml_log_option_diff_all;
            prefix_m = strdup(prefix);
            prefix_m[1] = '+';
            prefix = prefix_m;

        } else if (is_set(options, xml_log_option_diff_minus)
                   && (data->children == NULL || crm_element_value(data, XML_DIFF_MARKER))) {
            options |= xml_log_option_diff_all;
            prefix_m = strdup(prefix);
            prefix_m[1] = '-';
            prefix = prefix_m;
        }
    }

    if (is_set(options, xml_log_option_diff_short)
        && is_not_set(options, xml_log_option_diff_all)) {
        /* Still searching for the actual change */
        for (a_child = __xml_first_child(data); a_child != NULL; a_child = __xml_next(a_child)) {
            log_data_element(log_level, file, function, line, prefix, a_child, depth + 1, options);
        }
        return;
    }

    __xml_log_element(log_level, file, function, line, prefix, data, depth,
                      options|xml_log_option_open|xml_log_option_close|xml_log_option_children);
    free(prefix_m);
}

static void
dump_filtered_xml(xmlNode * data, int options, char **buffer, int *offset, int *max)
{
    int lpc;
    xmlAttrPtr xIter = NULL;
    static int filter_len = DIMOF(filter);

    for (lpc = 0; options && lpc < filter_len; lpc++) {
        filter[lpc].found = FALSE;
    }

    for (xIter = crm_first_attr(data); xIter != NULL; xIter = xIter->next) {
        bool skip = FALSE;
        const char *p_name = (const char *)xIter->name;

        for (lpc = 0; skip == FALSE && lpc < filter_len; lpc++) {
            if (filter[lpc].found == FALSE && strcmp(p_name, filter[lpc].string) == 0) {
                filter[lpc].found = TRUE;
                skip = TRUE;
                break;
            }
        }

        if (skip == FALSE) {
            dump_xml_attr(xIter, options, buffer, offset, max);
        }
    }
}

static void
dump_xml_element(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth)
{
    const char *name = NULL;

    CRM_ASSERT(max != NULL);
    CRM_ASSERT(offset != NULL);
    CRM_ASSERT(buffer != NULL);

    if (data == NULL) {
        crm_trace("Nothing to dump");
        return;
    }

    if (*buffer == NULL) {
        *offset = 0;
        *max = 0;
    }

    name = crm_element_name(data);
    CRM_ASSERT(name != NULL);

    insert_prefix(options, buffer, offset, max, depth);
    buffer_print(*buffer, *max, *offset, "<%s", name);

    if (options & xml_log_option_filtered) {
        dump_filtered_xml(data, options, buffer, offset, max);

    } else {
        xmlAttrPtr xIter = NULL;

        for (xIter = crm_first_attr(data); xIter != NULL; xIter = xIter->next) {
            dump_xml_attr(xIter, options, buffer, offset, max);
        }
    }

    if (data->children == NULL) {
        buffer_print(*buffer, *max, *offset, "/>");

    } else {
        buffer_print(*buffer, *max, *offset, ">");
    }

    if (options & xml_log_option_formatted) {
        buffer_print(*buffer, *max, *offset, "\n");
    }

    if (data->children) {
        xmlNode *xChild =
NULL; for (xChild = __xml_first_child(data); xChild != NULL; xChild = __xml_next(xChild)) { crm_xml_dump(xChild, options, buffer, offset, max, depth + 1); } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "</%s>", name); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } } static void dump_xml_comment(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { CRM_ASSERT(max != NULL); CRM_ASSERT(offset != NULL); CRM_ASSERT(buffer != NULL); if (data == NULL) { crm_trace("Nothing to dump"); return; } if (*buffer == NULL) { *offset = 0; *max = 0; } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "<!--"); buffer_print(*buffer, *max, *offset, "%s", data->content); buffer_print(*buffer, *max, *offset, "-->"); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } void crm_xml_dump(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { #if 0 if (is_not_set(options, xml_log_option_filtered)) { /* Turning this code on also changes the PE tests for some reason * (not just newlines). Figure out why before considering to * enable this permanently. * * It exists to help debug slowness in xmlNodeDump() and * potentially if we ever want to go back to it. * * In theory its a good idea (reuse) but our custom version does * better for the filtered case and avoids the final strdup() for * everything */ time_t now, next; xmlDoc *doc = NULL; xmlBuffer *xml_buffer = NULL; *buffer = NULL; doc = getDocPtr(data); /* doc will only be NULL if data is */ CRM_CHECK(doc != NULL, return); now = time(NULL); xml_buffer = xmlBufferCreate(); CRM_ASSERT(xml_buffer != NULL); /* The default allocator XML_BUFFER_ALLOC_EXACT does far too many * realloc()s and it can take upwards of 18 seconds (yes, seconds) * to dump a 28kb tree which XML_BUFFER_ALLOC_DOUBLEIT can do in * less than 1 second. * * We could also use xmlBufferCreateSize() to start with a * sane-ish initial size and avoid the first few doubles. 
*/ xmlBufferSetAllocationScheme(xml_buffer, XML_BUFFER_ALLOC_DOUBLEIT); *max = xmlNodeDump(xml_buffer, doc, data, 0, (options & xml_log_option_formatted)); if (*max > 0) { *buffer = strdup((char *)xml_buffer->content); } next = time(NULL); if ((now + 1) < next) { crm_log_xml_trace(data, "Long time"); crm_err("xmlNodeDump() -> %dbytes took %ds", *max, next - now); } xmlBufferFree(xml_buffer); return; } #endif switch(data->type) { case XML_ELEMENT_NODE: /* Handle below */ dump_xml_element(data, options, buffer, offset, max, depth); break; case XML_TEXT_NODE: /* Ignore */ return; case XML_COMMENT_NODE: dump_xml_comment(data, options, buffer, offset, max, depth); break; default: crm_warn("Unhandled type: %d", data->type); return; /* XML_ATTRIBUTE_NODE = 2 XML_CDATA_SECTION_NODE = 4 XML_ENTITY_REF_NODE = 5 XML_ENTITY_NODE = 6 XML_PI_NODE = 7 XML_DOCUMENT_NODE = 9 XML_DOCUMENT_TYPE_NODE = 10 XML_DOCUMENT_FRAG_NODE = 11 XML_NOTATION_NODE = 12 XML_HTML_DOCUMENT_NODE = 13 XML_DTD_NODE = 14 XML_ELEMENT_DECL = 15 XML_ATTRIBUTE_DECL = 16 XML_ENTITY_DECL = 17 XML_NAMESPACE_DECL = 18 XML_XINCLUDE_START = 19 XML_XINCLUDE_END = 20 XML_DOCB_DOCUMENT_NODE = 21 */ } } void crm_buffer_add_char(char **buffer, int *offset, int *max, char c) { buffer_print(*buffer, *max, *offset, "%c", c); } char * dump_xml_formatted(xmlNode * an_xml_node) { char *buffer = NULL; int offset = 0, max = 0; crm_xml_dump(an_xml_node, xml_log_option_formatted, &buffer, &offset, &max, 0); return buffer; } char * dump_xml_unformatted(xmlNode * an_xml_node) { char *buffer = NULL; int offset = 0, max = 0; crm_xml_dump(an_xml_node, 0, &buffer, &offset, &max, 0); return buffer; } gboolean xml_has_children(const xmlNode * xml_root) { if (xml_root != NULL && xml_root->children != NULL) { return TRUE; } return FALSE; } int crm_element_value_int(xmlNode * data, const char *name, int *dest) { const char *value = crm_element_value(data, name); CRM_CHECK(dest != NULL, return -1); if (value) { *dest = crm_int_helper(value, NULL); return 0; } return -1; } int crm_element_value_const_int(const xmlNode * data, const char *name, int *dest) { return crm_element_value_int((xmlNode *) data, name, dest); } const char * crm_element_value_const(const xmlNode * data, const char *name) { return crm_element_value((xmlNode *) data, name); } char * crm_element_value_copy(xmlNode * data, const char *name) { char *value_copy = NULL; const char *value = crm_element_value(data, name); if (value != NULL) { value_copy = strdup(value); } return value_copy; } void xml_remove_prop(xmlNode * obj, const char *name) { if(__xml_acl_check(obj, NULL, xpf_acl_write) == FALSE) { crm_trace("Cannot remove %s from %s", name, obj->name); } else if(TRACKING_CHANGES(obj)) { /* Leave in place (marked for removal) until after the diff is calculated */ xml_private_t *p = NULL; xmlAttr *attr = xmlHasProp(obj, (const xmlChar *)name); p = attr->_private; set_parent_flag(obj, xpf_dirty); p->flags |= xpf_deleted; /* crm_trace("Setting flag %x due to %s[@id=%s].%s", xpf_dirty, obj->name, ID(obj), name); */ } else { xmlUnsetProp(obj, (const xmlChar *)name); } } void purge_diff_markers(xmlNode * a_node) { xmlNode *child = NULL; CRM_CHECK(a_node != NULL, return); xml_remove_prop(a_node, XML_DIFF_MARKER); for (child = __xml_first_child(a_node); child != NULL; child = __xml_next(child)) { purge_diff_markers(child); } } void save_xml_to_file(xmlNode * xml, const char *desc, const char *filename) { char *f = NULL; if (filename == NULL) { char *uuid = crm_generate_uuid(); f = 
crm_strdup_printf("/tmp/%s", uuid); filename = f; free(uuid); } crm_info("Saving %s to %s", desc, filename); write_xml_file(xml, filename, FALSE); free(f); } gboolean apply_xml_diff(xmlNode * old, xmlNode * diff, xmlNode ** new) { gboolean result = TRUE; int root_nodes_seen = 0; static struct qb_log_callsite *digest_cs = NULL; const char *digest = crm_element_value(diff, XML_ATTR_DIGEST); const char *version = crm_element_value(diff, XML_ATTR_CRM_VERSION); xmlNode *child_diff = NULL; xmlNode *added = find_xml_node(diff, "diff-added", FALSE); xmlNode *removed = find_xml_node(diff, "diff-removed", FALSE); CRM_CHECK(new != NULL, return FALSE); if (digest_cs == NULL) { digest_cs = qb_log_callsite_get(__func__, __FILE__, "diff-digest", LOG_TRACE, __LINE__, crm_trace_nonlog); } crm_trace("Subtraction Phase"); for (child_diff = __xml_first_child(removed); child_diff != NULL; child_diff = __xml_next(child_diff)) { CRM_CHECK(root_nodes_seen == 0, result = FALSE); if (root_nodes_seen == 0) { *new = subtract_xml_object(NULL, old, child_diff, FALSE, NULL, NULL); } root_nodes_seen++; } if (root_nodes_seen == 0) { *new = copy_xml(old); } else if (root_nodes_seen > 1) { crm_err("(-) Diffs cannot contain more than one change set..." " saw %d", root_nodes_seen); result = FALSE; } root_nodes_seen = 0; crm_trace("Addition Phase"); if (result) { xmlNode *child_diff = NULL; for (child_diff = __xml_first_child(added); child_diff != NULL; child_diff = __xml_next(child_diff)) { CRM_CHECK(root_nodes_seen == 0, result = FALSE); if (root_nodes_seen == 0) { add_xml_object(NULL, *new, child_diff, TRUE); } root_nodes_seen++; } } if (root_nodes_seen > 1) { crm_err("(+) Diffs cannot contain more than one change set..." " saw %d", root_nodes_seen); result = FALSE; } else if (result && digest) { char *new_digest = NULL; purge_diff_markers(*new); /* Purge now so the diff is ok */ new_digest = calculate_xml_versioned_digest(*new, FALSE, TRUE, version); if (safe_str_neq(new_digest, digest)) { crm_info("Digest mis-match: expected %s, calculated %s", digest, new_digest); result = FALSE; crm_trace("%p %0.6x", digest_cs, digest_cs ?
digest_cs->targets : 0); if (digest_cs && digest_cs->targets) { save_xml_to_file(old, "diff:original", NULL); save_xml_to_file(diff, "diff:input", NULL); save_xml_to_file(*new, "diff:new", NULL); } } else { crm_trace("Digest matched: expected %s, calculated %s", digest, new_digest); } free(new_digest); } else if (result) { purge_diff_markers(*new); /* Purge now so the diff is ok */ } return result; } static void __xml_diff_object(xmlNode * old, xmlNode * new) { xmlNode *cIter = NULL; xmlAttr *pIter = NULL; CRM_CHECK(new != NULL, return); if(old == NULL) { crm_node_created(new); __xml_acl_post_process(new); /* Check creation is allowed */ return; } else { xml_private_t *p = new->_private; if(p->flags & xpf_processed) { /* Avoid re-comparing nodes */ return; } p->flags |= xpf_processed; } for (pIter = crm_first_attr(new); pIter != NULL; pIter = pIter->next) { xml_private_t *p = pIter->_private; /* Assume everything was just created and take it from there */ p->flags |= xpf_created; } for (pIter = crm_first_attr(old); pIter != NULL; ) { xmlAttr *prop = pIter; xml_private_t *p = NULL; const char *name = (const char *)pIter->name; const char *old_value = crm_element_value(old, name); xmlAttr *exists = xmlHasProp(new, pIter->name); pIter = pIter->next; if(exists == NULL) { p = new->doc->_private; /* Prevent the dirty flag being set recursively upwards */ clear_bit(p->flags, xpf_tracking); exists = xmlSetProp(new, (const xmlChar *)name, (const xmlChar *)old_value); set_bit(p->flags, xpf_tracking); p = exists->_private; p->flags = 0; crm_trace("Lost %s@%s=%s", old->name, name, old_value); xml_remove_prop(new, name); } else { int p_new = __xml_offset((xmlNode*)exists); int p_old = __xml_offset((xmlNode*)prop); const char *value = crm_element_value(new, name); p = exists->_private; p->flags = (p->flags & ~xpf_created); if(strcmp(value, old_value) != 0) { /* Restore the original value, so we can call crm_xml_add() which checks ACLs */ char *vcopy = crm_element_value_copy(new, name); crm_trace("Modified %s@%s %s->%s", old->name, name, old_value, vcopy); xmlSetProp(new, prop->name, (const xmlChar *)old_value); crm_xml_add(new, name, vcopy); free(vcopy); } else if(p_old != p_new) { crm_info("Moved %s@%s (%d -> %d)", old->name, name, p_old, p_new); __xml_node_dirty(new); p->flags |= xpf_dirty|xpf_moved; if(p_old > p_new) { p = prop->_private; p->flags |= xpf_skip; } else { p = exists->_private; p->flags |= xpf_skip; } } } } for (pIter = crm_first_attr(new); pIter != NULL; ) { xmlAttr *prop = pIter; xml_private_t *p = pIter->_private; pIter = pIter->next; if(is_set(p->flags, xpf_created)) { char *name = strdup((const char *)prop->name); char *value = crm_element_value_copy(new, name); crm_trace("Created %s@%s=%s", new->name, name, value); /* Remove plus create won't work as it will modify the relative attribute ordering */ if(__xml_acl_check(new, name, xpf_acl_write)) { crm_attr_dirty(prop); } else { xmlUnsetProp(new, prop->name); /* Remove - change not allowed */ } free(value); free(name); } } for (cIter = __xml_first_child(old); cIter != NULL; ) { xmlNode *old_child = cIter; xmlNode *new_child = find_entity(new, crm_element_name(cIter), ID(cIter)); cIter = __xml_next(cIter); if(new_child) { __xml_diff_object(old_child, new_child); } else { xml_private_t *p = old_child->_private; /* Create then free (which will check the acls if necessary) */ xmlNode *candidate = add_node_copy(new, old_child); xmlNode *top = xmlDocGetRootElement(candidate->doc); __xml_node_clean(candidate); __xml_acl_apply(top); /* Make
sure any ACLs are applied to 'candidate' */ free_xml(candidate); if(NULL == find_entity(new, crm_element_name(old_child), ID(old_child))) { p->flags |= xpf_skip; } } } for (cIter = __xml_first_child(new); cIter != NULL; ) { xmlNode *new_child = cIter; xmlNode *old_child = find_entity(old, crm_element_name(cIter), ID(cIter)); cIter = __xml_next(cIter); if(old_child == NULL) { xml_private_t *p = new_child->_private; p->flags |= xpf_skip; __xml_diff_object(old_child, new_child); } else { /* Check for movement, we already checked for differences */ int p_new = __xml_offset(new_child); int p_old = __xml_offset(old_child); xml_private_t *p = new_child->_private; if(p_old != p_new) { crm_info("%s.%s moved from %d to %d - %d", new_child->name, ID(new_child), p_old, p_new); p->flags |= xpf_moved; if(p_old > p_new) { p = old_child->_private; p->flags |= xpf_skip; } else { p = new_child->_private; p->flags |= xpf_skip; } } } } } void xml_calculate_changes(xmlNode * old, xmlNode * new) { CRM_CHECK(safe_str_eq(crm_element_name(old), crm_element_name(new)), return); CRM_CHECK(safe_str_eq(ID(old), ID(new)), return); if(xml_tracking_changes(new) == FALSE) { xml_track_changes(new, NULL, NULL, FALSE); } __xml_diff_object(old, new); } xmlNode * diff_xml_object(xmlNode * old, xmlNode * new, gboolean suppress) { xmlNode *tmp1 = NULL; xmlNode *diff = create_xml_node(NULL, "diff"); xmlNode *removed = create_xml_node(diff, "diff-removed"); xmlNode *added = create_xml_node(diff, "diff-added"); crm_xml_add(diff, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); tmp1 = subtract_xml_object(removed, old, new, FALSE, NULL, "removed:top"); if (suppress && tmp1 != NULL && can_prune_leaf(tmp1)) { free_xml(tmp1); } tmp1 = subtract_xml_object(added, new, old, TRUE, NULL, "added:top"); if (suppress && tmp1 != NULL && can_prune_leaf(tmp1)) { free_xml(tmp1); } if (added->children == NULL && removed->children == NULL) { free_xml(diff); diff = NULL; } return diff; } gboolean can_prune_leaf(xmlNode * xml_node) { xmlNode *cIter = NULL; xmlAttrPtr pIter = NULL; gboolean can_prune = TRUE; const char *name = crm_element_name(xml_node); if (safe_str_eq(name, XML_TAG_RESOURCE_REF) || safe_str_eq(name, XML_CIB_TAG_OBJ_REF) || safe_str_eq(name, XML_ACL_TAG_ROLE_REF) || safe_str_eq(name, XML_ACL_TAG_ROLE_REFv1)) { return FALSE; } for (pIter = crm_first_attr(xml_node); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; if (strcmp(p_name, XML_ATTR_ID) == 0) { continue; } can_prune = FALSE; } cIter = __xml_first_child(xml_node); while (cIter) { xmlNode *child = cIter; cIter = __xml_next(cIter); if (can_prune_leaf(child)) { free_xml(child); } else { can_prune = FALSE; } } return can_prune; } void diff_filter_context(int context, int upper_bound, int lower_bound, xmlNode * xml_node, xmlNode * parent) { xmlNode *us = NULL; xmlNode *child = NULL; xmlAttrPtr pIter = NULL; xmlNode *new_parent = parent; const char *name = crm_element_name(xml_node); CRM_CHECK(xml_node != NULL && name != NULL, return); us = create_xml_node(parent, name); for (pIter = crm_first_attr(xml_node); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); lower_bound = context; crm_xml_add(us, p_name, p_value); } if (lower_bound >= 0 || upper_bound >= 0) { crm_xml_add(us, XML_ATTR_ID, ID(xml_node)); new_parent = us; } else { upper_bound = in_upper_context(0, context, xml_node); if (upper_bound >= 0) { crm_xml_add(us, XML_ATTR_ID, ID(xml_node)); new_parent = us; } else { 
free_xml(us); us = NULL; } } for (child = __xml_first_child(us); child != NULL; child = __xml_next(child)) { diff_filter_context(context, upper_bound - 1, lower_bound - 1, child, new_parent); } } int in_upper_context(int depth, int context, xmlNode * xml_node) { if (context == 0) { return 0; } if (xml_node->properties) { return depth; } else if (depth < context) { xmlNode *child = NULL; for (child = __xml_first_child(xml_node); child != NULL; child = __xml_next(child)) { if (in_upper_context(depth + 1, context, child)) { return depth; } } } return 0; } static xmlNode * find_xml_comment(xmlNode * root, xmlNode * search_comment) { xmlNode *a_child = NULL; CRM_CHECK(search_comment->type == XML_COMMENT_NODE, return NULL); for (a_child = __xml_first_child(root); a_child != NULL; a_child = __xml_next(a_child)) { if (a_child->type != XML_COMMENT_NODE) { continue; } if (safe_str_eq((const char *)a_child->content, (const char *)search_comment->content)) { return a_child; } } return NULL; } static xmlNode * subtract_xml_comment(xmlNode * parent, xmlNode * left, xmlNode * right, gboolean * changed) { CRM_CHECK(left != NULL, return NULL); CRM_CHECK(left->type == XML_COMMENT_NODE, return NULL); if (right == NULL || safe_str_neq((const char *)left->content, (const char *)right->content)) { xmlNode *deleted = NULL; deleted = add_node_copy(parent, left); *changed = TRUE; return deleted; } return NULL; } xmlNode * subtract_xml_object(xmlNode * parent, xmlNode * left, xmlNode * right, gboolean full, gboolean * changed, const char *marker) { gboolean dummy = FALSE; gboolean skip = FALSE; xmlNode *diff = NULL; xmlNode *right_child = NULL; xmlNode *left_child = NULL; xmlAttrPtr xIter = NULL; const char *id = NULL; const char *name = NULL; const char *value = NULL; const char *right_val = NULL; int lpc = 0; static int filter_len = DIMOF(filter); if (changed == NULL) { changed = &dummy; } if (left == NULL) { return NULL; } if (left->type == XML_COMMENT_NODE) { return subtract_xml_comment(parent, left, right, changed); } id = ID(left); if (right == NULL) { xmlNode *deleted = NULL; crm_trace("Processing <%s id=%s> (complete copy)", crm_element_name(left), id); deleted = add_node_copy(parent, left); crm_xml_add(deleted, XML_DIFF_MARKER, marker); *changed = TRUE; return deleted; } name = crm_element_name(left); CRM_CHECK(name != NULL, return NULL); CRM_CHECK(safe_str_eq(crm_element_name(left), crm_element_name(right)), return NULL); /* check for XML_DIFF_MARKER in a child */ value = crm_element_value(right, XML_DIFF_MARKER); if (value != NULL && strcmp(value, "removed:top") == 0) { crm_trace("We are the root of the deletion: %s.id=%s", name, id); *changed = TRUE; return NULL; } /* Avoiding creating the full hierarchy would save even more work here */ diff = create_xml_node(parent, name); /* Reset filter */ for (lpc = 0; lpc < filter_len; lpc++) { filter[lpc].found = FALSE; } /* changes to child objects */ for (left_child = __xml_first_child(left); left_child != NULL; left_child = __xml_next(left_child)) { gboolean child_changed = FALSE; if (left_child->type == XML_COMMENT_NODE) { right_child = find_xml_comment(right, left_child); } else { right_child = find_entity(right, crm_element_name(left_child), ID(left_child)); } subtract_xml_object(diff, left_child, right_child, full, &child_changed, marker); if (child_changed) { *changed = TRUE; } } if (*changed == FALSE) { /* Nothing to do */ } else if (full) { xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(left); pIter != NULL; pIter = pIter->next) { const char
*p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); xmlSetProp(diff, (const xmlChar *)p_name, (const xmlChar *)p_value); } /* We already have everything we need... */ goto done; } else if (id) { xmlSetProp(diff, (const xmlChar *)XML_ATTR_ID, (const xmlChar *)id); } /* changes to name/value pairs */ for (xIter = crm_first_attr(left); xIter != NULL; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; if (strcmp(prop_name, XML_ATTR_ID) == 0) { continue; } skip = FALSE; for (lpc = 0; skip == FALSE && lpc < filter_len; lpc++) { if (filter[lpc].found == FALSE && strcmp(prop_name, filter[lpc].string) == 0) { filter[lpc].found = TRUE; skip = TRUE; break; } } if (skip) { continue; } right_val = crm_element_value(right, prop_name); if (right_val == NULL) { /* new */ *changed = TRUE; if (full) { xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(left); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); xmlSetProp(diff, (const xmlChar *)p_name, (const xmlChar *)p_value); } break; } else { const char *left_value = crm_element_value(left, prop_name); xmlSetProp(diff, (const xmlChar *)prop_name, (const xmlChar *)value); crm_xml_add(diff, prop_name, left_value); } } else { /* Only now do we need the left value */ const char *left_value = crm_element_value(left, prop_name); if (strcmp(left_value, right_val) == 0) { /* unchanged */ } else { *changed = TRUE; if (full) { xmlAttrPtr pIter = NULL; crm_trace("Changes detected to %s in <%s id=%s>", prop_name, crm_element_name(left), id); for (pIter = crm_first_attr(left); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); xmlSetProp(diff, (const xmlChar *)p_name, (const xmlChar *)p_value); } break; } else { crm_trace("Changes detected to %s (%s -> %s) in <%s id=%s>", prop_name, left_value, right_val, crm_element_name(left), id); crm_xml_add(diff, prop_name, left_value); } } } } if (*changed == FALSE) { free_xml(diff); return NULL; } else if (full == FALSE && id) { crm_xml_add(diff, XML_ATTR_ID, id); } done: return diff; } static int add_xml_comment(xmlNode * parent, xmlNode * target, xmlNode * update) { CRM_CHECK(update != NULL, return 0); CRM_CHECK(update->type == XML_COMMENT_NODE, return 0); if (target == NULL) { target = find_xml_comment(parent, update); } if (target == NULL) { add_node_copy(parent, update); /* We won't reach here currently */ } else if (safe_str_neq((const char *)target->content, (const char *)update->content)) { xmlFree(target->content); target->content = xmlStrdup(update->content); } return 0; } int add_xml_object(xmlNode * parent, xmlNode * target, xmlNode * update, gboolean as_diff) { xmlNode *a_child = NULL; const char *object_id = NULL; const char *object_name = NULL; #if XML_PARSE_DEBUG crm_log_xml_trace(update, "update:"); crm_log_xml_trace(target, "target:"); #endif CRM_CHECK(update != NULL, return 0); if (update->type == XML_COMMENT_NODE) { return add_xml_comment(parent, target, update); } object_name = crm_element_name(update); object_id = ID(update); CRM_CHECK(object_name != NULL, return 0); if (target == NULL && object_id == NULL) { /* placeholder object */ target = find_xml_node(parent, object_name, FALSE); } else if (target == NULL) { target = find_entity(parent, object_name, object_id); } if (target == NULL) { target = create_xml_node(parent, object_name); CRM_CHECK(target != NULL, return 0); #if XML_PARSER_DEBUG
crm_trace("Added <%s%s%s/>", crm_str(object_name), object_id ? " id=" : "", object_id ? object_id : ""); } else { crm_trace("Found node <%s%s%s/> to update", crm_str(object_name), object_id ? " id=" : "", object_id ? object_id : ""); #endif } CRM_CHECK(safe_str_eq(crm_element_name(target), crm_element_name(update)), return 0); if (as_diff == FALSE) { /* So that expand_plus_plus() gets called */ copy_in_properties(target, update); } else { /* No need for expand_plus_plus(), just raw speed */ xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(update); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); /* Remove it first so the ordering of the update is preserved */ xmlUnsetProp(target, (const xmlChar *)p_name); xmlSetProp(target, (const xmlChar *)p_name, (const xmlChar *)p_value); } } for (a_child = __xml_first_child(update); a_child != NULL; a_child = __xml_next(a_child)) { #if XML_PARSER_DEBUG crm_trace("Updating child <%s id=%s>", crm_element_name(a_child), ID(a_child)); #endif add_xml_object(target, NULL, a_child, as_diff); } #if XML_PARSER_DEBUG crm_trace("Finished with <%s id=%s>", crm_str(object_name), crm_str(object_id)); #endif return 0; } gboolean update_xml_child(xmlNode * child, xmlNode * to_update) { gboolean can_update = TRUE; xmlNode *child_of_child = NULL; CRM_CHECK(child != NULL, return FALSE); CRM_CHECK(to_update != NULL, return FALSE); if (safe_str_neq(crm_element_name(to_update), crm_element_name(child))) { can_update = FALSE; } else if (safe_str_neq(ID(to_update), ID(child))) { can_update = FALSE; } else if (can_update) { #if XML_PARSER_DEBUG crm_log_xml_trace(child, "Update match found..."); #endif add_xml_object(NULL, child, to_update, FALSE); } for (child_of_child = __xml_first_child(child); child_of_child != NULL; child_of_child = __xml_next(child_of_child)) { /* only update the first one */ if (can_update) { break; } can_update = update_xml_child(child_of_child, to_update); } return can_update; } int find_xml_children(xmlNode ** children, xmlNode * root, const char *tag, const char *field, const char *value, gboolean search_matches) { int match_found = 0; CRM_CHECK(root != NULL, return FALSE); CRM_CHECK(children != NULL, return FALSE); if (tag != NULL && safe_str_neq(tag, crm_element_name(root))) { } else if (value != NULL && safe_str_neq(value, crm_element_value(root, field))) { } else { if (*children == NULL) { *children = create_xml_node(NULL, __FUNCTION__); } add_node_copy(*children, root); match_found = 1; } if (search_matches || match_found == 0) { xmlNode *child = NULL; for (child = __xml_first_child(root); child != NULL; child = __xml_next(child)) { match_found += find_xml_children(children, child, tag, field, value, search_matches); } } return match_found; } gboolean replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean delete_only) { gboolean can_delete = FALSE; xmlNode *child_of_child = NULL; const char *up_id = NULL; const char *child_id = NULL; const char *right_val = NULL; CRM_CHECK(child != NULL, return FALSE); CRM_CHECK(update != NULL, return FALSE); up_id = ID(update); child_id = ID(child); if (up_id == NULL || (child_id && strcmp(child_id, up_id) == 0)) { can_delete = TRUE; } if (safe_str_neq(crm_element_name(update), crm_element_name(child))) { can_delete = FALSE; } if (can_delete && delete_only) { xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(update); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char 
*)pIter->name; const char *p_value = crm_attr_value(pIter); right_val = crm_element_value(child, p_name); if (safe_str_neq(p_value, right_val)) { can_delete = FALSE; } } } if (can_delete && parent != NULL) { crm_log_xml_trace(child, "Delete match found..."); if (delete_only || update == NULL) { free_xml(child); } else { xmlNode *tmp = copy_xml(update); xmlDoc *doc = tmp->doc; xmlNode *old = NULL; xml_accept_changes(tmp); old = xmlReplaceNode(child, tmp); if(xml_tracking_changes(tmp)) { /* Replaced sections may have included relevant ACLs */ __xml_acl_apply(tmp); } xml_calculate_changes(old, tmp); xmlDocSetRootElement(doc, old); free_xml(old); } child = NULL; return TRUE; } else if (can_delete) { crm_log_xml_debug(child, "Cannot delete the search root"); can_delete = FALSE; } child_of_child = __xml_first_child(child); while (child_of_child) { xmlNode *next = __xml_next(child_of_child); can_delete = replace_xml_child(child, child_of_child, update, delete_only); /* only delete the first one */ if (can_delete) { child_of_child = NULL; } else { child_of_child = next; } } return can_delete; } void hash2nvpair(gpointer key, gpointer value, gpointer user_data) { const char *name = key; const char *s_value = value; xmlNode *xml_node = user_data; xmlNode *xml_child = create_xml_node(xml_node, XML_CIB_TAG_NVPAIR); crm_xml_add(xml_child, XML_ATTR_ID, name); crm_xml_add(xml_child, XML_NVPAIR_ATTR_NAME, name); crm_xml_add(xml_child, XML_NVPAIR_ATTR_VALUE, s_value); crm_trace("dumped: name=%s value=%s", name, s_value); } void hash2smartfield(gpointer key, gpointer value, gpointer user_data) { const char *name = key; const char *s_value = value; xmlNode *xml_node = user_data; if (isdigit(name[0])) { xmlNode *tmp = create_xml_node(xml_node, XML_TAG_PARAM); crm_xml_add(tmp, XML_NVPAIR_ATTR_NAME, name); crm_xml_add(tmp, XML_NVPAIR_ATTR_VALUE, s_value); } else if (crm_element_value(xml_node, name) == NULL) { crm_xml_add(xml_node, name, s_value); crm_trace("dumped: %s=%s", name, s_value); } else { crm_trace("duplicate: %s=%s", name, s_value); } } void hash2field(gpointer key, gpointer value, gpointer user_data) { const char *name = key; const char *s_value = value; xmlNode *xml_node = user_data; if (crm_element_value(xml_node, name) == NULL) { crm_xml_add(xml_node, name, s_value); } else { crm_trace("duplicate: %s=%s", name, s_value); } } void hash2metafield(gpointer key, gpointer value, gpointer user_data) { char *crm_name = NULL; if (key == NULL || value == NULL) { return; } else if (((char *)key)[0] == '#') { return; } else if (strstr(key, ":")) { return; } crm_name = crm_meta_name(key); hash2field(crm_name, value, user_data); free(crm_name); } GHashTable * xml2list(xmlNode * parent) { xmlNode *child = NULL; xmlAttrPtr pIter = NULL; xmlNode *nvpair_list = NULL; GHashTable *nvpair_hash = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); CRM_CHECK(parent != NULL, return nvpair_hash); nvpair_list = find_xml_node(parent, XML_TAG_ATTRS, FALSE); if (nvpair_list == NULL) { crm_trace("No attributes in %s", crm_element_name(parent)); crm_log_xml_trace(parent, "No attributes for resource op"); } crm_log_xml_trace(nvpair_list, "Unpacking"); for (pIter = crm_first_attr(nvpair_list); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); crm_trace("Added %s=%s", p_name, p_value); g_hash_table_insert(nvpair_hash, strdup(p_name), strdup(p_value)); } for (child = __xml_first_child(nvpair_list); child != NULL; 
child = __xml_next(child)) { if (strcmp((const char *)child->name, XML_TAG_PARAM) == 0) { const char *key = crm_element_value(child, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(child, XML_NVPAIR_ATTR_VALUE); crm_trace("Added %s=%s", key, value); if (key != NULL && value != NULL) { g_hash_table_insert(nvpair_hash, strdup(key), strdup(value)); } } } return nvpair_hash; } typedef struct name_value_s { const char *name; const void *value; } name_value_t; static gint sort_pairs(gconstpointer a, gconstpointer b) { int rc = 0; const name_value_t *pair_a = a; const name_value_t *pair_b = b; CRM_ASSERT(a != NULL); CRM_ASSERT(pair_a->name != NULL); CRM_ASSERT(b != NULL); CRM_ASSERT(pair_b->name != NULL); rc = strcmp(pair_a->name, pair_b->name); if (rc < 0) { return -1; } else if (rc > 0) { return 1; } return 0; } static void dump_pair(gpointer data, gpointer user_data) { name_value_t *pair = data; xmlNode *parent = user_data; crm_xml_add(parent, pair->name, pair->value); } xmlNode * sorted_xml(xmlNode * input, xmlNode * parent, gboolean recursive) { xmlNode *child = NULL; GListPtr sorted = NULL; GListPtr unsorted = NULL; name_value_t *pair = NULL; xmlNode *result = NULL; const char *name = NULL; xmlAttrPtr pIter = NULL; CRM_CHECK(input != NULL, return NULL); name = crm_element_name(input); CRM_CHECK(name != NULL, return NULL); result = create_xml_node(parent, name); for (pIter = crm_first_attr(input); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); pair = calloc(1, sizeof(name_value_t)); pair->name = p_name; pair->value = p_value; unsorted = g_list_prepend(unsorted, pair); pair = NULL; } sorted = g_list_sort(unsorted, sort_pairs); g_list_foreach(sorted, dump_pair, result); g_list_free_full(sorted, free); for (child = __xml_first_child(input); child != NULL; child = __xml_next(child)) { if (recursive) { sorted_xml(child, result, recursive); } else { add_node_copy(result, child); } } return result; } static gboolean validate_with_dtd(xmlDocPtr doc, gboolean to_logs, const char *dtd_file) { gboolean valid = TRUE; xmlDtdPtr dtd = NULL; xmlValidCtxtPtr cvp = NULL; CRM_CHECK(doc != NULL, return FALSE); CRM_CHECK(dtd_file != NULL, return FALSE); dtd = xmlParseDTD(NULL, (const xmlChar *)dtd_file); if(dtd == NULL) { crm_err("Could not locate/parse DTD: %s", dtd_file); return TRUE; } cvp = xmlNewValidCtxt(); if(cvp) { if (to_logs) { cvp->userData = (void *)LOG_ERR; cvp->error = (xmlValidityErrorFunc) xml_log; cvp->warning = (xmlValidityWarningFunc) xml_log; } else { cvp->userData = (void *)stderr; cvp->error = (xmlValidityErrorFunc) fprintf; cvp->warning = (xmlValidityWarningFunc) fprintf; } if (!xmlValidateDtd(cvp, doc, dtd)) { valid = FALSE; } xmlFreeValidCtxt(cvp); } else { crm_err("Internal error: No valid context"); } xmlFreeDtd(dtd); return valid; } xmlNode * first_named_child(xmlNode * parent, const char *name) { xmlNode *match = NULL; for (match = __xml_first_child(parent); match != NULL; match = __xml_next(match)) { /* * name == NULL gives first child regardless of name; this is * semantically incorrect in this function, but may be necessary * due to prior use of xml_child_iter_filter */ if (name == NULL || strcmp((const char *)match->name, name) == 0) { return match; } } return NULL; } #if 0 static void relaxng_invalid_stderr(void *userData, xmlErrorPtr error) { /* Structure xmlError struct _xmlError { int domain : What part of the library raised this error int code : The error code, e.g.
an xmlParserError char * message : human-readable informative error message xmlErrorLevel level : how consequent is the error char * file : the filename int line : the line number if available char * str1 : extra string information char * str2 : extra string information char * str3 : extra string information int int1 : extra number information int int2 : column number of the error or 0 if N/A void * ctxt : the parser context if available void * node : the node in the tree } */ crm_err("Structured error: line=%d, level=%d %s", error->line, error->level, error->message); } #endif static gboolean validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file, relaxng_ctx_cache_t ** cached_ctx) { int rc = 0; gboolean valid = TRUE; relaxng_ctx_cache_t *ctx = NULL; CRM_CHECK(doc != NULL, return FALSE); CRM_CHECK(relaxng_file != NULL, return FALSE); if (cached_ctx && *cached_ctx) { ctx = *cached_ctx; } else { crm_info("Creating RNG parser context"); ctx = calloc(1, sizeof(relaxng_ctx_cache_t)); xmlLoadExtDtdDefaultValue = 1; ctx->parser = xmlRelaxNGNewParserCtxt(relaxng_file); CRM_CHECK(ctx->parser != NULL, goto cleanup); if (to_logs) { xmlRelaxNGSetParserErrors(ctx->parser, (xmlRelaxNGValidityErrorFunc) xml_log, (xmlRelaxNGValidityWarningFunc) xml_log, GUINT_TO_POINTER(LOG_ERR)); } else { xmlRelaxNGSetParserErrors(ctx->parser, (xmlRelaxNGValidityErrorFunc) fprintf, (xmlRelaxNGValidityWarningFunc) fprintf, stderr); } ctx->rng = xmlRelaxNGParse(ctx->parser); CRM_CHECK(ctx->rng != NULL, crm_err("Could not find/parse %s", relaxng_file); goto cleanup); ctx->valid = xmlRelaxNGNewValidCtxt(ctx->rng); CRM_CHECK(ctx->valid != NULL, goto cleanup); if (to_logs) { xmlRelaxNGSetValidErrors(ctx->valid, (xmlRelaxNGValidityErrorFunc) xml_log, (xmlRelaxNGValidityWarningFunc) xml_log, GUINT_TO_POINTER(LOG_ERR)); } else { xmlRelaxNGSetValidErrors(ctx->valid, (xmlRelaxNGValidityErrorFunc) fprintf, (xmlRelaxNGValidityWarningFunc) fprintf, stderr); } } /* xmlRelaxNGSetValidStructuredErrors( */ /* valid, relaxng_invalid_stderr, valid); */ xmlLineNumbersDefault(1); rc = xmlRelaxNGValidateDoc(ctx->valid, doc); if (rc > 0) { valid = FALSE; } else if (rc < 0) { crm_err("Internal libxml error during validation"); } cleanup: if (cached_ctx) { *cached_ctx = ctx; } else { if (ctx->parser != NULL) { xmlRelaxNGFreeParserCtxt(ctx->parser); } if (ctx->valid != NULL) { xmlRelaxNGFreeValidCtxt(ctx->valid); } if (ctx->rng != NULL) { xmlRelaxNGFree(ctx->rng); } free(ctx); } return valid; } void crm_xml_init(void) { static bool init = TRUE; if(init) { init = FALSE; /* The default allocator XML_BUFFER_ALLOC_EXACT does far too many * realloc_safe()s and it can take upwards of 18 seconds (yes, seconds) * to dump a 28kb tree which XML_BUFFER_ALLOC_DOUBLEIT can do in * less than 1 second.
*/ xmlSetBufferAllocationScheme(XML_BUFFER_ALLOC_DOUBLEIT); /* Populate and free the _private field when nodes are created and destroyed */ xmlDeregisterNodeDefault(pcmkDeregisterNode); xmlRegisterNodeDefault(pcmkRegisterNode); __xml_build_schema_list(); } } void crm_xml_cleanup(void) { int lpc = 0; relaxng_ctx_cache_t *ctx = NULL; crm_info("Cleaning up memory from libxml2"); for (; lpc < xml_schema_max; lpc++) { switch (known_schemas[lpc].type) { case 0: /* None */ break; case 1: /* DTD - Not cached */ break; case 2: /* RNG - Cached */ ctx = (relaxng_ctx_cache_t *) known_schemas[lpc].cache; if (ctx == NULL) { break; } if (ctx->parser != NULL) { xmlRelaxNGFreeParserCtxt(ctx->parser); } if (ctx->valid != NULL) { xmlRelaxNGFreeValidCtxt(ctx->valid); } if (ctx->rng != NULL) { xmlRelaxNGFree(ctx->rng); } free(ctx); known_schemas[lpc].cache = NULL; break; default: break; } free(known_schemas[lpc].name); free(known_schemas[lpc].location); free(known_schemas[lpc].transform); } free(known_schemas); xsltCleanupGlobals(); xmlCleanupParser(); } static gboolean validate_with(xmlNode * xml, int method, gboolean to_logs) { xmlDocPtr doc = NULL; gboolean valid = FALSE; int type = 0; char *file = NULL; if(method < 0) { return FALSE; } type = known_schemas[method].type; if(type == 0) { return TRUE; } CRM_CHECK(xml != NULL, return FALSE); doc = getDocPtr(xml); file = get_schema_path(known_schemas[method].name, known_schemas[method].location); crm_trace("Validating with: %s (type=%d)", crm_str(file), type); switch (type) { case 1: valid = validate_with_dtd(doc, to_logs, file); break; case 2: valid = validate_with_relaxng(doc, to_logs, file, (relaxng_ctx_cache_t **) & (known_schemas[method].cache)); break; default: crm_err("Unknown validator type: %d", type); break; } free(file); return valid; } #include <stdio.h> static void dump_file(const char *filename) { FILE *fp = NULL; int ch, line = 0; CRM_CHECK(filename != NULL, return); fp = fopen(filename, "r"); CRM_CHECK(fp != NULL, return); fprintf(stderr, "%4d ", ++line); do { ch = getc(fp); if (ch == EOF) { putc('\n', stderr); break; } else if (ch == '\n') { fprintf(stderr, "\n%4d ", ++line); } else { putc(ch, stderr); } } while (1); fclose(fp); } gboolean validate_xml_verbose(xmlNode * xml_blob) { int fd = 0; xmlDoc *doc = NULL; xmlNode *xml = NULL; gboolean rc = FALSE; char *filename = strdup(CRM_STATE_DIR "/cib-invalid.XXXXXX"); umask(S_IWGRP | S_IWOTH | S_IROTH); fd = mkstemp(filename); write_xml_fd(xml_blob, filename, fd, FALSE); dump_file(filename); doc = xmlParseFile(filename); xml = xmlDocGetRootElement(doc); rc = validate_xml(xml, NULL, FALSE); free_xml(xml); unlink(filename); free(filename); return rc; } gboolean validate_xml(xmlNode * xml_blob, const char *validation, gboolean to_logs) { int version = 0; if (validation == NULL) { validation = crm_element_value(xml_blob, XML_ATTR_VALIDATION); } if (validation == NULL) { int lpc = 0; bool valid = FALSE; validation = crm_element_value(xml_blob, "ignore-dtd"); if (crm_is_true(validation)) { /* Legacy compatibility */ crm_xml_add(xml_blob, XML_ATTR_VALIDATION, "none"); return TRUE; } /* Work it out */ for (lpc = 0; lpc < xml_schema_max; lpc++) { if(validate_with(xml_blob, lpc, FALSE)) { valid = TRUE; crm_xml_add(xml_blob, XML_ATTR_VALIDATION, known_schemas[lpc].name); crm_info("XML validated against %s", known_schemas[lpc].name); if(known_schemas[lpc].after_transform == 0) { break; } } } return valid; } version = get_schema_version(validation); if (strcmp(validation, "none") == 0) { return TRUE; } else
if(version < xml_schema_max) { return validate_with(xml_blob, version, to_logs); } crm_err("Unknown validator: %s", validation); return FALSE; } #if HAVE_LIBXSLT static xmlNode * apply_transformation(xmlNode * xml, const char *transform) { char *xform = NULL; xmlNode *out = NULL; xmlDocPtr res = NULL; xmlDocPtr doc = NULL; xsltStylesheet *xslt = NULL; CRM_CHECK(xml != NULL, return FALSE); doc = getDocPtr(xml); xform = get_schema_path(NULL, transform); xmlLoadExtDtdDefaultValue = 1; xmlSubstituteEntitiesDefault(1); xslt = xsltParseStylesheetFile((const xmlChar *)xform); CRM_CHECK(xslt != NULL, goto cleanup); res = xsltApplyStylesheet(xslt, doc, NULL); CRM_CHECK(res != NULL, goto cleanup); out = xmlDocGetRootElement(res); cleanup: if (xslt) { xsltFreeStylesheet(xslt); } free(xform); return out; } #endif const char * get_schema_name(int version) { if (version < 0 || version >= xml_schema_max) { return "unknown"; } return known_schemas[version].name; } int get_schema_version(const char *name) { int lpc = 0; if(name == NULL) { name = "none"; } for (; lpc < xml_schema_max; lpc++) { if (safe_str_eq(name, known_schemas[lpc].name)) { return lpc; } } return -1; } /* set which validation to use */ #include <crm/cib.h> int update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, gboolean to_logs) { xmlNode *xml = NULL; char *value = NULL; int max_stable_schemas = xml_latest_schema_index(); int lpc = 0, match = -1, rc = pcmk_ok; CRM_CHECK(best != NULL, return -EINVAL); CRM_CHECK(xml_blob != NULL, return -EINVAL); CRM_CHECK(*xml_blob != NULL, return -EINVAL); *best = 0; xml = *xml_blob; value = crm_element_value_copy(xml, XML_ATTR_VALIDATION); if (value != NULL) { match = get_schema_version(value); lpc = match; if (lpc >= 0 && transform == FALSE) { lpc++; } else if (lpc < 0) { crm_debug("Unknown validation type"); lpc = 0; } } if (match >= max_stable_schemas) { /* nothing to do */ free(value); *best = match; return pcmk_ok; } while(lpc <= max_stable_schemas) { gboolean valid = TRUE; crm_debug("Testing '%s' validation (%d of %d)", known_schemas[lpc].name ? known_schemas[lpc].name : "<unset>", lpc, max_stable_schemas); valid = validate_with(xml, lpc, to_logs); if (valid) { *best = lpc; } else { crm_trace("%s validation failed", known_schemas[lpc].name ? known_schemas[lpc].name : "<unset>"); } if (valid && transform) { xmlNode *upgrade = NULL; int next = known_schemas[lpc].after_transform; if (next < 0) { crm_trace("Stopping at %s", known_schemas[lpc].name); break; } else if (max > 0 && lpc == max) { crm_trace("Upgrade limit reached at %s (lpc=%d, next=%d, max=%d)", known_schemas[lpc].name, lpc, next, max); break; } else if (max > 0 && next > max) { crm_debug("Upgrade limit reached at %s (lpc=%d, next=%d, max=%d)", known_schemas[lpc].name, lpc, next, max); break; } else if (known_schemas[lpc].transform == NULL) { crm_notice("%s-style configuration is also valid for %s", known_schemas[lpc].name, known_schemas[next].name); if (validate_with(xml, next, to_logs)) { crm_debug("Configuration valid for schema: %s", known_schemas[next].name); lpc = next; *best = next; rc = pcmk_ok; } else { crm_info("Configuration not valid for schema: %s", known_schemas[next].name); } } else { crm_debug("Upgrading %s-style configuration to %s with %s", known_schemas[lpc].name, known_schemas[next].name, known_schemas[lpc].transform ? 
known_schemas[lpc].transform : "no-op"); #if HAVE_LIBXSLT upgrade = apply_transformation(xml, known_schemas[lpc].transform); #endif if (upgrade == NULL) { crm_err("Transformation %s failed", known_schemas[lpc].transform); rc = -pcmk_err_transform_failed; } else if (validate_with(upgrade, next, to_logs)) { crm_info("Transformation %s successful", known_schemas[lpc].transform); lpc = next; *best = next; free_xml(xml); xml = upgrade; rc = pcmk_ok; } else { crm_err("Transformation %s did not produce a valid configuration", known_schemas[lpc].transform); crm_log_xml_info(upgrade, "transform:bad"); free_xml(upgrade); rc = -pcmk_err_schema_validation; } } } if (*best > match) { crm_info("%s the configuration from %s to %s", transform?"Transformed":"Upgraded", value ? value : "<none>", known_schemas[*best].name); crm_xml_add(xml, XML_ATTR_VALIDATION, known_schemas[*best].name); } *xml_blob = xml; free(value); return rc; } /* * From xpath2.c * * All the elements returned by an XPath query are pointers to * elements from the tree *except* namespace nodes where the XPath * semantic is different from the implementation in libxml2 tree. * As a result when a returned node set is freed when * xmlXPathFreeObject() is called, that routine must check the * element type. But node from the returned set may have been removed * by xmlNodeSetContent() resulting in access to freed data. * * This can be exercised by running * valgrind xpath2 test3.xml '//discarded' discarded * * There are two ways around it: * - make a copy of the pointers to the nodes from the result set * then call xmlXPathFreeObject() and then modify the nodes * or * - remove the references from the node set, if they are not namespace nodes, before calling xmlXPathFreeObject(). */ void freeXpathObject(xmlXPathObjectPtr xpathObj) { int lpc, max = numXpathResults(xpathObj); if(xpathObj == NULL) { return; } for(lpc = 0; lpc < max; lpc++) { if (xpathObj->nodesetval->nodeTab[lpc] && xpathObj->nodesetval->nodeTab[lpc]->type != XML_NAMESPACE_DECL) { xpathObj->nodesetval->nodeTab[lpc] = NULL; } } /* _Now_ it's safe to free it */ xmlXPathFreeObject(xpathObj); } xmlNode * getXpathResult(xmlXPathObjectPtr xpathObj, int index) { xmlNode *match = NULL; int max = numXpathResults(xpathObj); CRM_CHECK(index >= 0, return NULL); CRM_CHECK(xpathObj != NULL, return NULL); if (index >= max) { crm_err("Requested index %d of only %d items", index, max); return NULL; } else if(xpathObj->nodesetval->nodeTab[index] == NULL) { /* Previously requested */ return NULL; } match = xpathObj->nodesetval->nodeTab[index]; CRM_CHECK(match != NULL, return NULL); if (xpathObj->nodesetval->nodeTab[index]->type != XML_NAMESPACE_DECL) { /* See the comment for freeXpathObject() */ xpathObj->nodesetval->nodeTab[index] = NULL; } if (match->type == XML_DOCUMENT_NODE) { /* Will happen if section = '/' */ match = match->children; } else if (match->type != XML_ELEMENT_NODE && match->parent && match->parent->type == XML_ELEMENT_NODE) { /* returning the parent instead */ match = match->parent; } else if (match->type != XML_ELEMENT_NODE) { /* We only support searching nodes */ crm_err("We only support %d not %d", XML_ELEMENT_NODE, match->type); match = NULL; } return match; } /* the caller needs to check if the result contains an xmlDocPtr or xmlNodePtr */ xmlXPathObjectPtr xpath_search(xmlNode * xml_top, const char *path) { xmlDocPtr doc = NULL; xmlXPathObjectPtr xpathObj = NULL; xmlXPathContextPtr xpathCtx = NULL; const xmlChar *xpathExpr = (const xmlChar *)path; CRM_CHECK(path != NULL, return
NULL); CRM_CHECK(xml_top != NULL, return NULL); CRM_CHECK(strlen(path) > 0, return NULL); doc = getDocPtr(xml_top); xpathCtx = xmlXPathNewContext(doc); CRM_ASSERT(xpathCtx != NULL); xpathObj = xmlXPathEvalExpression(xpathExpr, xpathCtx); xmlXPathFreeContext(xpathCtx); return xpathObj; } gboolean cli_config_update(xmlNode ** xml, int *best_version, gboolean to_logs) { gboolean rc = TRUE; const char *value = crm_element_value(*xml, XML_ATTR_VALIDATION); int version = get_schema_version(value); int min_version = xml_minimum_schema_index(); if (version < min_version) { xmlNode *converted = NULL; converted = copy_xml(*xml); update_validation(&converted, &version, 0, TRUE, to_logs); value = crm_element_value(converted, XML_ATTR_VALIDATION); if (version < min_version) { if (to_logs) { crm_config_err("Your current configuration could only be upgraded to %s... " "the minimum requirement is %s.\n", crm_str(value), get_schema_name(min_version)); } else { fprintf(stderr, "Your current configuration could only be upgraded to %s... " "the minimum requirement is %s.\n", crm_str(value), get_schema_name(min_version)); } free_xml(converted); converted = NULL; rc = FALSE; } else { free_xml(*xml); *xml = converted; if (version < xml_latest_schema_index()) { crm_config_warn("Your configuration was internally updated to %s... " "which is acceptable but not the most recent", get_schema_name(version)); } else if (to_logs) { crm_info("Your configuration was internally updated to the latest version (%s)", get_schema_name(version)); } } } else if (version >= get_schema_version("none")) { if (to_logs) { crm_config_warn("Configuration validation is currently disabled." " It is highly encouraged and prevents many common cluster issues."); } else { fprintf(stderr, "Configuration validation is currently disabled." 
" It is highly encouraged and prevents many common cluster issues.\n"); } } if (best_version) { *best_version = version; } return rc; } xmlNode * expand_idref(xmlNode * input, xmlNode * top) { const char *tag = NULL; const char *ref = NULL; xmlNode *result = input; char *xpath_string = NULL; if (result == NULL) { return NULL; } else if (top == NULL) { top = input; } tag = crm_element_name(result); ref = crm_element_value(result, XML_ATTR_IDREF); if (ref != NULL) { int xpath_max = 512, offset = 0; xpath_string = calloc(1, xpath_max); offset += snprintf(xpath_string + offset, xpath_max - offset, "//%s[@id='%s']", tag, ref); CRM_LOG_ASSERT(offset > 0); result = get_xpath_object(xpath_string, top, LOG_ERR); if (result == NULL) { char *nodePath = (char *)xmlGetNodePath(top); crm_err("No match for %s found in %s: Invalid configuration", xpath_string, crm_str(nodePath)); free(nodePath); } } free(xpath_string); return result; } xmlNode * get_xpath_object_relative(const char *xpath, xmlNode * xml_obj, int error_level) { int len = 0; xmlNode *result = NULL; char *xpath_full = NULL; char *xpath_prefix = NULL; if (xml_obj == NULL || xpath == NULL) { return NULL; } xpath_prefix = (char *)xmlGetNodePath(xml_obj); len += strlen(xpath_prefix); len += strlen(xpath); xpath_full = strdup(xpath_prefix); xpath_full = realloc_safe(xpath_full, len + 1); strncat(xpath_full, xpath, len); result = get_xpath_object(xpath_full, xml_obj, error_level); free(xpath_prefix); free(xpath_full); return result; } xmlNode * get_xpath_object(const char *xpath, xmlNode * xml_obj, int error_level) { int max; xmlNode *result = NULL; xmlXPathObjectPtr xpathObj = NULL; char *nodePath = NULL; char *matchNodePath = NULL; if (xpath == NULL) { return xml_obj; /* or return NULL? */ } xpathObj = xpath_search(xml_obj, xpath); nodePath = (char *)xmlGetNodePath(xml_obj); max = numXpathResults(xpathObj); if (max < 1) { do_crm_log(error_level, "No match for %s in %s", xpath, crm_str(nodePath)); crm_log_xml_explicit(xml_obj, "Unexpected Input"); } else if (max > 1) { int lpc = 0; do_crm_log(error_level, "Too many matches for %s in %s", xpath, crm_str(nodePath)); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if(match != NULL) { matchNodePath = (char *)xmlGetNodePath(match); do_crm_log(error_level, "%s[%d] = %s", xpath, lpc, crm_str(matchNodePath)); free(matchNodePath); } } crm_log_xml_explicit(xml_obj, "Bad Input"); } else { result = getXpathResult(xpathObj, 0); } freeXpathObject(xpathObj); free(nodePath); return result; } const char * crm_element_value(xmlNode * data, const char *name) { xmlAttr *attr = NULL; if (data == NULL) { crm_err("Couldn't find %s in NULL", name ? name : "<null>"); CRM_LOG_ASSERT(data != NULL); return NULL; } else if (name == NULL) { crm_err("Couldn't find NULL in %s", crm_element_name(data)); return NULL; } attr = xmlHasProp(data, (const xmlChar *)name); if (attr == NULL || attr->children == NULL) { return NULL; } return (const char *)attr->children->content; }
/* * npw-viewer.c - Target plugin loader and viewer * * nspluginwrapper (C) 2005-2009 Gwenole Beauchesne * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #define _GNU_SOURCE 1 /* RTLD_NEXT */ #include "sysdeps.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <strings.h> #include <dlfcn.h> #include <unistd.h> #include <errno.h> #include <pthread.h> #include <fcntl.h> #include <X11/X.h> #include <X11/Xlib.h> #include <X11/Xutil.h> #include <X11/Intrinsic.h> #include <X11/Shell.h> #include <X11/StringDefs.h> #include <X11/extensions/XShm.h> #include <gtk/gtk.h> #include <gdk/gdkx.h> #include "utils.h" #include "xembed.h" #include "npw-common.h" #include "npw-malloc.h" #define DEBUG 1 #include "debug.h" // [UNIMPLEMENTED] Define to use XPCOM emulation #define USE_XPCOM 0 // Define to use XEMBED hack (don't let browser kill our window) #define USE_XEMBED_HACK 1 // Define to allow windowless plugins #define ALLOW_WINDOWLESS_PLUGINS 1 // Define to use NPIdentifier cache #define USE_NPIDENTIFIER_CACHE 1 #define NPIDENTIFIER_CACHE_SIZE 256 // RPC global connections rpc_connection_t *g_rpc_connection attribute_hidden = NULL; // Viewer main thread - make sure we call into the browser from the main thread static pthread_t g_main_thread = 0; // Instance state information about the plugin typedef struct _PluginInstance { NPW_DECL_PLUGIN_INSTANCE; bool use_xembed; bool is_windowless; NPWindow window; uint32_t width, height; void *toolkit_data; GdkWindow *browser_toplevel; } PluginInstance; #define PLUGIN_INSTANCE(instance) \ ((PluginInstance *)NPW_PLUGIN_INSTANCE(instance)) #define PLUGIN_INSTANCE_NPP(plugin) \ NPW_PLUGIN_INSTANCE_NPP((NPW_PluginInstance *)(plugin)) // Browser side data for an NPStream instance typedef struct _StreamInstance { NPW_DECL_STREAM_INSTANCE; } StreamInstance; // Xt wrapper data typedef struct _XtData { Window browser_window; Widget top_widget; Widget form; } XtData; // Gtk wrapper data typedef struct _GtkData { GtkWidget *container; GtkWidget *socket; } GtkData; // Prototypes static void destroy_window(PluginInstance *plugin); static int xt_source_create(void); static void xt_source_destroy(void); /* ====================================================================== */ /* === Helpers === */ /* ====================================================================== */ // PluginInstance vfuncs static void *plugin_instance_allocate(void); static void plugin_instance_deallocate(PluginInstance *plugin); static void plugin_instance_finalize(PluginInstance *plugin); static void plugin_instance_invalidate(PluginInstance *plugin); static NPW_PluginInstanceClass PluginInstanceClass = { (NPW_PluginInstanceAllocateFunctionPtr)plugin_instance_allocate, (NPW_PluginInstanceDeallocateFunctionPtr)plugin_instance_deallocate, (NPW_PluginInstanceFinalizeFunctionPtr)plugin_instance_finalize, 
(NPW_PluginInstanceInvalidateFunctionPtr)plugin_instance_invalidate }; static void *plugin_instance_allocate(void) { return NPW_MemNew0(PluginInstance, 1); } static void plugin_instance_deallocate(PluginInstance *plugin) { NPW_MemFree(plugin); } static void plugin_instance_finalize(PluginInstance *plugin) { if (plugin->browser_toplevel) { g_object_unref(plugin->browser_toplevel); plugin->browser_toplevel = NULL; } if (plugin->instance) { free(plugin->instance); plugin->instance = NULL; } } static void plugin_instance_invalidate(PluginInstance *plugin) { destroy_window(plugin); /* NPP instance is no longer valid beyond this point. Drop the link to the PluginInstance now so that future RPC with this PluginInstance will actually emit a NULL instance, which the other side will deal as a no-op for all functions but NPN_GetValue(). However, don't free() the NPP instance yet as it could be used later, e.g. in some NPObject::Invalidate()... Note: this also means we forbid that function to call into the browser in an NPP instance. */ if (plugin->instance_id) { id_remove(plugin->instance_id); plugin->instance_id = 0; } } // Thread support routines static void thread_check_init(void) { g_main_thread = pthread_self(); } #ifdef ENABLE_THREAD_CHECK static bool is_thread_check_enabled_1(void) { const char *thread_check_str; if ((thread_check_str = getenv("NPW_THREAD_CHECK")) != NULL) return ((strcmp(thread_check_str, "yes") == 0) || (strcmp(thread_check_str, "1") == 0)); /* enable main-thread checks by default for all builds from snapshots */ return NPW_SNAPSHOT > 0; } static inline bool is_thread_check_enabled(void) { static int thread_check = -1; if (thread_check < 0) thread_check = is_thread_check_enabled_1(); return thread_check; } #endif bool thread_check(void) { #ifdef ENABLE_THREAD_CHECK if (is_thread_check_enabled()) return (g_main_thread == pthread_self()); #endif return true; } // Delayed calls machinery // XXX: use a pipe, this should be faster (avoids GSource creation and // explicit memory allocation) enum { RPC_DELAYED_NPN_RELEASE_OBJECT = 1 }; typedef struct _DelayedCall { gint type; gpointer data; } DelayedCall; static GList *g_delayed_calls = NULL; static guint g_delayed_calls_id = 0; // We put delayed NPP_Destroy calls on a separate list because, unlike // NPN_ReleaseObject, these must be called on a clean stack and have no // other cause to get cleared. Otherwise, it is possible for the // delayed_calls_process in g_NPP_Destroy_Now to call it early. 
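/*
 * Illustrative sketch only (not part of the original file): how the
 * deferral pattern described above is used. Instead of releasing an
 * NPObject synchronously -- which could re-enter the browser on an
 * unsafe stack -- the viewer queues the release and lets the GLib idle
 * handler drain the queue on a clean stack later. Guarded with "#if 0"
 * so it cannot affect compilation; delayed_calls_add() is defined below.
 */
#if 0
static void example_release_object_later(NPObject *npobj)
{
    /* Queued now; g_NPN_ReleaseObject_Now() runs later from
     * delayed_calls_process_cb() on the main loop. */
    delayed_calls_add(RPC_DELAYED_NPN_RELEASE_OBJECT, npobj);
}
#endif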
static GList *g_delayed_destroys = NULL; static guint g_delayed_destroys_id = 0; static void g_NPN_ReleaseObject_Now(NPObject *npobj); static NPError g_NPP_Destroy_Now(PluginInstance *plugin, NPSavedData **sdata); static gboolean delayed_calls_process_cb(gpointer user_data); static gboolean delayed_destroys_process_cb(gpointer user_data); static void delayed_calls_add(int type, gpointer data) { DelayedCall *dcall = NPW_MemNew(DelayedCall, 1); if (dcall == NULL) return; dcall->type = type; dcall->data = data; g_delayed_calls = g_list_append(g_delayed_calls, dcall); if (g_delayed_calls_id == 0) g_delayed_calls_id = g_idle_add_full(G_PRIORITY_LOW, delayed_calls_process_cb, NULL, NULL); } static void delayed_destroys_add(PluginInstance *plugin) { g_delayed_destroys = g_list_append(g_delayed_destroys, plugin); if (g_delayed_destroys_id == 0) g_delayed_destroys_id = g_idle_add_full(G_PRIORITY_LOW, delayed_destroys_process_cb, NULL, NULL); } // Returns whether there are pending calls left in the queue static gboolean delayed_calls_process(PluginInstance *plugin, gboolean is_in_NPP_Destroy) { while (g_delayed_calls != NULL) { if (!is_in_NPP_Destroy) { /* Continue later if there is incoming RPC */ if (rpc_wait_dispatch(g_rpc_connection, 0) > 0) return TRUE; } DelayedCall *dcall = (DelayedCall *)g_delayed_calls->data; /* XXX: Remove the link first; this function /must/ be * re-entrant. We may be called again while processing the * delayed call. */ g_delayed_calls = g_list_delete_link(g_delayed_calls, g_delayed_calls); switch (dcall->type) { case RPC_DELAYED_NPN_RELEASE_OBJECT: { NPObject *npobj = (NPObject *)dcall->data; g_NPN_ReleaseObject_Now(npobj); break; } } NPW_MemFree(dcall); } if (g_delayed_calls) return TRUE; if (g_delayed_calls_id) { g_source_remove(g_delayed_calls_id); g_delayed_calls_id = 0; } return FALSE; } static gboolean delayed_calls_process_cb(gpointer user_data) { return delayed_calls_process(NULL, FALSE); } static gboolean delayed_destroys_process_cb(gpointer user_data) { while (g_delayed_destroys != NULL) { PluginInstance *plugin = (PluginInstance *)g_delayed_destroys->data; g_delayed_destroys = g_list_delete_link(g_delayed_destroys, g_delayed_destroys); g_NPP_Destroy_Now(plugin, NULL); } if (g_delayed_destroys) return TRUE; if (g_delayed_destroys_id) { g_source_remove(g_delayed_destroys_id); g_delayed_destroys_id = 0; } return FALSE; } // NPIdentifier cache static inline bool use_npidentifier_cache(void) { return USE_NPIDENTIFIER_CACHE && npruntime_use_cache(); } #if USE_NPIDENTIFIER_CACHE /* XXX: NPIdentifierInfo could become the NPIdentifier, thus avoiding the global hash table, if there is a garbage collector. Otherwise, we will be leaking memory since there is no function that kills an NPIdentifier. 
*/
typedef struct _NPIdentifierInfo {
  guint string_len;  /* >0 implies 1+strlen(string), =0 implies an integer */
  union {
    gchar *string;
    int32_t value;
  } u;
} NPIdentifierInfo;

static GHashTable *g_npidentifier_cache = NULL;

static inline NPIdentifierInfo *npidentifier_info_new(void)
{
  return NPW_MemNew(NPIdentifierInfo, 1);
}

static inline void npidentifier_info_destroy(NPIdentifierInfo *npi)
{
  if (G_UNLIKELY(npi == NULL))
    return;
  if (npi->string_len > 0) {
    NPW_MemFree(npi->u.string);
    npi->u.string = NULL;
  }
  NPW_MemFree(npi);
}

static inline void npidentifier_cache_create(void)
{
  g_npidentifier_cache = g_hash_table_new_full(NULL, NULL, NULL,
                                               (GDestroyNotify)npidentifier_info_destroy);
}

static inline void npidentifier_cache_destroy(void)
{
  if (g_npidentifier_cache) {
    g_hash_table_destroy(g_npidentifier_cache);
    g_npidentifier_cache = NULL;
  }
}

static void npidentifier_cache_invalidate(void)
{
#if defined(HAVE_G_HASH_TABLE_REMOVE_ALL) && !defined(BUILD_GENERIC)
  if (g_npidentifier_cache)
    g_hash_table_remove_all(g_npidentifier_cache);
#else
  npidentifier_cache_destroy();
  npidentifier_cache_create();
#endif
}

static void npidentifier_cache_reserve(int n_entries)
{
  if (G_UNLIKELY(g_npidentifier_cache == NULL))
    npidentifier_cache_create();
  if (g_hash_table_size(g_npidentifier_cache) + n_entries > NPIDENTIFIER_CACHE_SIZE)
    npidentifier_cache_invalidate();
}

static inline NPIdentifierInfo *npidentifier_cache_lookup(NPIdentifier ident)
{
  if (G_UNLIKELY(g_npidentifier_cache == NULL))
    return NULL;
  return g_hash_table_lookup(g_npidentifier_cache, ident);
}

static void npidentifier_cache_add_int(NPIdentifier ident, int32_t value)
{
  if (G_UNLIKELY(g_npidentifier_cache == NULL))
    return;
  NPIdentifierInfo *npi = npidentifier_info_new();
  if (G_UNLIKELY(npi == NULL))
    return;
  npi->string_len = 0;
  npi->u.value = value;
  g_hash_table_insert(g_npidentifier_cache, ident, npi);
}

typedef struct _NPIdentifierFindArgs {
  NPIdentifierInfo info;  /* in */
  NPIdentifier ident;     /* out */
} NPIdentifierFindArgs;

static gboolean npidentifier_cache_find_info(gpointer key, gpointer value, gpointer user_data)
{
  NPIdentifier ident = (NPIdentifier)key;  /* the key is the identifier itself */
  NPIdentifierInfo *npi = (NPIdentifierInfo *)value;
  NPIdentifierFindArgs *args = (NPIdentifierFindArgs *)user_data;
#if !defined(HAVE_G_HASH_TABLE_FIND) || defined(BUILD_GENERIC)
  if (args->ident)
    return FALSE;
#endif
  if (npi->string_len != args->info.string_len)
    return FALSE;
  if (args->info.string_len > 0) {  /* a string */
    if (memcmp(args->info.u.string, npi->u.string, args->info.string_len) == 0) {
      args->ident = ident;
      return TRUE;
    }
  }
  else {  /* an integer */
    if (args->info.u.value == npi->u.value) {
      args->ident = ident;
      return TRUE;
    }
  }
  return FALSE;
}

static inline bool npidentifier_cache_find(NPIdentifierFindArgs *args, NPIdentifier *pident)
{
  args->ident = NULL;
#if defined(HAVE_G_HASH_TABLE_FIND) && !defined(BUILD_GENERIC)
  if (!g_hash_table_find(g_npidentifier_cache, npidentifier_cache_find_info, args))
    return false;
#else
  g_hash_table_foreach(g_npidentifier_cache, (GHFunc)npidentifier_cache_find_info, args);
  if (args->ident == NULL)
    return false;
#endif
  if (pident)
    *pident = args->ident;
  return true;
}

static inline bool npidentifier_cache_has_int(int32_t value, NPIdentifier *pident)
{
  if (G_UNLIKELY(g_npidentifier_cache == NULL))
    return false;
  NPIdentifierFindArgs args;
  args.info.string_len = 0;
  args.info.u.value = value;
  return npidentifier_cache_find(&args, pident);
}

static inline int32_t npidentifier_cache_get_int(NPIdentifier ident)
{
  NPIdentifierInfo *npi =
npidentifier_cache_lookup(ident); if (G_UNLIKELY(npi == NULL || npi->string_len > 0)) return 0; return npi->u.value; } static void npidentifier_cache_add_string(NPIdentifier ident, const gchar *str) { if (G_UNLIKELY(g_npidentifier_cache == NULL)) return; NPIdentifierInfo *npi = npidentifier_info_new(); if (G_UNLIKELY(npi == NULL)) return; npi->string_len = strlen(str) + 1; if ((npi->u.string = NPW_MemAlloc(npi->string_len)) == NULL) { npidentifier_info_destroy(npi); return; } memcpy(npi->u.string, str, npi->string_len); g_hash_table_insert(g_npidentifier_cache, ident, npi); } static inline bool npidentifier_cache_has_string(const gchar *str, NPIdentifier *pident) { if (G_UNLIKELY(g_npidentifier_cache == NULL)) return false; NPIdentifierFindArgs args; args.info.string_len = strlen(str) + 1; args.info.u.string = (gchar *)str; return npidentifier_cache_find(&args, pident); } static inline NPUTF8 *npidentifier_cache_get_string_copy(NPIdentifier ident) { NPIdentifierInfo *npi = npidentifier_cache_lookup(ident); if (G_UNLIKELY(npi == NULL || npi->string_len == 0)) return NULL; return NPW_MemAllocCopy(npi->string_len, npi->u.string); } #endif /* ====================================================================== */ /* === X Toolkit glue === */ /* ====================================================================== */ static Display *x_display; static XtAppContext x_app_context; typedef struct _XtTMRec { XtTranslations translations; /* private to Translation Manager */ XtBoundActions proc_table; /* procedure bindings for actions */ struct _XtStateRec *current_state; /* Translation Manager state ptr */ unsigned long lastEventTime; } XtTMRec, *XtTM; typedef struct _CorePart { Widget self; /* pointer to widget itself */ WidgetClass widget_class; /* pointer to Widget's ClassRec */ Widget parent; /* parent widget */ XrmName xrm_name; /* widget resource name quarkified */ Boolean being_destroyed; /* marked for destroy */ XtCallbackList destroy_callbacks; /* who to call when widget destroyed */ XtPointer constraints; /* constraint record */ Position x, y; /* window position */ Dimension width, height; /* window dimensions */ Dimension border_width; /* window border width */ Boolean managed; /* is widget geometry managed? */ Boolean sensitive; /* is widget sensitive to user events*/ Boolean ancestor_sensitive; /* are all ancestors sensitive? */ XtEventTable event_table; /* private to event dispatcher */ XtTMRec tm; /* translation management */ XtTranslations accelerators; /* accelerator translations */ Pixel border_pixel; /* window border pixel */ Pixmap border_pixmap; /* window border pixmap or NULL */ WidgetList popup_list; /* list of popups */ Cardinal num_popups; /* how many popups */ String name; /* widget resource name */ Screen *screen; /* window's screen */ Colormap colormap; /* colormap */ Window window; /* window ID */ Cardinal depth; /* number of planes in window */ Pixel background_pixel; /* window background pixel */ Pixmap background_pixmap; /* window background pixmap or NULL */ Boolean visible; /* is window mapped and not occluded?*/ Boolean mapped_when_managed;/* map window if it's managed? 
*/
} CorePart;

typedef struct _WidgetRec {
  CorePart core;
} WidgetRec, CoreRec;

typedef struct _TimerEventRec {
  struct timeval te_timer_value;
  struct _TimerEventRec *te_next;
  XtTimerCallbackProc te_proc;
  XtAppContext app;
  XtPointer te_closure;
} TimerEventRec;

typedef struct _InputEvent {
  XtInputCallbackProc ie_proc;
  XtPointer ie_closure;
  struct _InputEvent *ie_next;
  struct _InputEvent *ie_oq;
  XtAppContext app;
  int ie_source;
  XtInputMask ie_condition;
} InputEvent;

typedef struct _SignalEventRec {
  XtSignalCallbackProc se_proc;
  XtPointer se_closure;
  struct _SignalEventRec *se_next;
  XtAppContext app;
  Boolean se_notice;
} SignalEventRec;

struct _XtAppStruct {
  XtAppContext next;             /* link to next app in process context */
  void *process;                 /* back pointer to our process context */
  void *destroy_callbacks;
  Display **list;
  TimerEventRec *timerQueue;
  void *workQueue;
  InputEvent **input_list;
  InputEvent *outstandingQueue;
  SignalEventRec *signalQueue;
  XrmDatabase errorDB;
  XtErrorMsgHandler errorMsgHandler, warningMsgHandler;
  XtErrorHandler errorHandler, warningHandler;
  struct _ActionListRec *action_table;
  void *converterTable;
  unsigned long selectionTimeout;
  char __maxed__nfds[3*sizeof(fd_set)+4];
  short __maybe__count;          /* num of assigned entries in list */
  short __maybe__max;            /* allocate size of list */
  short __maybe__last;
  short __maybe__input_count;
  short __maybe__input_max;      /* elts input_list init'd with */
  /* ... don't care about other members */
};

extern void XtResizeWidget(
    Widget        /* widget */,
    _XtDimension  /* width */,
    _XtDimension  /* height */,
    _XtDimension  /* border_width */
);

// Dummy X error handler
static int trapped_error_code;
static int (*old_error_handler)(Display *, XErrorEvent *);

static int error_handler(Display *display, XErrorEvent *error)
{
  trapped_error_code = error->error_code;
  return 0;
}

static void trap_errors(void)
{
  trapped_error_code = 0;
  old_error_handler = XSetErrorHandler(error_handler);
}

static int untrap_errors(void)
{
  XSetErrorHandler(old_error_handler);
  return trapped_error_code;
}

// Install the _XEMBED_INFO property
static void xt_client_set_info(Widget w, unsigned long flags)
{
  Atom atom_XEMBED_INFO = XInternAtom(x_display, "_XEMBED_INFO", False);

  unsigned long buffer[2];
  buffer[0] = 0; /* Protocol version */
  buffer[1] = flags;

  XChangeProperty(XtDisplay(w), XtWindow(w),
                  atom_XEMBED_INFO, atom_XEMBED_INFO, 32,
                  PropModeReplace, (unsigned char *)buffer, 2);
}

// Send an XEMBED message to the specified window
static void send_xembed_message(Display *display, Window window,
                                long message, long detail,
                                long data1, long data2)
{
  XEvent xevent;
  memset(&xevent, 0, sizeof(xevent));
  xevent.xclient.window = window;
  xevent.xclient.type = ClientMessage;
  xevent.xclient.message_type = XInternAtom(display, "_XEMBED", False);
  xevent.xclient.format = 32;
  xevent.xclient.data.l[0] = CurrentTime; // XXX: evil?
  xevent.xclient.data.l[1] = message;
  xevent.xclient.data.l[2] = detail;
  xevent.xclient.data.l[3] = data1;
  xevent.xclient.data.l[4] = data2;

  trap_errors();
  XSendEvent(display, xevent.xclient.window, False, NoEventMask, &xevent);
  XSync(display, False);
  untrap_errors();
}

/*
 * NSPluginWrapper strategy to handle input focus.
 *
 * - XEMBED must be enabled with NPPVpluginNeedsXEmbed set to
 *   PR_TRUE. This causes Firefox to pass a plain GtkSocket ID into
 *   NPWindow::window. i.e. the GtkXtBin window ID is NOT used.
 *
 * - A click into the plugin window sends an XEMBED_REQUEST_FOCUS
 *   event to the parent (socket) window.
 *
 * - An XFocusEvent is simulated when XEMBED_FOCUS_IN and
 *   XEMBED_FOCUS_OUT messages arrive to the plugin window.
 *
 * - Key events are forwarded from the top widget (which window was
 *   reparented to the socket window) to the actual canvas (form).
 *
 * Reference checkpoints, i.e. check the following test cases still
 * work if you want to change the policy.
 *
 * [1] Debian bug #435912
 *     <http://www.addictinggames.com/bloxors.html>
 *
 *     Go to stage 1, use arrow keys to move the block. Now, click
 *     outside of the game window, use arrow keys to try to move the
 *     block: it should NOT move.
 *
 * [2] User reported bug
 *     <http://www.forom.com/>
 *
 *     Choose a language and then a mirror. Do NOT move the cursor out
 *     of the plugin window. Now, click into the "Login" input field
 *     and try to type in something, you should have the input focus.
 *
 * [3] Additional test that came in during debugging
 *
 *     Go to either [1] or [2], double-click the browser URL entry to
 *     select it completely. Now, click into the Flash plugin area.
 *     The URL selection MUST now be unselected AND using the Tab key
 *     selects various fields in the Flash window (e.g. menu items
 *     for [1]).
 */

// Simulate client focus
static void xt_client_simulate_focus(Widget w, int type)
{
  XEvent xevent;
  memset(&xevent, 0, sizeof(xevent));
  xevent.xfocus.type = type;
  xevent.xfocus.window = XtWindow(w);
  xevent.xfocus.display = XtDisplay(w);
  xevent.xfocus.mode = NotifyNormal;
  xevent.xfocus.detail = NotifyAncestor;

  trap_errors();
  XSendEvent(XtDisplay(w), xevent.xfocus.window, False, NoEventMask, &xevent);
  XSync(XtDisplay(w), False);
  untrap_errors();
}

// Various hacks for decent event filtering
static void xt_client_event_handler(Widget w, XtPointer client_data, XEvent *event, Boolean *cont)
{
  XtData *toolkit = (XtData *)client_data;

  switch (event->type) {
  case ClientMessage:
    // Handle XEMBED messages, in particular focus changes
    if (event->xclient.message_type == XInternAtom(x_display, "_XEMBED", False)) {
      switch (event->xclient.data.l[1]) {
      case XEMBED_FOCUS_IN:
        xt_client_simulate_focus(toolkit->form, FocusIn);
        break;
      case XEMBED_FOCUS_OUT:
        xt_client_simulate_focus(toolkit->form, FocusOut);
        break;
      }
    }
    break;
  case KeyPress:
  case KeyRelease:
    // Propagate key events down to the actual window
    if (event->xkey.window == XtWindow(toolkit->top_widget)) {
      event->xkey.window = XtWindow(toolkit->form);
      trap_errors();
      XSendEvent(XtDisplay(toolkit->form), event->xkey.window, False, NoEventMask, event);
      XSync(XtDisplay(toolkit->form), False);
      untrap_errors();
      *cont = False;
    }
    break;
  case ButtonRelease:
    // Notify the embedder that we want the input focus
    send_xembed_message(XtDisplay(w), toolkit->browser_window,
                        XEMBED_REQUEST_FOCUS, 0, 0, 0);
    break;
  }
}

/* ====================================================================== */
/* === XPCOM glue === */
/* ====================================================================== */

#if defined(__GNUC__) && (__GNUC__ > 2)
#define NS_LIKELY(x)   (__builtin_expect((x), 1))
#define NS_UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define NS_LIKELY(x)   (x)
#define NS_UNLIKELY(x) (x)
#endif

#define NS_FAILED(_nsresult)    (NS_UNLIKELY((_nsresult) & 0x80000000))
#define NS_SUCCEEDED(_nsresult) (NS_LIKELY(!((_nsresult) & 0x80000000)))

typedef uint32_t nsresult;
typedef struct nsIServiceManager nsIServiceManager;
extern nsresult NS_GetServiceManager(nsIServiceManager **result);

/* ====================================================================== */
/* === Window utilities === */
/*
====================================================================== */

// Reconstruct window attributes
static int create_window_attributes(NPSetWindowCallbackStruct *ws_info)
{
  if (ws_info == NULL)
    return -1;

  GdkVisual *gdk_visual;
  if (ws_info->visual)
    gdk_visual = gdkx_visual_get((uintptr_t)ws_info->visual);
  else
    gdk_visual = gdk_visual_get_system();
  if (gdk_visual == NULL) {
    npw_printf("ERROR: could not reconstruct XVisual from visualID\n");
    return -2;
  }

  ws_info->display = x_display;
  ws_info->visual = gdk_x11_visual_get_xvisual(gdk_visual);
  return 0;
}

// Destroy window attributes struct
static void destroy_window_attributes(NPSetWindowCallbackStruct *ws_info)
{
  if (ws_info == NULL)
    return;
  NPW_MemFree(ws_info);
}

// Fix size hints in NPWindow (Flash Player doesn't like null width)
static void fixup_size_hints(PluginInstance *plugin)
{
  NPWindow *window = &plugin->window;

  // check global hints (got through EMBED plugin args)
  if (window->width == 0 || window->height == 0) {
    if (plugin->width && plugin->height) {
      window->width = plugin->width;
      window->height = plugin->height;
      return;
    }
  }

  // check actual window size and commit back to plugin data
  if (window->window && (window->width == 0 || window->height == 0)) {
    XWindowAttributes win_attr;
    if (XGetWindowAttributes(x_display, (Window)window->window, &win_attr)) {
      plugin->width = window->width = win_attr.width;
      plugin->height = window->height = win_attr.height;
      return;
    }
  }

  if (window->width == 0 || window->height == 0)
    npw_printf("WARNING: grmpf, despite much effort, I could not determine the actual plugin area size...\n");
}

// Create a new window from NPWindow
static int create_window(PluginInstance *plugin, NPWindow *window)
{
  // XXX destroy previous window here?
  if (plugin->is_windowless) {
    destroy_window_attributes(plugin->window.ws_info);
    plugin->window.ws_info = NULL;
  }
  assert(plugin->window.ws_info == NULL);

  // cache new window information and reconstruct window attributes
  NPSetWindowCallbackStruct *ws_info;
  if ((ws_info = NPW_MemClone(NPSetWindowCallbackStruct, window->ws_info)) == NULL)
    return -1;
  if (create_window_attributes(ws_info) < 0) {
    NPW_MemFree(ws_info);
    return -1;
  }
  memcpy(&plugin->window, window, sizeof(*window));
  window = &plugin->window;
  window->ws_info = ws_info;
  fixup_size_hints(plugin);

  // that's all for windowless plugins
  if (plugin->is_windowless)
    return 0;

  // create the new window
  if (plugin->use_xembed) {
    GtkData *toolkit = calloc(1, sizeof(*toolkit));
    if (toolkit == NULL)
      return -1;
    toolkit->container = gtk_plug_new((GdkNativeWindow)window->window);
    if (toolkit->container == NULL) {
      free(toolkit);
      return -1;
    }
    gtk_widget_set_size_request(toolkit->container, window->width, window->height);
    gtk_widget_show(toolkit->container);
    toolkit->socket = gtk_socket_new();
    if (toolkit->socket == NULL) {
      gtk_widget_destroy(toolkit->container);
      free(toolkit);
      return -1;
    }
    gtk_widget_show(toolkit->socket);
    gtk_container_add(GTK_CONTAINER(toolkit->container), toolkit->socket);
    gtk_widget_show_all(toolkit->container);
    window->window = (void *)gtk_socket_get_id(GTK_SOCKET(toolkit->socket));
    plugin->toolkit_data = toolkit;
#if USE_XEMBED_HACK
    // don't let the browser kill our window out of NPP_Destroy() scope
    g_signal_connect(toolkit->container, "delete-event", G_CALLBACK(gtk_true), NULL);
#endif
    // make sure we don't try to destroy the widget again in destroy_window()
    g_signal_connect(toolkit->container, "destroy", G_CALLBACK(gtk_widget_destroyed), &toolkit->container);
    // keep the socket as the plugin tries to destroy the widget itself
    g_signal_connect(toolkit->socket, "plug_removed", G_CALLBACK(gtk_true), NULL);
    return 0;
  }
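  // What follows is the fallback for browsers that do not speak XEmbed:
  // build a bare Xt hierarchy (an override-redirect shell wrapping a
  // composite "form") and reparent the shell into the browser-supplied
  // X window. The form, not the shell, is what the wrapped plugin
  // ultimately draws into.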
  XtData *toolkit = calloc(1, sizeof(*toolkit));
  if (toolkit == NULL)
    return -1;

  String app_name, app_class;
  XtGetApplicationNameAndClass(x_display, &app_name, &app_class);

  Widget top_widget = XtVaAppCreateShell("drawingArea", app_class,
                                         topLevelShellWidgetClass, x_display,
                                         XtNoverrideRedirect, True,
                                         XtNborderWidth, 0,
                                         XtNbackgroundPixmap, None,
                                         XtNwidth, window->width,
                                         XtNheight, window->height,
                                         NULL);

  Widget form = XtVaCreateManagedWidget("form", compositeWidgetClass, top_widget,
                                        XtNdepth, ws_info->depth,
                                        XtNvisual, ws_info->visual,
                                        XtNcolormap, ws_info->colormap,
                                        XtNborderWidth, 0,
                                        XtNbackgroundPixmap, None,
                                        XtNwidth, window->width,
                                        XtNheight, window->height,
                                        NULL);

  XtRealizeWidget(top_widget);
  XReparentWindow(x_display, XtWindow(top_widget), (Window)window->window, 0, 0);
  XtRealizeWidget(form);

  XSelectInput(x_display, XtWindow(top_widget), 0x0fffff);
  XtAddEventHandler(top_widget, (SubstructureNotifyMask|KeyPress|KeyRelease), True,
                    xt_client_event_handler, toolkit);
  XtAddEventHandler(form, (ButtonReleaseMask), True,
                    xt_client_event_handler, toolkit);
  xt_client_set_info(form, 0);

  plugin->toolkit_data = toolkit;
  toolkit->top_widget = top_widget;
  toolkit->form = form;
  toolkit->browser_window = (Window)window->window;
  window->window = (void *)XtWindow(form);
  return 0;
}

// Update window information from NPWindow
static int update_window(PluginInstance *plugin, NPWindow *window)
{
  if (plugin->is_windowless) {
    npw_printf("ERROR: update_window() called for windowless plugin\n");
    return -1;
  }
  if (window->ws_info == NULL) {
    npw_printf("ERROR: no window attributes for window %p\n", window->window);
    return -1;
  }

  // always synchronize window attributes
  NPSetWindowCallbackStruct *ws_info = plugin->window.ws_info;
  memcpy(ws_info, window->ws_info, sizeof(*ws_info));
  create_window_attributes(ws_info);

  // synchronize cliprect
  memcpy(&plugin->window.clipRect, &window->clipRect, sizeof(window->clipRect));

  // synchronize window position, if it changed
  if (plugin->window.x != window->x || plugin->window.y != window->y) {
    plugin->window.x = window->x;
    plugin->window.y = window->y;
  }

  // synchronize window size, if it changed
  if (plugin->window.width != window->width || plugin->window.height != window->height) {
    plugin->window.width = window->width;
    plugin->window.height = window->height;
    if (plugin->toolkit_data) {
      if (plugin->use_xembed) {
        // window size changes are already caught per the XEMBED protocol
      }
      else {
        XtData *toolkit = (XtData *)plugin->toolkit_data;
        if (toolkit->form)
          XtResizeWidget(toolkit->form, plugin->window.width, plugin->window.height, 0);
        if (toolkit->top_widget)
          XtResizeWidget(toolkit->top_widget, plugin->window.width, plugin->window.height, 0);
      }
    }
  }
  return 0;
}

// Destroy window
static void destroy_window(PluginInstance *plugin)
{
  if (plugin->toolkit_data) {
    if (plugin->use_xembed) {
      GtkData *toolkit = (GtkData *)plugin->toolkit_data;
      if (toolkit->container) {
        gdk_flush();
        gtk_widget_destroy(toolkit->container);
        gdk_flush();
        toolkit->container = NULL;
      }
    }
    else {
      XtData *toolkit = (XtData *)plugin->toolkit_data;
      if (toolkit->top_widget) {
        XSync(x_display, False);
        XtUnrealizeWidget(toolkit->top_widget);
        XtDestroyWidget(toolkit->top_widget);
        XSync(x_display, False);
        toolkit->top_widget = None;
      }
    }
    free(plugin->toolkit_data);
    plugin->toolkit_data = NULL;
  }

  if (plugin->window.ws_info) {
    destroy_window_attributes(plugin->window.ws_info);
    plugin->window.ws_info = NULL;
  }
}

/* ====================================================================== */
/* === Browser side plug-in API === */
/*
====================================================================== */ static char *g_user_agent = NULL; // Does browser have specified feature? #define NPN_HAS_FEATURE(FEATURE) ((mozilla_funcs.version & 0xff) >= NPVERS_HAS_##FEATURE) // Netscape exported functions static NPNetscapeFuncs mozilla_funcs; // Forces a repaint message for a windowless plug-in static void g_NPN_ForceRedraw(NPP instance) { D(bug("NPN_ForceRedraw instance=%p\n", instance)); NPW_UNIMPLEMENTED(); } // Asks the browser to create a stream for the specified URL static NPError invoke_NPN_GetURL(PluginInstance *plugin, const char *url, const char *target) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NPERR_GENERIC_ERROR); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_GET_URL, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_STRING, url, RPC_TYPE_STRING, target, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetURL() invoke", error); return NPERR_GENERIC_ERROR; } int32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetURL() wait for reply", error); return NPERR_GENERIC_ERROR; } return ret; } static NPError g_NPN_GetURL(NPP instance, const char *url, const char *target) { if (!thread_check()) { npw_printf("WARNING: NPN_GetURL not called from the main thread\n"); return NPERR_INVALID_INSTANCE_ERROR; } if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return NPERR_INVALID_INSTANCE_ERROR; D(bugiI("NPN_GetURL instance=%p\n", instance)); npw_plugin_instance_ref(plugin); NPError ret = invoke_NPN_GetURL(plugin, url, target); npw_plugin_instance_unref(plugin); D(bugiD("NPN_GetURL return: %d [%s]\n", ret, string_of_NPError(ret))); return ret; } // Requests creation of a new stream with the contents of the specified URL static NPError invoke_NPN_GetURLNotify(PluginInstance *plugin, const char *url, const char *target, void *notifyData) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NPERR_GENERIC_ERROR); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_GET_URL_NOTIFY, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_STRING, url, RPC_TYPE_STRING, target, RPC_TYPE_NP_NOTIFY_DATA, notifyData, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetURLNotify() invoke", error); return NPERR_GENERIC_ERROR; } int32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetURLNotify() wait for reply", error); return NPERR_GENERIC_ERROR; } return ret; } static NPError g_NPN_GetURLNotify(NPP instance, const char *url, const char *target, void *notifyData) { if (!thread_check()) { npw_printf("WARNING: NPN_GetURLNotify not called from the main thread\n"); return NPERR_INVALID_INSTANCE_ERROR; } if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return NPERR_INVALID_INSTANCE_ERROR; D(bugiI("NPN_GetURLNotify instance=%p\n", instance)); npw_plugin_instance_ref(plugin); NPError ret = invoke_NPN_GetURLNotify(plugin, url, target, notifyData); npw_plugin_instance_unref(plugin); D(bugiD("NPN_GetURLNotify return: %d [%s]\n", ret, string_of_NPError(ret))); return ret; } // Allows the plug-in to query the browser for information static NPError invoke_NPN_GetValue(PluginInstance *plugin, 
                    NPNVariable variable, void *value)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection),
                         NPERR_GENERIC_ERROR);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_GET_VALUE,
                                RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin,
                                RPC_TYPE_UINT32, variable,
                                RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_GetValue() invoke", error);
    return NPERR_GENERIC_ERROR;
  }

  // initialized so that an unhandled variable type cannot return garbage
  int32_t ret = NPERR_GENERIC_ERROR;
  switch (rpc_type_of_NPNVariable(variable)) {
  case RPC_TYPE_UINT32:
    {
      uint32_t n = 0;
      error = rpc_method_wait_for_reply(g_rpc_connection,
                                        RPC_TYPE_INT32, &ret,
                                        RPC_TYPE_UINT32, &n,
                                        RPC_TYPE_INVALID);
      if (error != RPC_ERROR_NO_ERROR) {
        npw_perror("NPN_GetValue() wait for reply", error);
        ret = NPERR_GENERIC_ERROR;
      }
      D(bug("-> value: %u\n", n));
      *((unsigned int *)value) = n;
      break;
    }
  case RPC_TYPE_BOOLEAN:
    {
      uint32_t b = 0;
      error = rpc_method_wait_for_reply(g_rpc_connection,
                                        RPC_TYPE_INT32, &ret,
                                        RPC_TYPE_BOOLEAN, &b,
                                        RPC_TYPE_INVALID);
      if (error != RPC_ERROR_NO_ERROR) {
        npw_perror("NPN_GetValue() wait for reply", error);
        ret = NPERR_GENERIC_ERROR;
      }
      D(bug("-> value: %s\n", b ? "true" : "false"));
      *((NPBool *)value) = b ? TRUE : FALSE;
      break;
    }
  case RPC_TYPE_NP_OBJECT:
    {
      NPObject *npobj = NULL;
      error = rpc_method_wait_for_reply(g_rpc_connection,
                                        RPC_TYPE_INT32, &ret,
                                        RPC_TYPE_NP_OBJECT, &npobj,
                                        RPC_TYPE_INVALID);
      if (error != RPC_ERROR_NO_ERROR) {
        npw_perror("NPN_GetValue() wait for reply", error);
        ret = NPERR_GENERIC_ERROR;
      }
      D(bug("-> value: <object %p>\n", npobj));
      *((NPObject **)value) = npobj;
      break;
    }
  }
  return ret;
}

static NPError
g_NPN_GetValue_real(NPP instance, NPNVariable variable, void *value)
{
  PluginInstance *plugin = NULL;
  if (instance)
    plugin = PLUGIN_INSTANCE(instance);

  npw_plugin_instance_ref(plugin);
  NPError ret = invoke_NPN_GetValue(plugin, variable, value);
  npw_plugin_instance_unref(plugin);
  return ret;
}

static NPError
g_NPN_GetValue(NPP instance, NPNVariable variable, void *value)
{
  D(bug("NPN_GetValue instance=%p, variable=%d [%s]\n", instance, variable, string_of_NPNVariable(variable)));

  if (!thread_check()) {
    npw_printf("WARNING: NPN_GetValue not called from the main thread\n");
    return NPERR_INVALID_INSTANCE_ERROR;
  }

  PluginInstance *plugin = NULL;
  if (instance)
    plugin = PLUGIN_INSTANCE(instance);

  switch (variable) {
  case NPNVxDisplay:
    *(void **)value = x_display;
    break;
  case NPNVxtAppContext:
    *(void **)value = XtDisplayToApplicationContext(x_display);
    break;
  case NPNVToolkit:
    *(NPNToolkitType *)value = NPW_TOOLKIT;
    break;
#if USE_XPCOM
  case NPNVserviceManager: {
    nsIServiceManager *sm;
    int ret = NS_GetServiceManager(&sm);
    if (NS_FAILED(ret)) {
      npw_printf("WARNING: NS_GetServiceManager failed\n");
      return NPERR_GENERIC_ERROR;
    }
    *(nsIServiceManager **)value = sm;
    break;
  }
  case NPNVDOMWindow:
  case NPNVDOMElement:
    npw_printf("WARNING: %s is not supported by NPN_GetValue()\n", string_of_NPNVariable(variable));
    return NPERR_INVALID_PARAM;
#endif
  case NPNVnetscapeWindow:
    if (plugin == NULL) {
      npw_printf("ERROR: NPNVnetscapeWindow requires a non NULL instance\n");
      return NPERR_INVALID_INSTANCE_ERROR;
    }
    if (plugin->browser_toplevel == NULL) {
      GdkNativeWindow netscape_xid = None;
      NPError error = g_NPN_GetValue_real(instance, variable, &netscape_xid);
      if (error != NPERR_NO_ERROR)
        return error;
      if (netscape_xid == None)
        return NPERR_GENERIC_ERROR;
      plugin->browser_toplevel = gdk_window_foreign_new(netscape_xid);
      if (plugin->browser_toplevel == NULL)
        return NPERR_GENERIC_ERROR;
    }
    *((GdkNativeWindow *)value) = GDK_WINDOW_XWINDOW(plugin->browser_toplevel);
    break;
#if
ALLOW_WINDOWLESS_PLUGINS case NPNVSupportsWindowless: #endif case NPNVSupportsXEmbedBool: case NPNVWindowNPObject: case NPNVPluginElementNPObject: case NPNVprivateModeBool: case NPNVsupportsAdvancedKeyHandling: return g_NPN_GetValue_real(instance, variable, value); default: switch (variable & 0xff) { case 13: /* NPNVToolkit */ if (NPW_TOOLKIT == NPNVGtk2) { // Gtk2 does not need to depend on a specific C++ ABI *(NPNToolkitType *)value = NPW_TOOLKIT; return NPERR_NO_ERROR; } break; } D(bug("WARNING: unhandled variable %d (%s) in NPN_GetValue()\n", variable, string_of_NPNVariable(variable))); return NPERR_INVALID_PARAM; } return NPERR_NO_ERROR; } // Invalidates specified drawing area prior to repainting or refreshing a windowless plug-in static void invoke_NPN_InvalidateRect(PluginInstance *plugin, NPRect *invalidRect) { npw_return_if_fail(rpc_method_invoke_possible(g_rpc_connection)); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_INVALIDATE_RECT, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_RECT, invalidRect, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_InvalidateRect() invoke", error); return; } error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_InvalidateRect() wait for reply", error); return; } } static void g_NPN_InvalidateRect(NPP instance, NPRect *invalidRect) { if (!thread_check()) { npw_printf("WARNING: NPN_InvalidateRect not called from the main thread\n"); return; } if (instance == NULL) return; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return; if (invalidRect == NULL) return; D(bugiI("NPN_InvalidateRect instance=%p\n", PLUGIN_INSTANCE_NPP(plugin))); npw_plugin_instance_ref(plugin); invoke_NPN_InvalidateRect(plugin, invalidRect); npw_plugin_instance_unref(plugin); D(bugiD("NPN_InvalidateRect done\n")); } // Invalidates specified region prior to repainting or refreshing a windowless plug-in static void g_NPN_InvalidateRegion(NPP instance, NPRegion invalidRegion) { D(bug("NPN_InvalidateRegion instance=%p\n", instance)); NPW_UNIMPLEMENTED(); } // Allocates memory from the browser's memory space static void * g_NPN_MemAlloc(uint32_t size) { D(bugiI("NPN_MemAlloc size=%d\n", size)); void *ptr = NPW_MemAlloc(size); D(bugiD("NPN_MemAlloc return: %p\n", ptr)); return ptr; } // Requests that the browser free a specified amount of memory static uint32_t g_NPN_MemFlush(uint32_t size) { D(bug("NPN_MemFlush size=%d\n", size)); return 0; } // Deallocates a block of allocated memory static void g_NPN_MemFree(void *ptr) { D(bugiI("NPN_MemFree ptr=%p\n", ptr)); NPW_MemFree(ptr); D(bugiD("NPN_MemFree done\n")); } // Posts data to a URL static NPError invoke_NPN_PostURL(PluginInstance *plugin, const char *url, const char *target, uint32_t len, const char *buf, NPBool file) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NPERR_GENERIC_ERROR); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_POST_URL, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_STRING, url, RPC_TYPE_STRING, target, RPC_TYPE_ARRAY, RPC_TYPE_CHAR, len, buf, RPC_TYPE_BOOLEAN, file, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_PostURL() invoke", error); return NPERR_GENERIC_ERROR; } int32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_PostURL() wait for reply", error); return NPERR_GENERIC_ERROR; } return ret; } static NPError 
g_NPN_PostURL(NPP instance, const char *url, const char *target, uint32_t len, const char *buf, NPBool file)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_PostURL not called from the main thread\n");
    return NPERR_INVALID_INSTANCE_ERROR;
  }

  if (instance == NULL)
    return NPERR_INVALID_INSTANCE_ERROR;

  PluginInstance *plugin = PLUGIN_INSTANCE(instance);
  if (plugin == NULL)
    return NPERR_INVALID_INSTANCE_ERROR;

  D(bugiI("NPN_PostURL instance=%p\n", instance));
  npw_plugin_instance_ref(plugin);
  NPError ret = invoke_NPN_PostURL(plugin, url, target, len, buf, file);
  npw_plugin_instance_unref(plugin);
  D(bugiD("NPN_PostURL return: %d [%s]\n", ret, string_of_NPError(ret)));
  return ret;
}

// Posts data to a URL, and receives notification of the result
static NPError
invoke_NPN_PostURLNotify(PluginInstance *plugin, const char *url, const char *target,
                         uint32_t len, const char *buf, NPBool file, void *notifyData)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection),
                         NPERR_GENERIC_ERROR);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_POST_URL_NOTIFY,
                                RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin,
                                RPC_TYPE_STRING, url,
                                RPC_TYPE_STRING, target,
                                RPC_TYPE_ARRAY, RPC_TYPE_CHAR, len, buf,
                                RPC_TYPE_BOOLEAN, file,
                                RPC_TYPE_NP_NOTIFY_DATA, notifyData,
                                RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_PostURLNotify() invoke", error);
    return NPERR_GENERIC_ERROR;
  }

  int32_t ret;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_INT32, &ret,
                                    RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_PostURLNotify() wait for reply", error);
    return NPERR_GENERIC_ERROR;
  }
  return ret;
}

static NPError
g_NPN_PostURLNotify(NPP instance, const char *url, const char *target,
                    uint32_t len, const char *buf, NPBool file, void *notifyData)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_PostURLNotify not called from the main thread\n");
    return NPERR_INVALID_INSTANCE_ERROR;
  }

  if (instance == NULL)
    return NPERR_INVALID_INSTANCE_ERROR;

  PluginInstance *plugin = PLUGIN_INSTANCE(instance);
  if (plugin == NULL)
    return NPERR_INVALID_INSTANCE_ERROR;

  D(bugiI("NPN_PostURLNotify instance=%p\n", instance));
  npw_plugin_instance_ref(plugin);
  NPError ret = invoke_NPN_PostURLNotify(plugin, url, target, len, buf, file, notifyData);
  npw_plugin_instance_unref(plugin);
  D(bugiD("NPN_PostURLNotify return: %d [%s]\n", ret, string_of_NPError(ret)));
  return ret;
}

// Reloads all plug-ins in the Plugins directory
static void
g_NPN_ReloadPlugins(NPBool reloadPages)
{
  D(bug("NPN_ReloadPlugins reloadPages=%d\n", reloadPages));
  NPW_UNIMPLEMENTED();
}

// Returns the Java execution environment
static void *
g_NPN_GetJavaEnv(void)
{
  D(bug("NPN_GetJavaEnv\n"));
  return NULL;
}

// Returns the Java object associated with the plug-in instance
static void *
g_NPN_GetJavaPeer(NPP instance)
{
  D(bug("NPN_GetJavaPeer instance=%p\n", instance));
  return NULL;
}

// Requests a range of bytes for a seekable stream
static NPError
invoke_NPN_RequestRead(NPStream *stream, NPByteRange *rangeList)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection),
                         NPERR_GENERIC_ERROR);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_REQUEST_READ,
                                RPC_TYPE_NP_STREAM, stream,
                                RPC_TYPE_NP_BYTE_RANGE, rangeList,
                                RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_RequestRead() invoke", error);
    return NPERR_GENERIC_ERROR;
  }

  int32_t ret;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_INT32, &ret,
                                    RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
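    // The reply never arrived (or was malformed); report a generic
    // NPAPI error rather than a stale partial result.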
npw_perror("NPN_RequestRead() wait for reply", error); return NPERR_GENERIC_ERROR; } return ret; } static NPError g_NPN_RequestRead(NPStream *stream, NPByteRange *rangeList) { if (!thread_check()) { npw_printf("WARNING: NPN_RequestRead not called from the main thread\n"); return NPERR_INVALID_INSTANCE_ERROR; } if (stream == NULL || stream->ndata == NULL || rangeList == NULL) return NPERR_INVALID_PARAM; D(bugiI("NPN_RequestRead stream=%p\n", stream)); NPError ret = invoke_NPN_RequestRead(stream, rangeList); D(bugiD("NPN_RequestRead return: %d [%s]\n", ret, string_of_NPError(ret))); return ret; } // Sets various modes of plug-in operation static NPError invoke_NPN_SetValue(PluginInstance *plugin, NPPVariable variable, void *value) { switch (rpc_type_of_NPPVariable(variable)) { case RPC_TYPE_BOOLEAN: break; default: D(bug("WARNING: unhandled variable %d in NPN_SetValue()\n", variable)); return NPERR_INVALID_PARAM; } npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NPERR_GENERIC_ERROR); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_SET_VALUE, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_UINT32, variable, RPC_TYPE_BOOLEAN, (uint32_t)(uintptr_t)value, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_SetValue() invoke", error); return NPERR_GENERIC_ERROR; } int32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_SetValue() wait for reply", error); return NPERR_GENERIC_ERROR; } return ret; } static NPError g_NPN_SetValue(NPP instance, NPPVariable variable, void *value) { if (!thread_check()) { npw_printf("WARNING: NPN_SetValue not called from the main thread\n"); return NPERR_INVALID_INSTANCE_ERROR; } if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return NPERR_INVALID_INSTANCE_ERROR; D(bugiI("NPN_SetValue instance=%p, variable=%d [%s]\n", instance, variable, string_of_NPPVariable(variable))); npw_plugin_instance_ref(plugin); NPError ret = invoke_NPN_SetValue(plugin, variable, value); npw_plugin_instance_unref(plugin); D(bugiD("NPN_SetValue return: %d [%s]\n", ret, string_of_NPError(ret))); return ret; } // Displays a message on the status line of the browser window static void invoke_NPN_Status(PluginInstance *plugin, const char *message) { npw_return_if_fail(rpc_method_invoke_possible(g_rpc_connection)); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_STATUS, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_STRING, message, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_Status() invoke", error); return; } error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_Status() wait for reply", error); return; } } static void g_NPN_Status(NPP instance, const char *message) { if (!thread_check()) { npw_printf("WARNING: NPN_Status not called from the main thread\n"); return; } PluginInstance *plugin = NULL; if (instance) plugin = PLUGIN_INSTANCE(instance); D(bugiI("NPN_Status instance=%p, message='%s'\n", instance, message)); npw_plugin_instance_ref(plugin); invoke_NPN_Status(plugin, message); npw_plugin_instance_unref(plugin); D(bugiD("NPN_Status done\n")); } // Returns the browser's user agent field static char * invoke_NPN_UserAgent(void) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NULL); int error = rpc_method_invoke(g_rpc_connection, 
RPC_METHOD_NPN_USER_AGENT, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_UserAgent() invoke", error); return NULL; } char *user_agent; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_STRING, &user_agent, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_UserAgent() wait for reply", error); return NULL; } return user_agent; } static const char * g_NPN_UserAgent(NPP instance) { if (!thread_check()) { npw_printf("WARNING: NPN_UserAgent not called from the main thread\n"); return NULL; } D(bugiI("NPN_UserAgent instance=%p\n", instance)); if (g_user_agent == NULL) g_user_agent = invoke_NPN_UserAgent(); D(bugiD("NPN_UserAgent return: '%s'\n", g_user_agent)); return g_user_agent; } // Requests the creation of a new data stream produced by the plug-in and consumed by the browser static NPError invoke_NPN_NewStream(PluginInstance *plugin, NPMIMEType type, const char *target, NPStream **pstream) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NPERR_GENERIC_ERROR); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_NEW_STREAM, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_STRING, type, RPC_TYPE_STRING, target, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_NewStream() invoke", error); return NPERR_OUT_OF_MEMORY_ERROR; } int32_t ret; uint32_t stream_id; char *url; uint32_t end; uint32_t lastmodified; void *notifyData; char *headers; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_UINT32, &stream_id, RPC_TYPE_STRING, &url, RPC_TYPE_UINT32, &end, RPC_TYPE_UINT32, &lastmodified, RPC_TYPE_NP_NOTIFY_DATA, &notifyData, RPC_TYPE_STRING, &headers, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_NewStream() wait for reply", error); return NPERR_GENERIC_ERROR; } NPStream *stream = NULL; if (ret == NPERR_NO_ERROR) { if ((stream = malloc(sizeof(*stream))) == NULL) return NPERR_OUT_OF_MEMORY_ERROR; memset(stream, 0, sizeof(*stream)); StreamInstance *stream_ndata; if ((stream_ndata = malloc(sizeof(*stream_ndata))) == NULL) { free(stream); return NPERR_OUT_OF_MEMORY_ERROR; } stream->ndata = stream_ndata; stream->url = url; stream->end = end; stream->lastmodified = lastmodified; stream->notifyData = notifyData; stream->headers = headers; memset(stream_ndata, 0, sizeof(*stream_ndata)); stream_ndata->stream_id = stream_id; id_link(stream_id, stream_ndata); stream_ndata->stream = stream; stream_ndata->is_plugin_stream = 1; } else { if (url) free(url); if (headers) free(headers); } *pstream = stream; return ret; } static NPError g_NPN_NewStream(NPP instance, NPMIMEType type, const char *target, NPStream **stream) { if (!thread_check()) { npw_printf("WARNING: NPN_NewStream not called from the main thread\n"); return NPERR_INVALID_INSTANCE_ERROR; } if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return NPERR_INVALID_INSTANCE_ERROR; if (stream == NULL) return NPERR_INVALID_PARAM; *stream = NULL; D(bugiI("NPN_NewStream instance=%p\n", instance)); npw_plugin_instance_ref(plugin); NPError ret = invoke_NPN_NewStream(plugin, type, target, stream); npw_plugin_instance_unref(plugin); D(bugiD("NPN_NewStream return: %d [%s]\n", ret, string_of_NPError(ret))); return ret; } // Closes and deletes a stream static NPError invoke_NPN_DestroyStream(PluginInstance *plugin, NPStream *stream, NPError reason) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), 
NPERR_GENERIC_ERROR); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_DESTROY_STREAM, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_STREAM, stream, RPC_TYPE_INT32, (int32_t)reason, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_DestroyStream() invoke", error); return NPERR_GENERIC_ERROR; } int32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_DestroyStream() wait for reply", error); return NPERR_GENERIC_ERROR; } return ret; } static NPError g_NPN_DestroyStream(NPP instance, NPStream *stream, NPError reason) { if (!thread_check()) { npw_printf("WARNING: NPN_DestroyStream not called from the main thread\n"); return NPERR_INVALID_INSTANCE_ERROR; } if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return NPERR_INVALID_INSTANCE_ERROR; if (stream == NULL) return NPERR_INVALID_PARAM; D(bugiI("NPN_DestroyStream instance=%p, stream=%p, reason=%s\n", instance, stream, string_of_NPReason(reason))); npw_plugin_instance_ref(plugin); NPError ret = invoke_NPN_DestroyStream(plugin, stream, reason); npw_plugin_instance_unref(plugin); D(bugiD("NPN_DestroyStream return: %d [%s]\n", ret, string_of_NPError(ret))); // Mozilla calls NPP_DestroyStream() for its streams, keep stream // info in that case StreamInstance *stream_ndata = stream->ndata; if (stream_ndata && stream_ndata->is_plugin_stream) { id_remove(stream_ndata->stream_id); free(stream_ndata); free((char *)stream->url); free((char *)stream->headers); free(stream); } return ret; } // Pushes data into a stream produced by the plug-in and consumed by the browser static int invoke_NPN_Write(PluginInstance *plugin, NPStream *stream, int32_t len, void *buf) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), -1); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_WRITE, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_STREAM, stream, RPC_TYPE_ARRAY, RPC_TYPE_CHAR, len, buf, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_Write() invoke", error); return -1; } int32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_Write() wait for reply", error); return -1; } return ret; } static int32_t g_NPN_Write(NPP instance, NPStream *stream, int32_t len, void *buf) { if (!thread_check()) { npw_printf("WARNING: NPN_Write not called from the main thread\n"); return -1; } if (instance == NULL) return -1; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return -1; if (stream == NULL) return -1; D(bugiI("NPN_Write instance=%p, stream=%p, len=%d, buf=%p\n", instance, stream, len, buf)); npw_plugin_instance_ref(plugin); int32_t ret = invoke_NPN_Write(plugin, stream, len, buf); npw_plugin_instance_unref(plugin); D(bugiD("NPN_Write return: %d\n", ret)); return ret; } // Enable popups while executing code where popups should be enabled static void invoke_NPN_PushPopupsEnabledState(PluginInstance *plugin, NPBool enabled) { npw_return_if_fail(rpc_method_invoke_possible(g_rpc_connection)); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_PUSH_POPUPS_ENABLED_STATE, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_UINT32, (uint32_t)enabled, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_PushPopupsEnabledState() invoke", error); return; } error = 
      rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR)
    npw_perror("NPN_PushPopupsEnabledState() wait for reply", error);
}

static void
g_NPN_PushPopupsEnabledState(NPP instance, NPBool enabled)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_PushPopupsEnabledState not called from the main thread\n");
    return;
  }

  if (instance == NULL)
    return;

  PluginInstance *plugin = PLUGIN_INSTANCE(instance);
  if (plugin == NULL)
    return;

  D(bugiI("NPN_PushPopupsEnabledState instance=%p, enabled=%d\n", instance, enabled));
  npw_plugin_instance_ref(plugin);
  invoke_NPN_PushPopupsEnabledState(plugin, enabled);
  npw_plugin_instance_unref(plugin);
  D(bugiD("NPN_PushPopupsEnabledState done\n"));
}

// Restore popups state
static void
invoke_NPN_PopPopupsEnabledState(PluginInstance *plugin)
{
  npw_return_if_fail(rpc_method_invoke_possible(g_rpc_connection));

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_POP_POPUPS_ENABLED_STATE,
                                RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin,
                                RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_PopPopupsEnabledState() invoke", error);
    return;
  }

  error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR)
    npw_perror("NPN_PopPopupsEnabledState() wait for reply", error);
}

static void
g_NPN_PopPopupsEnabledState(NPP instance)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_PopPopupsEnabledState not called from the main thread\n");
    return;
  }

  if (instance == NULL)
    return;

  PluginInstance *plugin = PLUGIN_INSTANCE(instance);
  if (plugin == NULL)
    return;

  D(bugiI("NPN_PopPopupsEnabledState instance=%p\n", instance));
  npw_plugin_instance_ref(plugin);
  invoke_NPN_PopPopupsEnabledState(plugin);
  npw_plugin_instance_unref(plugin);
  D(bugiD("NPN_PopPopupsEnabledState done\n"));
}

/* ====================================================================== */
/* === NPRuntime glue === */
/* ====================================================================== */

// Allocates a new NPObject
static uint32_t
invoke_NPN_CreateObject(PluginInstance *plugin)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), 0);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_CREATE_OBJECT,
                                RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin,
                                RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_CreateObject() invoke", error);
    return 0;
  }

  uint32_t npobj_id = 0;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_UINT32, &npobj_id,
                                    RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_CreateObject() wait for reply", error);
    return 0;
  }
  return npobj_id;
}

static NPObject *
g_NPN_CreateObject(NPP instance, NPClass *class)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_CreateObject not called from the main thread\n");
    return NULL;
  }

  if (instance == NULL)
    return NULL;

  PluginInstance *plugin = PLUGIN_INSTANCE(instance);
  if (plugin == NULL)
    return NULL;

  if (class == NULL)
    return NULL;

  D(bugiI("NPN_CreateObject\n"));
  npw_plugin_instance_ref(plugin);
  uint32_t npobj_id = invoke_NPN_CreateObject(plugin);
  npw_plugin_instance_unref(plugin);
  assert(npobj_id != 0);
  NPObject *npobj = npobject_new(npobj_id, instance, class);
  D(bugiD("NPN_CreateObject return: %p (refcount: %d)\n", npobj, npobj->referenceCount));
  return npobj;
}
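// The browser holds the authoritative reference count for these
// NPObjects: each retain/release round-trip below returns the new
// count, which the wrapper mirrors into npobj->referenceCount and
// uses to decide when to destroy the local proxy.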
// Increments the reference count of the given NPObject
static uint32_t
invoke_NPN_RetainObject(NPObject *npobj)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), npobj->referenceCount);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_RETAIN_OBJECT,
                                RPC_TYPE_NP_OBJECT, npobj,
                                RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_RetainObject() invoke", error);
    return npobj->referenceCount;
  }

  uint32_t refcount;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_UINT32, &refcount,
                                    RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_RetainObject() wait for reply", error);
    return npobj->referenceCount;
  }
  return refcount;
}

static NPObject *
g_NPN_RetainObject(NPObject *npobj)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_RetainObject not called from the main thread\n");
    return NULL;
  }

  if (npobj == NULL)
    return NULL;

  D(bugiI("NPN_RetainObject npobj=%p\n", npobj));
  uint32_t refcount = invoke_NPN_RetainObject(npobj);
  D(bugiD("NPN_RetainObject return: %p (refcount: %d)\n", npobj, refcount));
  npobj->referenceCount = refcount;
  return npobj;
}

// Decrements the reference count of the given NPObject
static uint32_t
invoke_NPN_ReleaseObject(NPObject *npobj)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), npobj->referenceCount);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_RELEASE_OBJECT,
                                RPC_TYPE_NP_OBJECT, npobj,
                                RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_ReleaseObject() invoke", error);
    return npobj->referenceCount;
  }

  uint32_t refcount;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_UINT32, &refcount,
                                    RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_ReleaseObject() wait for reply", error);
    return npobj->referenceCount;
  }
  return refcount;
}

static void
g_NPN_ReleaseObject_Now(NPObject *npobj)
{
  D(bugiI("NPN_ReleaseObject npobj=%p\n", npobj));
  uint32_t refcount = invoke_NPN_ReleaseObject(npobj);
  D(bugiD("NPN_ReleaseObject done (refcount: %d)\n", refcount));

  if ((npobj->referenceCount = refcount) == 0)
    npobject_destroy(npobj);
}

static void
g_NPN_ReleaseObject_Delayed(NPObject *npobj)
{
  delayed_calls_add(RPC_DELAYED_NPN_RELEASE_OBJECT, npobj);
}

static void
g_NPN_ReleaseObject(NPObject *npobj)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_ReleaseObject not called from the main thread\n");
    return;
  }

  if (npobj == NULL)
    return;

  if (rpc_method_invoke_possible(g_rpc_connection)) {
    D(bug("NPN_ReleaseObject <now>\n"));
    g_NPN_ReleaseObject_Now(npobj);
  }
  else {
    D(bug("NPN_ReleaseObject <delayed>\n"));
    g_NPN_ReleaseObject_Delayed(npobj);
  }
}

// Invokes a method on the given NPObject
static bool
invoke_NPN_Invoke(PluginInstance *plugin, NPObject *npobj, NPIdentifier methodName,
                  const NPVariant *args, uint32_t argCount, NPVariant *result)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_INVOKE,
                                RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin,
                                RPC_TYPE_NP_OBJECT, npobj,
                                RPC_TYPE_NP_IDENTIFIER, &methodName,
                                RPC_TYPE_ARRAY, RPC_TYPE_NP_VARIANT, argCount, args,
                                RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_Invoke() invoke", error);
    return false;
  }

  uint32_t ret;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_UINT32, &ret,
                                    RPC_TYPE_NP_VARIANT, result,
                                    RPC_TYPE_INVALID);
  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_Invoke() wait for reply", error);
    return false;
  }
  return ret;
}

static bool
g_NPN_Invoke(NPP instance, NPObject *npobj, NPIdentifier methodName,
             const NPVariant *args, uint32_t argCount, NPVariant *result)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_Invoke not called from the main thread\n");
    return false;
  }

  if
(instance == NULL) return false; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return false; if (!npobj || !npobj->_class || !npobj->_class->invoke) return false; D(bugiI("NPN_Invoke instance=%p, npobj=%p, methodName=%p\n", instance, npobj, methodName)); print_npvariant_args(args, argCount); npw_plugin_instance_ref(plugin); bool ret = invoke_NPN_Invoke(plugin, npobj, methodName, args, argCount, result); npw_plugin_instance_unref(plugin); gchar *result_str = string_of_NPVariant(result); D(bugiD("NPN_Invoke return: %d (%s)\n", ret, result_str)); g_free(result_str); return ret; } // Invokes the default method on the given NPObject static bool invoke_NPN_InvokeDefault(PluginInstance *plugin, NPObject *npobj, const NPVariant *args, uint32_t argCount, NPVariant *result) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_INVOKE_DEFAULT, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_ARRAY, RPC_TYPE_NP_VARIANT, argCount, args, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_InvokeDefault() invoke", error); return false; } uint32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_UINT32, &ret, RPC_TYPE_NP_VARIANT, result, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_InvokeDefault() wait for reply", error); return false; } return ret; } static bool g_NPN_InvokeDefault(NPP instance, NPObject *npobj, const NPVariant *args, uint32_t argCount, NPVariant *result) { if (!thread_check()) { npw_printf("WARNING: NPN_InvokeDefault not called from the main thread\n"); return false; } if (instance == NULL) return false; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return false; if (!npobj || !npobj->_class || !npobj->_class->invokeDefault) return false; D(bugiI("NPN_InvokeDefault instance=%p, npobj=%p\n", instance, npobj)); print_npvariant_args(args, argCount); npw_plugin_instance_ref(plugin); bool ret = invoke_NPN_InvokeDefault(plugin, npobj, args, argCount, result); npw_plugin_instance_unref(plugin); gchar *result_str = string_of_NPVariant(result); D(bugiD("NPN_InvokeDefault return: %d (%s)\n", ret, result_str)); g_free(result_str); return ret; } // Evaluates a script on the scope of a given NPObject static bool invoke_NPN_Evaluate(PluginInstance *plugin, NPObject *npobj, NPString *script, NPVariant *result) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_EVALUATE, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_NP_STRING, script, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_Evaluate() invoke", error); return false; } uint32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_UINT32, &ret, RPC_TYPE_NP_VARIANT, result, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_Evaluate() wait for reply", error); return false; } return ret; } static bool g_NPN_Evaluate(NPP instance, NPObject *npobj, NPString *script, NPVariant *result) { if (!thread_check()) { npw_printf("WARNING: NPN_Evaluate not called from the main thread\n"); return false; } if (instance == NULL) return false; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return false; if (!npobj) return false; if (!script || !script->UTF8Length || !script->UTF8Characters) return true; // nothing to evaluate D(bugiI("NPN_Evaluate 
instance=%p, npobj=%p\n", instance, npobj)); npw_plugin_instance_ref(plugin); bool ret = invoke_NPN_Evaluate(plugin, npobj, script, result); npw_plugin_instance_unref(plugin); gchar *result_str = string_of_NPVariant(result); D(bugiD("NPN_Evaluate return: %d (%s)\n", ret, result_str)); g_free(result_str); return ret; } // Gets the value of a property on the given NPObject static bool invoke_NPN_GetProperty(PluginInstance *plugin, NPObject *npobj, NPIdentifier propertyName, NPVariant *result) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_GET_PROPERTY, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_NP_IDENTIFIER, &propertyName, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetProperty() invoke", error); return false; } uint32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_UINT32, &ret, RPC_TYPE_NP_VARIANT, result, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetProperty() wait for reply", error); return false; } return ret; } static bool g_NPN_GetProperty(NPP instance, NPObject *npobj, NPIdentifier propertyName, NPVariant *result) { if (!thread_check()) { npw_printf("WARNING: NPN_GetProperty not called from the main thread\n"); return false; } if (instance == NULL) return false; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return false; if (!npobj || !npobj->_class || !npobj->_class->getProperty) return false; D(bugiI("NPN_GetProperty instance=%p, npobj=%p, propertyName=%p\n", instance, npobj, propertyName)); npw_plugin_instance_ref(plugin); bool ret = invoke_NPN_GetProperty(plugin, npobj, propertyName, result); npw_plugin_instance_unref(plugin); gchar *result_str = string_of_NPVariant(result); D(bugiD("NPN_GetProperty return: %d (%s)\n", ret, result_str)); g_free(result_str); return ret; } // Sets the value of a property on the given NPObject static bool invoke_NPN_SetProperty(PluginInstance *plugin, NPObject *npobj, NPIdentifier propertyName, const NPVariant *value) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_SET_PROPERTY, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_NP_IDENTIFIER, &propertyName, RPC_TYPE_NP_VARIANT, value, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_SetProperty() invoke", error); return false; } uint32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_UINT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_SetProperty() wait for reply", error); return false; } return ret; } static bool g_NPN_SetProperty(NPP instance, NPObject *npobj, NPIdentifier propertyName, const NPVariant *value) { if (!thread_check()) { npw_printf("WARNING: NPN_SetProperty not called from the main thread\n"); return false; } if (instance == NULL) return false; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return false; if (!npobj || !npobj->_class || !npobj->_class->setProperty) return false; D(bugiI("NPN_SetProperty instance=%p, npobj=%p, propertyName=%p\n", instance, npobj, propertyName)); npw_plugin_instance_ref(plugin); bool ret = invoke_NPN_SetProperty(plugin, npobj, propertyName, value); npw_plugin_instance_unref(plugin); D(bugiD("NPN_SetProperty return: %d\n", ret)); return ret; } // Removes a property on the given NPObject static bool 
invoke_NPN_RemoveProperty(PluginInstance *plugin, NPObject *npobj, NPIdentifier propertyName) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_REMOVE_PROPERTY, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_NP_IDENTIFIER, &propertyName, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_RemoveProperty() invoke", error); return false; } uint32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_UINT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_RemoveProperty() wait for reply", error); return false; } return ret; } static bool g_NPN_RemoveProperty(NPP instance, NPObject *npobj, NPIdentifier propertyName) { if (!thread_check()) { npw_printf("WARNING: NPN_RemoveProperty not called from the main thread\n"); return false; } if (instance == NULL) return false; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return false; if (!npobj || !npobj->_class || !npobj->_class->removeProperty) return false; D(bugiI("NPN_RemoveProperty instance=%p, npobj=%p, propertyName=%p\n", instance, npobj, propertyName)); npw_plugin_instance_ref(plugin); bool ret = invoke_NPN_RemoveProperty(plugin, npobj, propertyName); npw_plugin_instance_unref(plugin); D(bugiD("NPN_RemoveProperty return: %d\n", ret)); return ret; } // Checks if a given property exists on the given NPObject static bool invoke_NPN_HasProperty(PluginInstance *plugin, NPObject *npobj, NPIdentifier propertyName) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_HAS_PROPERTY, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_NP_IDENTIFIER, &propertyName, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_HasProperty() invoke", error); return false; } uint32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_UINT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_HasProperty() wait for reply", error); return false; } return ret; } static bool g_NPN_HasProperty(NPP instance, NPObject *npobj, NPIdentifier propertyName) { if (!thread_check()) { npw_printf("WARNING: NPN_HasProperty not called from the main thread\n"); return false; } if (instance == NULL) return false; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return false; if (!npobj || !npobj->_class || !npobj->_class->hasProperty) return false; D(bugiI("NPN_HasProperty instance=%p, npobj=%p, propertyName=%p\n", instance, npobj, propertyName)); npw_plugin_instance_ref(plugin); bool ret = invoke_NPN_HasProperty(plugin, npobj, propertyName); npw_plugin_instance_unref(plugin); D(bugiD("NPN_HasProperty return: %d\n", ret)); return ret; } // Checks if a given method exists on the given NPObject static bool invoke_NPN_HasMethod(PluginInstance *plugin, NPObject *npobj, NPIdentifier methodName) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_HAS_METHOD, RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_NP_IDENTIFIER, &methodName, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_HasMethod() invoke", error); return false; } uint32_t ret; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_UINT32, &ret, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { 
npw_perror("NPN_HasMethod() wait for reply", error); return false; } return ret; } static bool g_NPN_HasMethod(NPP instance, NPObject *npobj, NPIdentifier methodName) { if (!thread_check()) { npw_printf("WARNING: NPN_HasMethod not called from the main thread\n"); return false; } if (instance == NULL) return false; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return false; if (!npobj || !npobj->_class || !npobj->_class->hasMethod) return false; D(bugiI("NPN_HasMethod instance=%p, npobj=%p, methodName=%p\n", instance, npobj, methodName)); npw_plugin_instance_ref(plugin); bool ret = invoke_NPN_HasMethod(plugin, npobj, methodName); npw_plugin_instance_unref(plugin); D(bugiD("NPN_HasMethod return: %d\n", ret)); return ret; } // Indicates that a call to one of the plugins NPObjects generated an error static void invoke_NPN_SetException(NPObject *npobj, const NPUTF8 *message) { npw_return_if_fail(rpc_method_invoke_possible(g_rpc_connection)); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_SET_EXCEPTION, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_STRING, message, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_SetException() invoke", error); return; } error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_SetException() wait for reply", error); return; } } static void g_NPN_SetException(NPObject *npobj, const NPUTF8 *message) { if (!thread_check()) { npw_printf("WARNING: NPN_SetException not called from the main thread\n"); return; } D(bugiI("NPN_SetException npobj=%p, message='%s'\n", npobj, message)); invoke_NPN_SetException(npobj, message); D(bugiD("NPN_SetException done\n")); } // Releases the value in the given variant static void g_NPN_ReleaseVariantValue(NPVariant *variant) { D(bugiI("NPN_ReleaseVariantValue\n")); npvariant_clear(variant); D(bugiD("NPN_ReleaseVariantValue done\n")); } // Returns an opaque identifier for the string that is passed in static NPIdentifier invoke_NPN_GetStringIdentifier(const NPUTF8 *name) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NULL); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_GET_STRING_IDENTIFIER, RPC_TYPE_STRING, name, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetStringIdentifier() invoke", error); return NULL; } NPIdentifier ident; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_NP_IDENTIFIER, &ident, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetStringIdentifier() wait for reply", error); return NULL; } return ident; } static NPIdentifier cached_NPN_GetStringIdentifier(const NPUTF8 *name) { NPIdentifier ident; if (!use_npidentifier_cache()) ident = invoke_NPN_GetStringIdentifier(name); #if USE_NPIDENTIFIER_CACHE else if (!npidentifier_cache_has_string(name, &ident)) { ident = invoke_NPN_GetStringIdentifier(name); npidentifier_cache_reserve(1); npidentifier_cache_add_string(ident, name); } #endif return ident; } static NPIdentifier g_NPN_GetStringIdentifier(const NPUTF8 *name) { if (!thread_check()) { npw_printf("WARNING: NPN_GetStringIdentifier not called from the main thread\n"); return NULL; } if (name == NULL) return NULL; D(bugiI("NPN_GetStringIdentifier name='%s'\n", name)); NPIdentifier ret = cached_NPN_GetStringIdentifier(name); D(bugiD("NPN_GetStringIdentifier return: %p\n", ret)); return ret; } // Returns an array of opaque identifiers for the names that are passed in static void 
invoke_NPN_GetStringIdentifiers(const NPUTF8 **names, int32_t nameCount, NPIdentifier *identifiers) { npw_return_if_fail(rpc_method_invoke_possible(g_rpc_connection)); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_GET_STRING_IDENTIFIERS, RPC_TYPE_ARRAY, RPC_TYPE_STRING, nameCount, names, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetStringIdentifiers() invoke", error); return; } uint32_t n_idents; NPIdentifier *idents; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_ARRAY, RPC_TYPE_NP_IDENTIFIER, &n_idents, &idents, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetStringIdentifiers() wait for reply", error); return; } if (identifiers) { if (n_idents != nameCount) { npw_printf("ERROR: NPN_GetStringIdentifiers returned fewer NPIdentifiers than expected\n"); if (n_idents > nameCount) n_idents = nameCount; } for (int i = 0; i < n_idents; i++) identifiers[i] = idents[i]; free(idents); } } static void cached_NPN_GetStringIdentifiers(const NPUTF8 **names, int32_t nameCount, NPIdentifier *identifiers) { /* XXX: could be optimized further */ invoke_NPN_GetStringIdentifiers(names, nameCount, identifiers); #if USE_NPIDENTIFIER_CACHE if (use_npidentifier_cache()) { for (int i = 0; i < nameCount; i++) { NPIdentifier ident = identifiers[i]; if (npidentifier_cache_lookup(ident) == NULL) { npidentifier_cache_reserve(1); npidentifier_cache_add_string(ident, names[i]); } } } #endif } static void g_NPN_GetStringIdentifiers(const NPUTF8 **names, int32_t nameCount, NPIdentifier *identifiers) { if (!thread_check()) { npw_printf("WARNING: NPN_GetStringIdentifiers not called from the main thread\n"); return; } if (names == NULL) return; if (identifiers == NULL) return; D(bugiI("NPN_GetStringIdentifiers names=%p\n", names)); cached_NPN_GetStringIdentifiers(names, nameCount, identifiers); D(bugiD("NPN_GetStringIdentifiers done\n")); } // Returns an opaque identifier for the integer that is passed in static NPIdentifier invoke_NPN_GetIntIdentifier(int32_t intid) { npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NULL); int error = rpc_method_invoke(g_rpc_connection, RPC_METHOD_NPN_GET_INT_IDENTIFIER, RPC_TYPE_INT32, intid, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetIntIdentifier() invoke", error); return NULL; } NPIdentifier ident; error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_NP_IDENTIFIER, &ident, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPN_GetIntIdentifier() wait for reply", error); return NULL; } return ident; } static NPIdentifier cached_NPN_GetIntIdentifier(int32_t intid) { NPIdentifier ident; if (!use_npidentifier_cache()) ident = invoke_NPN_GetIntIdentifier(intid); #if USE_NPIDENTIFIER_CACHE else if (!npidentifier_cache_has_int(intid, &ident)) { ident = invoke_NPN_GetIntIdentifier(intid); npidentifier_cache_reserve(1); npidentifier_cache_add_int(ident, intid); } #endif return ident; } static NPIdentifier g_NPN_GetIntIdentifier(int32_t intid) { if (!thread_check()) { npw_printf("WARNING: NPN_GetIntIdentifier not called from the main thread\n"); return NULL; } D(bugiI("NPN_GetIntIdentifier intid=%d\n", intid)); NPIdentifier ret = cached_NPN_GetIntIdentifier(intid); D(bugiD("NPN_GetIntIdentifier return: %p\n", ret)); return ret; } // Returns true if the given identifier is a string identifier, or false if it is an integer identifier static bool invoke_NPN_IdentifierIsString(NPIdentifier identifier) { 
npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_IDENTIFIER_IS_STRING,
                                RPC_TYPE_NP_IDENTIFIER, &identifier,
                                RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_IdentifierIsString() invoke", error);
    return false;
  }

  uint32_t ret;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_UINT32, &ret,
                                    RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_IdentifierIsString() wait for reply", error);
    return false;
  }

  return ret;
}

static bool cached_NPN_IdentifierIsString(NPIdentifier ident)
{
#if USE_NPIDENTIFIER_CACHE
  if (use_npidentifier_cache()) {
    NPIdentifierInfo *npi = npidentifier_cache_lookup(ident);
    if (npi)
      return npi->string_len > 0;
  }
#endif
  /* cache update is postponed to actual NPN_UTF8FromIdentifier() or
     NPN_IntFromIdentifier() */
  return invoke_NPN_IdentifierIsString(ident);
}

static bool g_NPN_IdentifierIsString(NPIdentifier identifier)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_IdentifierIsString not called from the main thread\n");
    return false;
  }

  D(bugiI("NPN_IdentifierIsString identifier=%p\n", identifier));
  /* go through the cached variant so a cache hit avoids an RPC round-trip */
  bool ret = cached_NPN_IdentifierIsString(identifier);
  D(bugiD("NPN_IdentifierIsString return: %d\n", ret));
  return ret;
}

// Returns a pointer to a UTF-8 string as a sequence of 8-bit units (NPUTF8)
static NPUTF8 *
invoke_NPN_UTF8FromIdentifier(NPIdentifier identifier)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), NULL);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_UTF8_FROM_IDENTIFIER,
                                RPC_TYPE_NP_IDENTIFIER, &identifier,
                                RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_UTF8FromIdentifier() invoke", error);
    return NULL;
  }

  NPUTF8 *str;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_NP_UTF8, &str,
                                    RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_UTF8FromIdentifier() wait for reply", error);
    return NULL;
  }

  return str;
}

static NPUTF8 *
cached_NPN_UTF8FromIdentifier(NPIdentifier identifier)
{
  NPUTF8 *str = NULL;	/* initialized so the cache-disabled build can't return garbage */
  if (!use_npidentifier_cache())
    str = invoke_NPN_UTF8FromIdentifier(identifier);
  else {
#if USE_NPIDENTIFIER_CACHE
    str = npidentifier_cache_get_string_copy(identifier);
    if (str == NULL) {
      str = invoke_NPN_UTF8FromIdentifier(identifier);
      npidentifier_cache_reserve(1);
      npidentifier_cache_add_string(identifier, str);
    }
#endif
  }
  return str;
}

static NPUTF8 *
g_NPN_UTF8FromIdentifier(NPIdentifier identifier)
{
  if (!thread_check()) {
    npw_printf("WARNING: NPN_UTF8FromIdentifier not called from the main thread\n");
    return NULL;
  }

  D(bugiI("NPN_UTF8FromIdentifier identifier=%p\n", identifier));
  NPUTF8 *ret = cached_NPN_UTF8FromIdentifier(identifier);
  D(bugiD("NPN_UTF8FromIdentifier return: '%s'\n", ret));
  return ret;
}

// Returns the integer value for the given integer identifier
// NOTE: if the given identifier is not an integer identifier, the behavior is undefined (we return -1)
static int32_t
invoke_NPN_IntFromIdentifier(NPIdentifier identifier)
{
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), -1);

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_INT_FROM_IDENTIFIER,
                                RPC_TYPE_NP_IDENTIFIER, &identifier,
                                RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_IntFromIdentifier() invoke", error);
    return -1;
  }

  int32_t ret;
  error = rpc_method_wait_for_reply(g_rpc_connection,
                                    RPC_TYPE_INT32, &ret,
                                    RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_IntFromIdentifier() wait for reply",
error); return -1; } return ret; } static int32_t cached_NPN_IntFromIdentifier(NPIdentifier identifier) { int32_t value; if (!use_npidentifier_cache()) value = invoke_NPN_IntFromIdentifier(identifier); else { #if USE_NPIDENTIFIER_CACHE NPIdentifierInfo *npi = npidentifier_cache_lookup(identifier); if (npi) { assert(npi->string_len == 0); value = npi->u.value; } else { value = invoke_NPN_IntFromIdentifier(identifier); npidentifier_cache_reserve(1); npidentifier_cache_add_int(identifier, value); } #endif } return value; } static int32_t g_NPN_IntFromIdentifier(NPIdentifier identifier) { if (!thread_check()) { npw_printf("WARNING: NPN_IntFromIdentifier not called from the main thread\n"); return 0; } D(bugiI("NPN_IntFromIdentifier identifier=%p\n", identifier)); int32_t ret = cached_NPN_IntFromIdentifier(identifier); D(bugiD("NPN_IntFromIdentifier return: %d\n", ret)); return ret; } /* ====================================================================== */ /* === Plug-in side data === */ /* ====================================================================== */ // Functions supplied by the plug-in static NPPluginFuncs plugin_funcs; // Allows the browser to query the plug-in supported formats typedef char * (*NP_GetMIMEDescriptionUPP)(void); static NP_GetMIMEDescriptionUPP g_plugin_NP_GetMIMEDescription = NULL; // Allows the browser to query the plug-in for information typedef NPError (*NP_GetValueUPP)(void *instance, NPPVariable variable, void *value); static NP_GetValueUPP g_plugin_NP_GetValue = NULL; // Provides global initialization for a plug-in typedef NPError (*NP_InitializeUPP)(NPNetscapeFuncs *moz_funcs, NPPluginFuncs *plugin_funcs); static NP_InitializeUPP g_plugin_NP_Initialize = NULL; // Provides global deinitialization for a plug-in typedef NPError (*NP_ShutdownUPP)(void); static NP_ShutdownUPP g_plugin_NP_Shutdown = NULL; /* ====================================================================== */ /* === RPC communication === */ /* ====================================================================== */ // NP_GetMIMEDescription static char * g_NP_GetMIMEDescription(void) { if (g_plugin_NP_GetMIMEDescription == NULL) return NULL; D(bugiI("NP_GetMIMEDescription\n")); char *str = g_plugin_NP_GetMIMEDescription(); D(bugiD("NP_GetMIMEDescription return: %s\n", str ? 
str : "<empty>")); return str; } static int handle_NP_GetMIMEDescription(rpc_connection_t *connection) { D(bug("handle_NP_GetMIMEDescription\n")); int error = rpc_method_get_args(connection, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NP_GetMIMEDescription() get args", error); return error; } char *str = g_NP_GetMIMEDescription(); return rpc_method_send_reply(connection, RPC_TYPE_STRING, str, RPC_TYPE_INVALID); } // NP_GetValue static NPError g_NP_GetValue(NPPVariable variable, void *value) { if (g_plugin_NP_GetValue == NULL) return NPERR_INVALID_FUNCTABLE_ERROR; D(bugiI("NP_GetValue variable=%d [%s]\n", variable, string_of_NPPVariable(variable))); NPError ret = g_plugin_NP_GetValue(NULL, variable, value); D(bugiD("NP_GetValue return: %d\n", ret)); return ret; } static int handle_NP_GetValue(rpc_connection_t *connection) { D(bug("handle_NP_GetValue\n")); int32_t variable; int error = rpc_method_get_args(connection, RPC_TYPE_INT32, &variable, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NP_GetValue() get args", error); return error; } NPError ret = NPERR_GENERIC_ERROR; int variable_type = rpc_type_of_NPPVariable(variable); switch (variable_type) { case RPC_TYPE_STRING: { char *str = NULL; ret = g_NP_GetValue(variable, (void *)&str); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_STRING, str, RPC_TYPE_INVALID); } case RPC_TYPE_INT32: { uint32_t n = 0; ret = g_NP_GetValue(variable, (void *)&n); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INT32, n, RPC_TYPE_INVALID); } case RPC_TYPE_BOOLEAN: { NPBool b = FALSE; ret = g_NP_GetValue(variable, (void *)&b); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_BOOLEAN, b, RPC_TYPE_INVALID); } } npw_printf("ERROR: only basic types are supported in NP_GetValue()\n"); abort(); } // NP_Initialize static NPError g_NP_Initialize(uint32_t version) { if (g_plugin_NP_Initialize == NULL) return NPERR_INVALID_FUNCTABLE_ERROR; memset(&plugin_funcs, 0, sizeof(plugin_funcs)); plugin_funcs.size = sizeof(plugin_funcs); memset(&mozilla_funcs, 0, sizeof(mozilla_funcs)); mozilla_funcs.size = sizeof(mozilla_funcs); mozilla_funcs.version = version; mozilla_funcs.geturl = g_NPN_GetURL; mozilla_funcs.posturl = g_NPN_PostURL; mozilla_funcs.requestread = g_NPN_RequestRead; mozilla_funcs.newstream = g_NPN_NewStream; mozilla_funcs.write = g_NPN_Write; mozilla_funcs.destroystream = g_NPN_DestroyStream; mozilla_funcs.status = g_NPN_Status; mozilla_funcs.uagent = g_NPN_UserAgent; mozilla_funcs.memalloc = g_NPN_MemAlloc; mozilla_funcs.memfree = g_NPN_MemFree; mozilla_funcs.memflush = g_NPN_MemFlush; mozilla_funcs.reloadplugins = g_NPN_ReloadPlugins; mozilla_funcs.getJavaEnv = g_NPN_GetJavaEnv; mozilla_funcs.getJavaPeer = g_NPN_GetJavaPeer; mozilla_funcs.geturlnotify = g_NPN_GetURLNotify; mozilla_funcs.posturlnotify = g_NPN_PostURLNotify; mozilla_funcs.getvalue = g_NPN_GetValue; mozilla_funcs.setvalue = g_NPN_SetValue; mozilla_funcs.invalidaterect = g_NPN_InvalidateRect; mozilla_funcs.invalidateregion = g_NPN_InvalidateRegion; mozilla_funcs.forceredraw = g_NPN_ForceRedraw; mozilla_funcs.pushpopupsenabledstate = g_NPN_PushPopupsEnabledState; mozilla_funcs.poppopupsenabledstate = g_NPN_PopPopupsEnabledState; if (NPN_HAS_FEATURE(NPRUNTIME_SCRIPTING)) { D(bug(" browser supports scripting through npruntime\n")); mozilla_funcs.getstringidentifier = g_NPN_GetStringIdentifier; mozilla_funcs.getstringidentifiers = g_NPN_GetStringIdentifiers; mozilla_funcs.getintidentifier = 
g_NPN_GetIntIdentifier; mozilla_funcs.identifierisstring = g_NPN_IdentifierIsString; mozilla_funcs.utf8fromidentifier = g_NPN_UTF8FromIdentifier; mozilla_funcs.intfromidentifier = g_NPN_IntFromIdentifier; mozilla_funcs.createobject = g_NPN_CreateObject; mozilla_funcs.retainobject = g_NPN_RetainObject; mozilla_funcs.releaseobject = g_NPN_ReleaseObject; mozilla_funcs.invoke = g_NPN_Invoke; mozilla_funcs.invokeDefault = g_NPN_InvokeDefault; mozilla_funcs.evaluate = g_NPN_Evaluate; mozilla_funcs.getproperty = g_NPN_GetProperty; mozilla_funcs.setproperty = g_NPN_SetProperty; mozilla_funcs.removeproperty = g_NPN_RemoveProperty; mozilla_funcs.hasproperty = g_NPN_HasProperty; mozilla_funcs.hasmethod = g_NPN_HasMethod; mozilla_funcs.releasevariantvalue = g_NPN_ReleaseVariantValue; mozilla_funcs.setexception = g_NPN_SetException; if (!npobject_bridge_new()) return NPERR_OUT_OF_MEMORY_ERROR; } // Initialize function tables // XXX: remove the local copies from this file NPW_InitializeFuncs(&mozilla_funcs, &plugin_funcs); D(bugiI("NP_Initialize\n")); NPError ret = g_plugin_NP_Initialize(&mozilla_funcs, &plugin_funcs); D(bugiD("NP_Initialize return: %d\n", ret)); return ret; } static int handle_NP_Initialize(rpc_connection_t *connection) { D(bug("handle_NP_Initialize\n")); uint32_t version; int error = rpc_method_get_args(connection, RPC_TYPE_UINT32, &version, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NP_Initialize() get args", error); return error; } NPError ret = g_NP_Initialize(version); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); } // NP_Shutdown static NPError g_NP_Shutdown(void) { if (g_plugin_NP_Shutdown == NULL) return NPERR_INVALID_FUNCTABLE_ERROR; D(bugiI("NP_Shutdown\n")); NPError ret = g_plugin_NP_Shutdown(); D(bugiD("NP_Shutdown done\n")); if (NPN_HAS_FEATURE(NPRUNTIME_SCRIPTING)) npobject_bridge_destroy(); gtk_main_quit(); return ret; } static int handle_NP_Shutdown(rpc_connection_t *connection) { D(bug("handle_NP_Shutdown\n")); int error = rpc_method_get_args(connection, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NP_Shutdown() get args", error); return error; } /* Clear any NPP_Destroys we may have delayed. Although it doesn't really matter, and the plugin is going to die soon. XXX: To be really picky, we should probably delay this and make sure it is run on a new event loop iteration. 
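	 Flushing here at least guarantees that every instance with a pending
	 delayed NPP_Destroy is torn down before NP_Shutdown runs, which most
	 plugins assume. (The NULL argument presumably means "process all
	 pending instances" -- inferred from this call site only; the callback
	 is defined elsewhere.)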
*/ delayed_destroys_process_cb(NULL); NPError ret = g_NP_Shutdown(); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); } // NPP_New static NPError g_NPP_New(NPMIMEType plugin_type, uint32_t instance_id, uint16_t mode, int16_t argc, char *argn[], char *argv[], NPSavedData *saved) { PluginInstance *plugin = npw_plugin_instance_new(&PluginInstanceClass); if (plugin == NULL) return NPERR_OUT_OF_MEMORY_ERROR; plugin->instance_id = instance_id; id_link(instance_id, plugin); NPP instance = malloc(sizeof(*instance)); if (instance == NULL) return NPERR_OUT_OF_MEMORY_ERROR; memset(instance, 0, sizeof(*instance)); instance->ndata = plugin; plugin->instance = instance; // check for size hints for (int i = 0; i < argc; i++) { if (argn[i] == NULL) continue; if (strcasecmp(argn[i], "width") == 0) { if (i < argc && argv[i]) plugin->width = atoi(argv[i]); } else if (strcasecmp(argn[i], "height") == 0) { if (i < argc && argv[i]) plugin->height = atoi(argv[i]); } } D(bugiI("NPP_New instance=%p\n", instance)); NPError ret = plugin_funcs.newp(plugin_type, instance, mode, argc, argn, argv, saved); D(bugiD("NPP_New return: %d [%s]\n", ret, string_of_NPError(ret))); // check if XEMBED is to be used long supports_XEmbed = FALSE; if (mozilla_funcs.getvalue) { // Even though NPAPI kindly provides an NPBool typedef, // NPNVSupportsXEmbedBool is documented to be a 4-byte PRBool. And // Flash treats it as such. NPError error = mozilla_funcs.getvalue(NULL, NPNVSupportsXEmbedBool, (void *)&supports_XEmbed); if (error == NPERR_NO_ERROR && plugin_funcs.getvalue) { long needs_XEmbed = FALSE; error = plugin_funcs.getvalue(instance, NPPVpluginNeedsXEmbed, (void *)&needs_XEmbed); if (error == NPERR_NO_ERROR) plugin->use_xembed = supports_XEmbed && needs_XEmbed; } } // assume Gtk plugin (no Xt event loop) if XEMBED is used if (!plugin->use_xembed) { if (xt_source_create() < 0) return NPERR_GENERIC_ERROR; } return ret; } static int handle_NPP_New(rpc_connection_t *connection) { D(bug("handle_NPP_New\n")); rpc_connection_ref(connection); uint32_t instance_id; NPMIMEType plugin_type; int32_t mode; int argn_count, argv_count; char **argn, **argv; NPSavedData *saved; int error = rpc_method_get_args(connection, RPC_TYPE_UINT32, &instance_id, RPC_TYPE_STRING, &plugin_type, RPC_TYPE_INT32, &mode, RPC_TYPE_ARRAY, RPC_TYPE_STRING, &argn_count, &argn, RPC_TYPE_ARRAY, RPC_TYPE_STRING, &argv_count, &argv, RPC_TYPE_NP_SAVED_DATA, &saved, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_New() get args", error); return error; } assert(argn_count == argv_count); NPError ret = g_NPP_New(plugin_type, instance_id, mode, argn_count, argn, argv, saved); if (plugin_type) free(plugin_type); if (argn) { for (int i = 0; i < argn_count; i++) free(argn[i]); free(argn); } if (argv) { for (int i = 0; i < argv_count; i++) free(argv[i]); free(argv); } if (saved) { if (saved->buf) NPN_MemFree(saved->buf); NPN_MemFree(saved); } return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); } // NPP_Destroy static NPError g_NPP_Destroy(NPP instance, NPSavedData **sdata) { if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return NPERR_INVALID_INSTANCE_ERROR; if (sdata) *sdata = NULL; // Process all pending calls as the data could become junk afterwards // XXX: this also processes delayed calls from other instances // XXX: Also, if this was delayed, the NPN_ReleaseObject calls will // be ignored; the browser thinks 
we've already died. delayed_calls_process(plugin, TRUE); D(bugiI("NPP_Destroy instance=%p\n", instance)); NPError ret = plugin_funcs.destroy(instance, sdata); D(bugiD("NPP_Destroy return: %d [%s]\n", ret, string_of_NPError(ret))); if (!plugin->use_xembed) xt_source_destroy(); npw_plugin_instance_invalidate(plugin); npw_plugin_instance_unref(plugin); return ret; } static NPError g_NPP_Destroy_Now(PluginInstance *plugin, NPSavedData **save) { D(bug("g_NPP_Destroy_Now\n")); NPSavedData *save_area = NULL; NPError ret = g_NPP_Destroy(PLUGIN_INSTANCE_NPP(plugin), &save_area); if (save) { *save = save_area; } else if (save_area) { npw_printf("WARNING: NPP_Destroy returned save_area, but it was ignored\n"); if (save_area->buf) NPN_MemFree(save_area->buf); NPN_MemFree(save_area); } rpc_connection_unref(g_rpc_connection); return ret; } static int handle_NPP_Destroy(rpc_connection_t *connection) { D(bug("handle_NPP_Destroy\n")); int error; PluginInstance *plugin; error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_Destroy() get args", error); return error; } NPSavedData *save_area = NULL; NPError ret = NPERR_NO_ERROR; /* Take a ref for the rpc_method_send_reply; otherwise the * rpc_connection_unref in g_NPP_Destroy_Now may cause a slight * nuisance. */ rpc_connection_ref(connection); if (!rpc_method_in_invoke(connection)) { /* The plugin is not on the stack; it's safe to call this. */ D(bug("NPP_Destroy is fine.\n")); ret = g_NPP_Destroy_Now(plugin, &save_area); } else { /* It is not safe to call NPP_Destroy right now. Delay it until we * return to the event loop. * * NOTE: This means that the browser never sees the real return * value of NPP_Destroy; the NPSavedData will be discarded, and any * error code will be ignored. */ D(bug("NPP_Destroy raced; delaying it to get a clean stack.\n")); delayed_destroys_add(plugin); } error = rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_NP_SAVED_DATA, save_area, RPC_TYPE_INVALID); if (save_area) { if (save_area->buf) NPN_MemFree(save_area->buf); NPN_MemFree(save_area); } rpc_connection_unref(connection); return error; } // NPP_SetWindow static NPError g_NPP_SetWindow(NPP instance, NPWindow *np_window) { if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; PluginInstance *plugin = PLUGIN_INSTANCE(instance); if (plugin == NULL) return NPERR_INVALID_INSTANCE_ERROR; if (plugin_funcs.setwindow == NULL) return NPERR_INVALID_FUNCTABLE_ERROR; plugin->is_windowless = np_window && np_window->type == NPWindowTypeDrawable; NPWindow *window = np_window; if (window && (window->window || plugin->is_windowless)) { if (plugin->toolkit_data) { if (update_window(plugin, window) < 0) return NPERR_GENERIC_ERROR; } else { if (create_window(plugin, window) < 0) return NPERR_GENERIC_ERROR; } window = &plugin->window; } D(bugiI("NPP_SetWindow instance=%p, window=%p\n", instance, window ? 
window->window : NULL)); NPError ret = plugin_funcs.setwindow(instance, window); D(bugiD("NPP_SetWindow return: %d [%s]\n", ret, string_of_NPError(ret))); if (np_window == NULL || (np_window->window == NULL && !plugin->is_windowless)) destroy_window(plugin); return ret; } static int handle_NPP_SetWindow(rpc_connection_t *connection) { D(bug("handle_NPP_SetWindow\n")); int error; PluginInstance *plugin; NPWindow *window; error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_NP_WINDOW, &window, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_SetWindow() get args", error); return error; } NPError ret = g_NPP_SetWindow(PLUGIN_INSTANCE_NPP(plugin), window); if (window) { if (window->ws_info) { free(window->ws_info); window->ws_info = NULL; } free(window); } return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); } // NPP_GetValue static NPError g_NPP_GetValue(NPP instance, NPPVariable variable, void *value) { if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; if (plugin_funcs.getvalue == NULL) return NPERR_INVALID_FUNCTABLE_ERROR; D(bugiI("NPP_GetValue instance=%p, variable=%d [%s]\n", instance, variable, string_of_NPPVariable(variable))); NPError ret = plugin_funcs.getvalue(instance, variable, value); D(bugiD("NPP_GetValue return: %d [%s]\n", ret, string_of_NPError(ret))); return ret; } static int handle_NPP_GetValue(rpc_connection_t *connection) { D(bug("handle_NPP_GetValue\n")); int error; PluginInstance *plugin; int32_t variable; error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_INT32, &variable, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_printf("ERROR: could not get NPP_GetValue variable\n"); return error; } NPError ret = NPERR_GENERIC_ERROR; int variable_type = rpc_type_of_NPPVariable(variable); switch (variable_type) { case RPC_TYPE_STRING: { char *str = NULL; ret = g_NPP_GetValue(PLUGIN_INSTANCE_NPP(plugin), variable, (void *)&str); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_STRING, str, RPC_TYPE_INVALID); } case RPC_TYPE_INT32: { uint32_t n = 0; ret = g_NPP_GetValue(PLUGIN_INSTANCE_NPP(plugin), variable, (void *)&n); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INT32, n, RPC_TYPE_INVALID); } case RPC_TYPE_BOOLEAN: { NPBool b = FALSE; ret = g_NPP_GetValue(PLUGIN_INSTANCE_NPP(plugin), variable, (void *)&b); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_BOOLEAN, b, RPC_TYPE_INVALID); } case RPC_TYPE_NP_OBJECT: { NPObject *npobj = NULL; ret = g_NPP_GetValue(PLUGIN_INSTANCE_NPP(plugin), variable, (void *)&npobj); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_NP_OBJECT, npobj, RPC_TYPE_INVALID); } } abort(); } // NPP_URLNotify static void g_NPP_URLNotify(NPP instance, const char *url, NPReason reason, void *notifyData) { if (instance == NULL) return; if (plugin_funcs.urlnotify == NULL) return; D(bugiI("NPP_URLNotify instance=%p, url='%s', reason=%s, notifyData=%p\n", instance, url, string_of_NPReason(reason), notifyData)); plugin_funcs.urlnotify(instance, url, reason, notifyData); D(bugiD("NPP_URLNotify done\n")); } static int handle_NPP_URLNotify(rpc_connection_t *connection) { D(bug("handle_NPP_URLNotify\n")); int error; PluginInstance *plugin; char *url; int32_t reason; void *notifyData; error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_STRING, &url, RPC_TYPE_INT32, &reason, RPC_TYPE_NP_NOTIFY_DATA, 
&notifyData, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_URLNotify() get args", error); return error; } g_NPP_URLNotify(PLUGIN_INSTANCE_NPP(plugin), url, reason, notifyData); if (url) free(url); return rpc_method_send_reply (connection, RPC_TYPE_INVALID); } // NPP_NewStream static NPError g_NPP_NewStream(NPP instance, NPMIMEType type, NPStream *stream, NPBool seekable, uint16_t *stype) { if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; if (plugin_funcs.newstream == NULL) return NPERR_INVALID_FUNCTABLE_ERROR; D(bugiI("NPP_NewStream instance=%p, stream=%p, url='%s', type='%s', seekable=%d, stype=%s, notifyData=%p\n", instance, stream, stream->url, type, seekable, string_of_NPStreamType(*stype), stream->notifyData)); NPError ret = plugin_funcs.newstream(instance, type, stream, seekable, stype); D(bugiD("NPP_NewStream return: %d [%s], stype=%s\n", ret, string_of_NPError(ret), string_of_NPStreamType(*stype))); return ret; } static int handle_NPP_NewStream(rpc_connection_t *connection) { D(bug("handle_NPP_NewStream\n")); int error; PluginInstance *plugin; uint32_t stream_id; uint32_t seekable; NPMIMEType type; NPStream *stream; if ((stream = malloc(sizeof(*stream))) == NULL) return RPC_ERROR_NO_MEMORY; memset(stream, 0, sizeof(*stream)); error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_STRING, &type, RPC_TYPE_UINT32, &stream_id, RPC_TYPE_STRING, &stream->url, RPC_TYPE_UINT32, &stream->end, RPC_TYPE_UINT32, &stream->lastmodified, RPC_TYPE_NP_NOTIFY_DATA, &stream->notifyData, RPC_TYPE_STRING, &stream->headers, RPC_TYPE_BOOLEAN, &seekable, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_NewStream() get args", error); return error; } StreamInstance *stream_ndata; if ((stream_ndata = malloc(sizeof(*stream_ndata))) == NULL) return RPC_ERROR_NO_MEMORY; stream->ndata = stream_ndata; memset(stream_ndata, 0, sizeof(*stream_ndata)); stream_ndata->stream_id = stream_id; id_link(stream_id, stream_ndata); stream_ndata->stream = stream; stream_ndata->is_plugin_stream = 0; uint16_t stype = NP_NORMAL; NPError ret = g_NPP_NewStream(PLUGIN_INSTANCE_NPP(plugin), type, stream, seekable, &stype); if (type) free(type); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_UINT32, (uint32_t)stype, RPC_TYPE_NP_NOTIFY_DATA, stream->notifyData, RPC_TYPE_INVALID); } // NPP_DestroyStream static NPError g_NPP_DestroyStream(NPP instance, NPStream *stream, NPReason reason) { if (instance == NULL) return NPERR_INVALID_INSTANCE_ERROR; if (plugin_funcs.destroystream == NULL) return NPERR_INVALID_FUNCTABLE_ERROR; if (stream == NULL) return NPERR_INVALID_PARAM; D(bugiI("NPP_DestroyStream instance=%p, stream=%p, reason=%s\n", instance, stream, string_of_NPReason(reason))); NPError ret = plugin_funcs.destroystream(instance, stream, reason); D(bugiD("NPP_DestroyStream return: %d [%s]\n", ret, string_of_NPError(ret))); StreamInstance *stream_ndata = stream->ndata; if (stream_ndata) { id_remove(stream_ndata->stream_id); free(stream_ndata); } free((char *)stream->url); free((char *)stream->headers); free(stream); return ret; } static int handle_NPP_DestroyStream(rpc_connection_t *connection) { D(bug("handle_NPP_DestroyStream\n")); PluginInstance *plugin; NPStream *stream; int32_t reason; int error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_NP_STREAM, &stream, RPC_TYPE_INT32, &reason, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_DestroyStream() get args", error); 
return error; } NPError ret = g_NPP_DestroyStream(PLUGIN_INSTANCE_NPP(plugin), stream, reason); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); } // NPP_WriteReady static int32_t g_NPP_WriteReady(NPP instance, NPStream *stream) { if (instance == NULL) return 0; if (plugin_funcs.writeready == NULL) return 0; if (stream == NULL) return 0; D(bugiI("NPP_WriteReady instance=%p, stream=%p\n", instance, stream)); int32_t ret = plugin_funcs.writeready(instance, stream); D(bugiD("NPP_WriteReady return: %d\n", ret)); return ret; } static int handle_NPP_WriteReady(rpc_connection_t *connection) { D(bug("handle_NPP_WriteReady\n")); PluginInstance *plugin; NPStream *stream; int error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_NP_STREAM, &stream, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_WriteReady() get args", error); return error; } int32_t ret = g_NPP_WriteReady(PLUGIN_INSTANCE_NPP(plugin), stream); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); } // NPP_Write static int32_t g_NPP_Write(NPP instance, NPStream *stream, int32_t offset, int32_t len, void *buf) { if (instance == NULL) return -1; if (plugin_funcs.write == NULL) return -1; if (stream == NULL) return -1; D(bugiI("NPP_Write instance=%p, stream=%p, offset=%d, len=%d, buf=%p\n", instance, stream, offset, len, buf)); int32_t ret = plugin_funcs.write(instance, stream, offset, len, buf); D(bugiD("NPP_Write return: %d\n", ret)); return ret; } static int handle_NPP_Write(rpc_connection_t *connection) { D(bug("handle_NPP_Write\n")); PluginInstance *plugin; NPStream *stream; unsigned char *buf; int32_t offset, len; int error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_NP_STREAM, &stream, RPC_TYPE_INT32, &offset, RPC_TYPE_ARRAY, RPC_TYPE_CHAR, &len, &buf, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_Write() get args", error); return error; } int32_t ret = g_NPP_Write(PLUGIN_INSTANCE_NPP(plugin), stream, offset, len, buf); if (buf) free(buf); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); } // NPP_StreamAsFile static void g_NPP_StreamAsFile(NPP instance, NPStream *stream, const char *fname) { if (instance == NULL) return; if (plugin_funcs.asfile == NULL) return; if (stream == NULL) return; D(bugiI("NPP_StreamAsFile instance=%p, stream=%p, fname='%s'\n", instance, stream, fname)); plugin_funcs.asfile(instance, stream, fname); D(bugiD("NPP_StreamAsFile done\n")); } static int handle_NPP_StreamAsFile(rpc_connection_t *connection) { D(bug("handle_NPP_StreamAsFile\n")); PluginInstance *plugin; NPStream *stream; char *fname; int error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_NP_STREAM, &stream, RPC_TYPE_STRING, &fname, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_StreamAsFile() get args", error); return error; } g_NPP_StreamAsFile(PLUGIN_INSTANCE_NPP(plugin), stream, fname); if (fname) free(fname); return rpc_method_send_reply (connection, RPC_TYPE_INVALID); } // NPP_Print static void g_NPP_Print(NPP instance, NPPrint *printInfo) { if (plugin_funcs.print == NULL) return; if (printInfo == NULL) return; D(bugiI("NPP_Print instance=%p, printInfo->mode=%d\n", instance, printInfo->mode)); plugin_funcs.print(instance, printInfo); D(bugiD("NPP_Print done\n")); } static void invoke_NPN_PrintData(PluginInstance *plugin, uint32_t platform_print_id, NPPrintData *printData) { if 
(printData == NULL)
    return;

  npw_return_if_fail(rpc_method_invoke_possible(g_rpc_connection));

  int error = rpc_method_invoke(g_rpc_connection,
                                RPC_METHOD_NPN_PRINT_DATA,
                                RPC_TYPE_UINT32, platform_print_id,
                                RPC_TYPE_NP_PRINT_DATA, printData,
                                RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_PrintData() invoke", error);
    return;
  }

  error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPN_PrintData() wait for reply", error);
    return;
  }
}

static int handle_NPP_Print(rpc_connection_t *connection)
{
  D(bug("handle_NPP_Print\n"));

  PluginInstance *plugin;
  NPPrint printInfo;
  uint32_t platform_print_id;
  int error = rpc_method_get_args(connection,
                                  RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin,
                                  RPC_TYPE_UINT32, &platform_print_id,
                                  RPC_TYPE_NP_PRINT, &printInfo,
                                  RPC_TYPE_INVALID);

  if (error != RPC_ERROR_NO_ERROR) {
    npw_perror("NPP_Print() get args", error);
    return error;
  }

  // reconstruct printer info
  NPPrintCallbackStruct printer;
  printer.type = NP_PRINT;
  printer.fp = platform_print_id ? tmpfile() : NULL;
  switch (printInfo.mode) {
  case NP_FULL:
    printInfo.print.fullPrint.platformPrint = &printer;
    break;
  case NP_EMBED:
    printInfo.print.embedPrint.platformPrint = &printer;
    // XXX the window ID is unlikely to work here as is. The NPWindow
    // is probably only used as a bounding box?
    create_window_attributes(printInfo.print.embedPrint.window.ws_info);
    break;
  }

  g_NPP_Print(PLUGIN_INSTANCE_NPP(plugin), &printInfo);

  // send back the printed data
  if (printer.fp) {
    long file_size = ftell(printer.fp);
    D(bug(" writeback data [%ld bytes]\n", file_size));
    rewind(printer.fp);
    if (file_size > 0) {
      NPPrintData printData;
      const int printDataMaxSize = sizeof(printData.data);
      // stream back the spooled output in fixed-size chunks
      int n = file_size / printDataMaxSize;
      while (--n >= 0) {
        printData.size = printDataMaxSize;
        if (fread(&printData.data, sizeof(printData.data), 1, printer.fp) != 1) {
          npw_printf("ERROR: unexpected end-of-file or error condition in NPP_Print\n");
          break;
        }
        npw_plugin_instance_ref(plugin);
        invoke_NPN_PrintData(plugin, platform_print_id, &printData);
        npw_plugin_instance_unref(plugin);
      }
      // trailing partial chunk, only if the size is not an exact multiple
      printData.size = file_size % printDataMaxSize;
      if (printData.size > 0) {
        if (fread(&printData.data, printData.size, 1, printer.fp) != 1)
          npw_printf("ERROR: unexpected end-of-file or error condition in NPP_Print\n");
        npw_plugin_instance_ref(plugin);
        invoke_NPN_PrintData(plugin, platform_print_id, &printData);
        npw_plugin_instance_unref(plugin);
      }
    }
    fclose(printer.fp);
  }

  if (printInfo.mode == NP_EMBED) {
    NPWindow *window = &printInfo.print.embedPrint.window;
    if (window->ws_info) {
      destroy_window_attributes(window->ws_info);
      window->ws_info = NULL;
    }
  }

  uint32_t plugin_printed = FALSE;
  if (printInfo.mode == NP_FULL)
    plugin_printed = printInfo.print.fullPrint.pluginPrinted;

  return rpc_method_send_reply(connection, RPC_TYPE_BOOLEAN, plugin_printed, RPC_TYPE_INVALID);
}

// Delivers a platform-specific window event to the instance
static int16_t
g_NPP_HandleEvent(NPP instance, NPEvent *event)
{
  if (instance == NULL)
    return false;

  if (plugin_funcs.event == NULL)
    return false;

  if (event == NULL)
    return false;

  D(bugiI("NPP_HandleEvent instance=%p, event=%p [%s]\n",
          instance, event, string_of_NPEvent_type(event->type)));
  int16_t ret = plugin_funcs.event(instance, event);
  D(bugiD("NPP_HandleEvent return: %d\n", ret));

  /* XXX: let's have a chance to commit the pixmap before it's gone */
  if (event->type == GraphicsExpose)
    gdk_flush();

  return ret;
}

static int handle_NPP_HandleEvent(rpc_connection_t *connection)
{
  D(bug("handle_NPP_HandleEvent\n"));
PluginInstance *plugin; NPEvent event; int error = rpc_method_get_args(connection, RPC_TYPE_NPW_PLUGIN_INSTANCE, &plugin, RPC_TYPE_NP_EVENT, &event, RPC_TYPE_INVALID); if (error != RPC_ERROR_NO_ERROR) { npw_perror("NPP_HandleEvent() get args", error); return error; } event.xany.display = x_display; int16_t ret = g_NPP_HandleEvent(PLUGIN_INSTANCE_NPP(plugin), &event); return rpc_method_send_reply(connection, RPC_TYPE_INT32, ret, RPC_TYPE_INVALID); } /* ====================================================================== */ /* === Events processing === */ /* ====================================================================== */ typedef gboolean (*GSourcePrepare)(GSource *, gint *); typedef gboolean (*GSourceCheckFunc)(GSource *); typedef gboolean (*GSourceDispatchFunc)(GSource *, GSourceFunc, gpointer); typedef void (*GSourceFinalizeFunc)(GSource *); // Xt events static GSource *xt_source = NULL; static int xt_source_count = 0; static GPollFD xt_event_poll_fd; static const int XT_DEFAULT_TIMEOUT = 25; static const int XT_MAX_DISPATCH_EVENTS = 10; static void xt_dummy_timeout_cb(XtPointer closure, XtIntervalId *id) { /* dummy function, never called */ npw_printf("ERROR: xt_dummy_timeout_cb() should never be called\n"); } static int xt_has_compatible_appcontext_timerQueue(void) { int is_compatible; XtIntervalId id; TimerEventRec *tq, *tq_probe; /* Try to determine where is the pointer to the next allocated TimerEventRec. Besides, XtAppAddTimeOut() shall not have been called already because we want to be sure any (libXt internal) "free" TimerEventRec pointer cache is empty. */ tq = XtNew(TimerEventRec); XtFree((char *)tq); tq_probe = XtNew(TimerEventRec); XtFree((char *)tq_probe); if (tq != tq_probe) return 0; id = XtAppAddTimeOut(x_app_context, 0, xt_dummy_timeout_cb, GUINT_TO_POINTER(0xdeadbeef)); tq = x_app_context->timerQueue; is_compatible = tq == tq_probe && tq->app == x_app_context && tq->te_proc == xt_dummy_timeout_cb && tq->te_closure == GUINT_TO_POINTER(0xdeadbeef) ; XtRemoveTimeOut(id); return is_compatible; } static void xt_dummy_input_cb(XtPointer closure, int *source, XtInputId *id) { /* dummy function, never called */ npw_printf("ERROR: xt_dummy_input_cb() should never be called\n"); } static inline int get_appcontext_input_count_at(int offset) { return *((short *)((char *)x_app_context + offset)); } static inline int add_appcontext_input(int fd, int n) { return XtAppAddInput(x_app_context, fd, GUINT_TO_POINTER(XtInputWriteMask), xt_dummy_input_cb, GUINT_TO_POINTER(0xdead0000)); } static int get_appcontext_input_count_offset(void) { #define low_offset offsetof(struct _XtAppStruct, __maxed__nfds) #define high_offset offsetof(struct _XtAppStruct, __maybe__input_max) #define n_offsets_max (high_offset - low_offset)/2 int i, ofs, n_offsets = 0; int offsets[n_offsets_max] = { 0, }; #define n_inputs_max 4 /* number of refinements/input sources */ int fd, id, n_inputs = 0; struct { int fd, id; } inputs[n_inputs_max] = { 0, }; if ((fd = open("/dev/null", O_WRONLY)) < 0) return 0; if ((id = add_appcontext_input(fd, 0)) < 0) { close(fd); return 0; } inputs[n_inputs].fd = fd; inputs[n_inputs].id = id; n_inputs++; for (ofs = low_offset; ofs < high_offset; ofs += 2) { if (get_appcontext_input_count_at(ofs) == 1) offsets[n_offsets++] = ofs; } while (n_inputs < n_inputs_max) { if ((fd = open("/dev/null", O_WRONLY)) < 0) break; if ((id = add_appcontext_input(fd, n_inputs)) < 0) { close(fd); break; } inputs[n_inputs].fd = fd; inputs[n_inputs].id = id; n_inputs++; int n = 0; for (i = 0; i < 
n_offsets; i++) { if (get_appcontext_input_count_at(offsets[i]) == n_inputs) offsets[n++] = offsets[i]; } for (i = n; i < n_offsets; i++) offsets[i] = 0; n_offsets = n; } for (i = 0; i < n_inputs; i++) { XtRemoveInput(inputs[i].id); close(inputs[i].fd); } if (n_offsets == 1) return offsets[0]; #undef n_fds_max #undef n_offsets_max #undef high_offset #undef low_offset return 0; } static int get_appcontext_input_count(void) { static int input_count_offset = -1; if (input_count_offset < 0) input_count_offset = get_appcontext_input_count_offset(); if (input_count_offset == 0) return 1; /* fake we have input to trigger timeout */ return get_appcontext_input_count_at(input_count_offset); } static int xt_has_compatible_appcontext(void) { return xt_has_compatible_appcontext_timerQueue(); } static int xt_get_next_timeout(GSource *source) { static int has_compatible_appcontext = -1; if (has_compatible_appcontext < 0) { if ((has_compatible_appcontext = xt_has_compatible_appcontext()) == 0) npw_printf("WARNING: xt_get_next_timeout() is not optimizable\n"); } int timeout = XT_DEFAULT_TIMEOUT; if (has_compatible_appcontext) { int input_timeout, timer_timeout; /* Check there is any input source to process */ if (get_appcontext_input_count() > 0) input_timeout = XT_DEFAULT_TIMEOUT; else input_timeout = -1; /* Check there is any timer to process */ if (x_app_context->timerQueue == NULL) timer_timeout = -1; else { /* Determine delay to next timeout. Zero means timeout already expired */ struct timeval *next = &x_app_context->timerQueue->te_timer_value; GTimeVal now; int64_t diff; g_source_get_current_time(source, &now); if ((diff = (int64_t)next->tv_sec - (int64_t)now.tv_sec) < 0) timer_timeout = 0; else if ((diff = diff*1000 + ((int64_t)next->tv_usec - (int64_t)now.tv_usec)/1000) <= 0) timer_timeout = 0; else timer_timeout = diff; } if (input_timeout < 0) timeout = timer_timeout; else if (timer_timeout < 0) timeout = input_timeout; else timeout = MIN(input_timeout, timer_timeout); } return timeout; } static gboolean xt_event_prepare(GSource *source, gint *timeout) { int mask = XtAppPending(x_app_context); if (mask) return TRUE; /* XXX: create new GPollFD for input sources? 
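	 Only the X display connection fd is registered with this GSource (see
	 xt_source_create() below), so descriptors added through XtAppAddInput()
	 are never polled here. That is presumably why xt_get_next_timeout()
	 falls back to the short XT_DEFAULT_TIMEOUT whenever the app context
	 reports pending input sources: the periodic wakeup substitutes for the
	 missing GPollFDs.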
*/ return (*timeout = xt_get_next_timeout(source)) == 0; } static gboolean xt_event_check(GSource *source) { if (xt_event_poll_fd.revents & G_IO_IN) { int mask = XtAppPending(x_app_context); if (mask) return TRUE; } return FALSE; } static gboolean xt_event_dispatch(GSource *source, GSourceFunc callback, gpointer user_data) { int i; for (i = 0; i < XT_MAX_DISPATCH_EVENTS; i++) { int mask = XtAppPending(x_app_context); if (mask == 0) break; XtAppProcessEvent(x_app_context, XtIMAll); } return TRUE; } static GSourceFuncs xt_event_funcs = { xt_event_prepare, xt_event_check, xt_event_dispatch, (GSourceFinalizeFunc)g_free, (GSourceFunc)NULL, (GSourceDummyMarshal)NULL }; static int xt_source_create(void) { if (++xt_source_count > 1 && xt_source != NULL) return 0; if ((xt_source = g_source_new(&xt_event_funcs, sizeof(GSource))) == NULL) { npw_printf("ERROR: failed to initialize Xt events listener\n"); return -1; } g_source_set_priority(xt_source, GDK_PRIORITY_EVENTS); g_source_set_can_recurse(xt_source, TRUE); g_source_attach(xt_source, NULL); xt_event_poll_fd.fd = ConnectionNumber(x_display); xt_event_poll_fd.events = G_IO_IN; xt_event_poll_fd.revents = 0; g_source_add_poll(xt_source, &xt_event_poll_fd); return 0; } static void xt_source_destroy(void) { if (--xt_source_count < 1 && xt_source) { g_source_destroy(xt_source); xt_source = NULL; } } // RPC events static GPollFD rpc_event_poll_fd; static gboolean rpc_event_prepare(GSource *source, gint *timeout) { *timeout = -1; return FALSE; } static gboolean rpc_event_check(GSource *source) { return rpc_wait_dispatch(g_rpc_connection, 0) > 0; } static gboolean rpc_event_dispatch(GSource *source, GSourceFunc callback, gpointer connection) { return rpc_dispatch(connection) != RPC_ERROR_CONNECTION_CLOSED; } static GSourceFuncs rpc_event_funcs = { rpc_event_prepare, rpc_event_check, rpc_event_dispatch, (GSourceFinalizeFunc)g_free, (GSourceFunc)NULL, (GSourceDummyMarshal)NULL }; // RPC error callback -- kill the plugin static void rpc_error_callback_cb(rpc_connection_t *connection, void *user_data) { D(bug("RPC connection %p is in a bad state, closing the plugin\n",connection)); rpc_connection_set_error_callback(connection, NULL, NULL); gtk_main_quit(); } /* ====================================================================== */ /* === Main program === */ /* ====================================================================== */ static int do_test(void); static int do_main(int argc, char **argv, const char *connection_path) { if (do_test() != 0) return 1; if (connection_path == NULL) { npw_printf("ERROR: missing connection path argument\n"); return 1; } D(bug(" Plugin connection: %s\n", connection_path)); D(bug(" Plugin viewer pid: %d\n", getpid())); thread_check_init(); D(bug(" Plugin main thread: %p\n", g_main_thread)); // Cleanup environment, the program may fork/exec a native shell // script and having 32-bit libraries in LD_PRELOAD is not right, // though not a fatal error #if defined(__linux__) if (getenv("LD_PRELOAD")) unsetenv("LD_PRELOAD"); #endif // Xt and GTK initialization XtToolkitInitialize(); x_app_context = XtCreateApplicationContext(); x_display = XtOpenDisplay(x_app_context, NULL, "npw-viewer", "npw-viewer", NULL, 0, &argc, argv); g_thread_init(NULL); gtk_init(&argc, &argv); // Initialize RPC communication channel if ((g_rpc_connection = rpc_init_server(connection_path)) == NULL) { npw_printf("ERROR: failed to initialize plugin-side RPC server connection\n"); return 1; } if (rpc_add_np_marshalers(g_rpc_connection) < 0) { 
npw_printf("ERROR: failed to initialize plugin-side marshalers\n"); return 1; } static const rpc_method_descriptor_t vtable[] = { { RPC_METHOD_NP_GET_MIME_DESCRIPTION, handle_NP_GetMIMEDescription }, { RPC_METHOD_NP_GET_VALUE, handle_NP_GetValue }, { RPC_METHOD_NP_INITIALIZE, handle_NP_Initialize }, { RPC_METHOD_NP_SHUTDOWN, handle_NP_Shutdown }, { RPC_METHOD_NPP_NEW, handle_NPP_New }, { RPC_METHOD_NPP_DESTROY, handle_NPP_Destroy }, { RPC_METHOD_NPP_GET_VALUE, handle_NPP_GetValue }, { RPC_METHOD_NPP_SET_WINDOW, handle_NPP_SetWindow }, { RPC_METHOD_NPP_URL_NOTIFY, handle_NPP_URLNotify }, { RPC_METHOD_NPP_NEW_STREAM, handle_NPP_NewStream }, { RPC_METHOD_NPP_DESTROY_STREAM, handle_NPP_DestroyStream }, { RPC_METHOD_NPP_WRITE_READY, handle_NPP_WriteReady }, { RPC_METHOD_NPP_WRITE, handle_NPP_Write }, { RPC_METHOD_NPP_STREAM_AS_FILE, handle_NPP_StreamAsFile }, { RPC_METHOD_NPP_PRINT, handle_NPP_Print }, { RPC_METHOD_NPP_HANDLE_EVENT, handle_NPP_HandleEvent }, { RPC_METHOD_NPCLASS_INVALIDATE, npclass_handle_Invalidate }, { RPC_METHOD_NPCLASS_HAS_METHOD, npclass_handle_HasMethod }, { RPC_METHOD_NPCLASS_INVOKE, npclass_handle_Invoke }, { RPC_METHOD_NPCLASS_INVOKE_DEFAULT, npclass_handle_InvokeDefault }, { RPC_METHOD_NPCLASS_HAS_PROPERTY, npclass_handle_HasProperty }, { RPC_METHOD_NPCLASS_GET_PROPERTY, npclass_handle_GetProperty }, { RPC_METHOD_NPCLASS_SET_PROPERTY, npclass_handle_SetProperty }, { RPC_METHOD_NPCLASS_REMOVE_PROPERTY, npclass_handle_RemoveProperty }, }; if (rpc_connection_add_method_descriptors(g_rpc_connection, vtable, sizeof(vtable) / sizeof(vtable[0])) < 0) { npw_printf("ERROR: failed to setup NPP method callbacks\n"); return 1; } id_init(); // Initialize RPC events listener GSource *rpc_source = g_source_new(&rpc_event_funcs, sizeof(GSource)); if (rpc_source == NULL) { npw_printf("ERROR: failed to initialize plugin-side RPC events listener\n"); return 1; } g_source_set_priority(rpc_source, G_PRIORITY_LOW); g_source_attach(rpc_source, NULL); rpc_event_poll_fd.fd = rpc_listen_socket(g_rpc_connection); rpc_event_poll_fd.events = G_IO_IN; rpc_event_poll_fd.revents = 0; g_source_set_callback(rpc_source, (GSourceFunc)rpc_dispatch, g_rpc_connection, NULL); g_source_add_poll(rpc_source, &rpc_event_poll_fd); // Set error handler - stop plugin if there's a connection error rpc_connection_set_error_callback(g_rpc_connection, rpc_error_callback_cb, NULL); gtk_main(); D(bug("--- EXIT ---\n")); #if USE_NPIDENTIFIER_CACHE npidentifier_cache_destroy(); #endif g_source_destroy(rpc_source); if (xt_source) g_source_destroy(xt_source); if (g_user_agent) free(g_user_agent); if (g_rpc_connection) rpc_connection_unref(g_rpc_connection); id_kill(); return 0; } // Flash Player 9 beta 1 is not stable enough and will generally // freeze on NP_Shutdown when multiple Flash movies are active static int is_flash_player9_beta1(void) { const char *plugin_desc = NULL; if (g_NP_GetValue(NPPVpluginDescriptionString, &plugin_desc) == NPERR_NO_ERROR && plugin_desc && strcmp(plugin_desc, "Shockwave Flash 9.0 d55") == 0) { npw_printf("WARNING: Flash Player 9 beta 1 detected and rejected\n"); return 1; } return 0; } static int do_test(void) { if (g_plugin_NP_GetMIMEDescription == NULL) return 1; if (g_plugin_NP_Initialize == NULL) return 2; if (g_plugin_NP_Shutdown == NULL) return 3; if (is_flash_player9_beta1()) return 4; return 0; } static int do_info(void) { if (do_test() != 0) return 1; const char *plugin_name = NULL; if (g_NP_GetValue(NPPVpluginNameString, &plugin_name) == NPERR_NO_ERROR && plugin_name) 
printf("PLUGIN_NAME %zd\n%s\n", strlen(plugin_name), plugin_name); const char *plugin_desc = NULL; if (g_NP_GetValue(NPPVpluginDescriptionString, &plugin_desc) == NPERR_NO_ERROR && plugin_desc) printf("PLUGIN_DESC %zd\n%s\n", strlen(plugin_desc), plugin_desc); const char *mime_info = g_NP_GetMIMEDescription(); if (mime_info) printf("PLUGIN_MIME %zd\n%s\n", strlen(mime_info), mime_info); return 0; } static int do_help(const char *prog) { printf("%s, NPAPI plugin viewer. Version %s\n", NPW_VIEWER, NPW_VERSION); printf("\n"); printf("usage: %s [GTK flags] [flags]\n", prog); printf(" -h --help print this message\n"); printf(" -t --test check plugin is compatible\n"); printf(" -i --info print plugin information\n"); printf(" -p --plugin set plugin path\n"); printf(" -c --connection set connection path\n"); return 0; } int main(int argc, char **argv) { const char *plugin_path = NULL; const char *connection_path = NULL; enum { CMD_RUN, CMD_TEST, CMD_INFO, CMD_HELP }; int cmd = CMD_RUN; // Parse command line arguments for (int i = 0; i < argc; i++) { const char *arg = argv[i]; if (strcmp(arg, "-h") == 0 || strcmp(arg, "--help") == 0) { argv[i] = NULL; cmd = CMD_HELP; } else if (strcmp(arg, "-t") == 0 || strcmp(arg, "--test") == 0) { argv[i] = NULL; cmd = CMD_TEST; } else if (strcmp(arg, "-i") == 0 || strcmp(arg, "--info") == 0) { argv[i] = NULL; cmd = CMD_INFO; } else if (strcmp(arg, "-p") == 0 || strcmp(arg, "--plugin") == 0) { argv[i] = NULL; if (++i < argc) { plugin_path = argv[i]; argv[i] = NULL; } } else if (strcmp(arg, "-c") == 0 || strcmp(arg, "--connection") == 0) { argv[i] = NULL; if (++i < argc) { connection_path = argv[i]; argv[i] = NULL; } } } // Remove processed arguments for (int i = 1, j = 1, n = argc; i < n; i++) { if (argv[i]) argv[j++] = argv[i]; else --argc; } // Open plug-in and get exported lib functions void *handles[10] = { NULL, }; int n_handles = 0; if (plugin_path == NULL) cmd = CMD_HELP; else { void *handle; const char *error; #if defined(__sun) /* XXX: check for Flash Player only? */ const char SunStudio_libCrun[] = "libCrun.so.1"; D(bug(" trying to open SunStudio C++ runtime '%s'\n", SunStudio_libCrun)); if ((handle = dlopen(SunStudio_libCrun, RTLD_LAZY|RTLD_GLOBAL)) == NULL) { npw_printf("ERROR: %s\n", dlerror()); return 1; } handles[n_handles++] = handle; dlerror(); #endif D(bug(" %s\n", plugin_path)); if ((handle = dlopen(plugin_path, RTLD_LAZY)) == NULL) { npw_printf("ERROR: %s\n", dlerror()); return 1; } handles[n_handles++] = handle; dlerror(); g_plugin_NP_GetMIMEDescription = (NP_GetMIMEDescriptionUPP)dlsym(handle, "NP_GetMIMEDescription"); if ((error = dlerror()) != NULL) { npw_printf("ERROR: %s\n", error); return 1; } g_plugin_NP_Initialize = (NP_InitializeUPP)dlsym(handle, "NP_Initialize"); if ((error = dlerror()) != NULL) { npw_printf("ERROR: %s\n", error); return 1; } g_plugin_NP_Shutdown = (NP_ShutdownUPP)dlsym(handle, "NP_Shutdown"); if ((error = dlerror()) != NULL) { npw_printf("ERROR: %s\n", error); return 1; } g_plugin_NP_GetValue = (NP_GetValueUPP)dlsym(handle, "NP_GetValue"); } int ret = 1; switch (cmd) { case CMD_RUN: ret = do_main(argc, argv, connection_path); break; case CMD_TEST: ret = do_test(); break; case CMD_INFO: ret = do_info(); break; case CMD_HELP: ret = do_help(argv[0]); break; } while (--n_handles >= 0) { void * const handle = handles[n_handles]; if (handle) dlclose(handle); } return ret; }
./CrossVul/dataset_final_sorted/CWE-264/c/good_3470_1
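Editor's note: the viewer's main() above resolves its NPAPI entry points via dlopen()/dlsym(), clearing and re-checking dlerror() around each lookup, since a NULL return alone cannot distinguish "symbol absent" from "symbol whose value is NULL" (as with the optional NP_GetValue). Below is a minimal, self-contained sketch of that pattern; the library name "libexample.so" and the symbol "example_init" are hypothetical, only the dlfcn.h calls themselves are real. Build with -ldl on glibc.

#include <dlfcn.h>
#include <stdio.h>

typedef int (*init_fn)(void);

int main(void)
{
    /* Load the library lazily, as the viewer does for the wrapped plugin. */
    void *handle = dlopen("libexample.so", RTLD_LAZY);
    if (handle == NULL) {
        fprintf(stderr, "ERROR: %s\n", dlerror());
        return 1;
    }

    dlerror(); /* clear any stale error state before the lookup */
    init_fn init = (init_fn)dlsym(handle, "example_init");
    const char *error = dlerror(); /* NULL symbol values are legal, so test
                                      dlerror(), not the returned pointer */
    if (error != NULL) {
        fprintf(stderr, "ERROR: %s\n", error);
        dlclose(handle);
        return 1;
    }

    int ret = init ? init() : 0;
    dlclose(handle);
    return ret;
}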
crossvul-cpp_data_good_1492_0
/* * Flexible mmap layout support * * Based on code by Ingo Molnar and Andi Kleen, copyrighted * as follows: * * Copyright 2003-2009 Red Hat Inc. * All Rights Reserved. * Copyright 2005 Andi Kleen, SUSE Labs. * Copyright 2007 Jiri Kosina, SUSE Labs. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/personality.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/limits.h> #include <linux/sched.h> #include <asm/elf.h> struct va_alignment __read_mostly va_align = { .flags = -1, }; static unsigned long stack_maxrandom_size(void) { unsigned long max = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT; } return max; } /* * Top of mmap area (just below the process stack). * * Leave at least a ~128 MB hole with possible stack randomization. */ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size()) #define MAX_GAP (TASK_SIZE/6*5) static int mmap_is_legacy(void) { if (current->personality & ADDR_COMPAT_LAYOUT) return 1; if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) return 1; return sysctl_legacy_va_layout; } static unsigned long mmap_rnd(void) { unsigned long rnd = 0; /* * 8 bits of randomness in 32bit mmaps, 20 address space bits * 28 bits of randomness in 64bit mmaps, 40 address space bits */ if (current->flags & PF_RANDOMIZE) { if (mmap_is_ia32()) rnd = get_random_int() % (1<<8); else rnd = get_random_int() % (1<<28); } return rnd << PAGE_SHIFT; } static unsigned long mmap_base(void) { unsigned long gap = rlimit(RLIMIT_STACK); if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); } /* * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 * does, but not when emulating X86_32 */ static unsigned long mmap_legacy_base(void) { if (mmap_is_ia32()) return TASK_UNMAPPED_BASE; else return TASK_UNMAPPED_BASE + mmap_rnd(); } /* * This function, called very early during the creation of a new * process VM image, sets up which VM layout function to use: */ void arch_pick_mmap_layout(struct mm_struct *mm) { mm->mmap_legacy_base = mmap_legacy_base(); mm->mmap_base = mmap_base(); if (mmap_is_legacy()) { mm->mmap_base = mm->mmap_legacy_base; mm->get_unmapped_area = arch_get_unmapped_area; } else { mm->get_unmapped_area = arch_get_unmapped_area_topdown; } }
./CrossVul/dataset_final_sorted/CWE-264/c/good_1492_0
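Editor's note: the gap clamping and randomized top-down base computed by mmap_base() above reduce to a few lines of integer arithmetic. The userspace sketch below reproduces that arithmetic with illustrative constants; TASK_SIZE, PAGE_SHIFT, the pretend 8 MB RLIMIT_STACK and rand() are stand-ins for the kernel's values and get_random_int(), so the printed address is only representative.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT    12UL
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE     0x7ffffffff000UL            /* illustrative 64-bit limit */
#define MIN_GAP       (128UL * 1024 * 1024)
#define MAX_GAP       (TASK_SIZE / 6 * 5)

int main(void)
{
    /* 28 bits of page-shifted randomness, mirroring the 64-bit branch. */
    unsigned long rnd = ((unsigned long)rand() % (1UL << 28)) << PAGE_SHIFT;
    unsigned long gap = 8UL * 1024 * 1024;        /* pretend RLIMIT_STACK */

    if (gap < MIN_GAP)
        gap = MIN_GAP;
    else if (gap > MAX_GAP)
        gap = MAX_GAP;

    printf("top-down mmap base: %#lx\n", PAGE_ALIGN(TASK_SIZE - gap - rnd));
    return 0;
}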
crossvul-cpp_data_good_3524_13
/* * Interface handling (except master interface) * * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/mac80211.h> #include <net/ieee80211_radiotap.h> #include "ieee80211_i.h" #include "sta_info.h" #include "debugfs_netdev.h" #include "mesh.h" #include "led.h" #include "driver-ops.h" #include "wme.h" #include "rate.h" /** * DOC: Interface list locking * * The interface list in each struct ieee80211_local is protected * three-fold: * * (1) modifications may only be done under the RTNL * (2) modifications and readers are protected against each other by * the iflist_mtx. * (3) modifications are done in an RCU manner so atomic readers * can traverse the list in RCU-safe blocks. * * As a consequence, reads (traversals) of the list can be protected * by either the RTNL, the iflist_mtx or RCU. */ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) { int meshhdrlen; struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0; /* FIX: what would be proper limits for MTU? * This interface uses 802.3 frames. */ if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { return -EINVAL; } #ifdef CONFIG_MAC80211_VERBOSE_DEBUG printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ dev->mtu = new_mtu; return 0; } static int ieee80211_change_mac(struct net_device *dev, void *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sockaddr *sa = addr; int ret; if (ieee80211_sdata_running(sdata)) return -EBUSY; ret = eth_mac_addr(dev, sa); if (ret == 0) memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN); return ret; } static inline int identical_mac_addr_allowed(int type1, int type2) { return type1 == NL80211_IFTYPE_MONITOR || type2 == NL80211_IFTYPE_MONITOR || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) || (type1 == NL80211_IFTYPE_WDS && (type2 == NL80211_IFTYPE_WDS || type2 == NL80211_IFTYPE_AP)) || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) || (type1 == NL80211_IFTYPE_AP_VLAN && (type2 == NL80211_IFTYPE_AP || type2 == NL80211_IFTYPE_AP_VLAN)); } static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype iftype) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *nsdata; struct net_device *dev = sdata->dev; ASSERT_RTNL(); /* we hold the RTNL here so can safely walk the list */ list_for_each_entry(nsdata, &local->interfaces, list) { struct net_device *ndev = nsdata->dev; if (ndev != dev && ieee80211_sdata_running(nsdata)) { /* * Allow only a single IBSS interface to be up at any * time. This is restricted because beacon distribution * cannot work properly if both are in the same IBSS. * * To remove this restriction we'd have to disallow them * from setting the same SSID on different IBSS interfaces * belonging to the same hardware. Then, however, we're * faced with having to adopt two different TSF timers... 
*/ if (iftype == NL80211_IFTYPE_ADHOC && nsdata->vif.type == NL80211_IFTYPE_ADHOC) return -EBUSY; /* * The remaining checks are only performed for interfaces * with the same MAC address. */ if (compare_ether_addr(dev->dev_addr, ndev->dev_addr)) continue; /* * check whether it may have the same address */ if (!identical_mac_addr_allowed(iftype, nsdata->vif.type)) return -ENOTUNIQ; /* * can only add VLANs to enabled APs */ if (iftype == NL80211_IFTYPE_AP_VLAN && nsdata->vif.type == NL80211_IFTYPE_AP) sdata->bss = &nsdata->u.ap; } } return 0; } void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset) { struct ieee80211_local *local = sdata->local; u32 flags = sdata->u.mntr_flags; #define ADJUST(_f, _s) do { \ if (flags & MONITOR_FLAG_##_f) \ local->fif_##_s += offset; \ } while (0) ADJUST(FCSFAIL, fcsfail); ADJUST(PLCPFAIL, plcpfail); ADJUST(CONTROL, control); ADJUST(CONTROL, pspoll); ADJUST(OTHER_BSS, other_bss); #undef ADJUST } /* * NOTE: Be very careful when changing this function, it must NOT return * an error on interface type changes that have been pre-checked, so most * checks should be in ieee80211_check_concurrent_iface. */ static int ieee80211_do_open(struct net_device *dev, bool coming_up) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; u32 changed = 0; int res; u32 hw_reconf_flags = 0; switch (sdata->vif.type) { case NL80211_IFTYPE_WDS: if (!is_valid_ether_addr(sdata->u.wds.remote_addr)) return -ENOLINK; break; case NL80211_IFTYPE_AP_VLAN: if (!sdata->bss) return -ENOLINK; list_add(&sdata->u.vlan.list, &sdata->bss->vlans); break; case NL80211_IFTYPE_AP: sdata->bss = &sdata->u.ap; break; case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_ADHOC: /* no special treatment */ break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: /* cannot happen */ WARN_ON(1); break; } if (local->open_count == 0) { res = drv_start(local); if (res) goto err_del_bss; if (local->ops->napi_poll) napi_enable(&local->napi); /* we're brought up, everything changes */ hw_reconf_flags = ~0; ieee80211_led_radio(local, true); ieee80211_mod_tpt_led_trig(local, IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); } /* * Copy the hopefully now-present MAC address to * this interface, if it has the special null one. 
*/ if (is_zero_ether_addr(dev->dev_addr)) { memcpy(dev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); if (!is_valid_ether_addr(dev->dev_addr)) { if (!local->open_count) drv_stop(local); return -EADDRNOTAVAIL; } } switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: /* no need to tell driver */ break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { local->cooked_mntrs++; break; } /* must be before the call to ieee80211_configure_filter */ local->monitors++; if (local->monitors == 1) { local->hw.conf.flags |= IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; } ieee80211_adjust_monitor_flags(sdata, 1); ieee80211_configure_filter(local); netif_carrier_on(dev); break; default: if (coming_up) { res = drv_add_interface(local, &sdata->vif); if (res) goto err_stop; } if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll++; local->fif_probe_req++; ieee80211_configure_filter(local); } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { local->fif_probe_req++; } changed |= ieee80211_reset_erp_info(sdata); ieee80211_bss_info_change_notify(sdata, changed); if (sdata->vif.type == NL80211_IFTYPE_STATION) netif_carrier_off(dev); else netif_carrier_on(dev); } set_bit(SDATA_STATE_RUNNING, &sdata->state); if (sdata->vif.type == NL80211_IFTYPE_WDS) { /* Create STA entry for the WDS peer */ sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr, GFP_KERNEL); if (!sta) { res = -ENOMEM; goto err_del_interface; } /* no locking required since STA is not live yet */ sta->flags |= WLAN_STA_AUTHORIZED; res = sta_info_insert(sta); if (res) { /* STA has been freed */ goto err_del_interface; } rate_control_rate_init(sta); } /* * set_multicast_list will be invoked by the networking core * which will check whether any increments here were done in * error and sync them down to the hardware as filter flags. 
*/ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) atomic_inc(&local->iff_allmultis); if (sdata->flags & IEEE80211_SDATA_PROMISC) atomic_inc(&local->iff_promiscs); mutex_lock(&local->mtx); hw_reconf_flags |= __ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); if (coming_up) local->open_count++; if (hw_reconf_flags) { ieee80211_hw_config(local, hw_reconf_flags); /* * set default queue parameters so drivers don't * need to initialise the hardware if the hardware * doesn't start up with sane defaults */ ieee80211_set_wmm_default(sdata); } ieee80211_recalc_ps(local, -1); netif_tx_start_all_queues(dev); return 0; err_del_interface: drv_remove_interface(local, &sdata->vif); err_stop: if (!local->open_count) drv_stop(local); err_del_bss: sdata->bss = NULL; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) list_del(&sdata->u.vlan.list); clear_bit(SDATA_STATE_RUNNING, &sdata->state); return res; } static int ieee80211_open(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int err; /* fail early if user set an invalid address */ if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type); if (err) return err; return ieee80211_do_open(dev, true); } static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down) { struct ieee80211_local *local = sdata->local; unsigned long flags; struct sk_buff *skb, *tmp; u32 hw_reconf_flags = 0; int i; enum nl80211_channel_type orig_ct; clear_bit(SDATA_STATE_RUNNING, &sdata->state); if (local->scan_sdata == sdata) ieee80211_scan_cancel(local); /* * Stop TX on this interface first. */ netif_tx_stop_all_queues(sdata->dev); /* * Purge work for this interface. */ ieee80211_work_purge(sdata); /* * Remove all stations associated with this interface. * * This must be done before calling ops->remove_interface() * because otherwise we can later invoke ops->sta_notify() * whenever the STAs are removed, and that invalidates driver * assumptions about always getting a vif pointer that is valid * (because if we remove a STA after ops->remove_interface() * the driver will have removed the vif info already!) * * This is relevant only in AP, WDS and mesh modes, since in * all other modes we've already removed all stations when * disconnecting etc. */ sta_info_flush(local, sdata); /* * Don't count this interface for promisc/allmulti while it * is down. dev_mc_unsync() will invoke set_multicast_list * on the master interface which will sync these down to the * hardware as filter flags. 
*/ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) atomic_dec(&local->iff_allmultis); if (sdata->flags & IEEE80211_SDATA_PROMISC) atomic_dec(&local->iff_promiscs); if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll--; local->fif_probe_req--; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { local->fif_probe_req--; } netif_addr_lock_bh(sdata->dev); spin_lock_bh(&local->filter_lock); __hw_addr_unsync(&local->mc_list, &sdata->dev->mc, sdata->dev->addr_len); spin_unlock_bh(&local->filter_lock); netif_addr_unlock_bh(sdata->dev); ieee80211_configure_filter(local); del_timer_sync(&local->dynamic_ps_timer); cancel_work_sync(&local->dynamic_ps_enable_work); /* APs need special treatment */ if (sdata->vif.type == NL80211_IFTYPE_AP) { struct ieee80211_sub_if_data *vlan, *tmpsdata; struct beacon_data *old_beacon = rtnl_dereference(sdata->u.ap.beacon); /* sdata_running will return false, so this will disable */ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); /* remove beacon */ rcu_assign_pointer(sdata->u.ap.beacon, NULL); synchronize_rcu(); kfree(old_beacon); /* free all potentially still buffered bcast frames */ while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { local->total_ps_buffered--; dev_kfree_skb(skb); } /* down all dependent devices, that is VLANs */ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, u.vlan.list) dev_close(vlan->dev); WARN_ON(!list_empty(&sdata->u.ap.vlans)); } if (going_down) local->open_count--; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: list_del(&sdata->u.vlan.list); /* no need to tell driver */ break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { local->cooked_mntrs--; break; } local->monitors--; if (local->monitors == 0) { local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; } ieee80211_adjust_monitor_flags(sdata, -1); ieee80211_configure_filter(local); break; default: flush_work(&sdata->work); /* * When we get here, the interface is marked down. * Call synchronize_rcu() to wait for the RX path * should it be using the interface and enqueuing * frames at this very time on another CPU. */ synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); /* * Disable beaconing here for mesh only, AP and IBSS * are already taken care of. */ if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); /* * Free all remaining keys, there shouldn't be any, * except maybe group keys in AP mode or WDS? */ ieee80211_free_keys(sdata); if (going_down) drv_remove_interface(local, &sdata->vif); } sdata->bss = NULL; mutex_lock(&local->mtx); hw_reconf_flags |= __ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); ieee80211_recalc_ps(local, -1); if (local->open_count == 0) { if (local->ops->napi_poll) napi_disable(&local->napi); ieee80211_clear_tx_pending(local); ieee80211_stop_device(local); /* no reconfiguring after stop! */ hw_reconf_flags = 0; } /* Re-calculate channel-type, in case there are multiple vifs * on different channel types. 
*/ orig_ct = local->_oper_channel_type; ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT); /* do after stop to avoid reconfiguring when we stop anyway */ if (hw_reconf_flags || (orig_ct != local->_oper_channel_type)) ieee80211_hw_config(local, hw_reconf_flags); spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_walk_safe(&local->pending[i], skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (info->control.vif == &sdata->vif) { __skb_unlink(skb, &local->pending[i]); dev_kfree_skb_irq(skb); } } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } static int ieee80211_stop(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_do_stop(sdata, true); return 0; } static void ieee80211_set_multicast_list(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int allmulti, promisc, sdata_allmulti, sdata_promisc; allmulti = !!(dev->flags & IFF_ALLMULTI); promisc = !!(dev->flags & IFF_PROMISC); sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI); sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC); if (allmulti != sdata_allmulti) { if (dev->flags & IFF_ALLMULTI) atomic_inc(&local->iff_allmultis); else atomic_dec(&local->iff_allmultis); sdata->flags ^= IEEE80211_SDATA_ALLMULTI; } if (promisc != sdata_promisc) { if (dev->flags & IFF_PROMISC) atomic_inc(&local->iff_promiscs); else atomic_dec(&local->iff_promiscs); sdata->flags ^= IEEE80211_SDATA_PROMISC; } spin_lock_bh(&local->filter_lock); __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); spin_unlock_bh(&local->filter_lock); ieee80211_queue_work(&local->hw, &local->reconfig_filter); } /* * Called when the netdev is removed or, by the code below, before * the interface type changes. 
*/ static void ieee80211_teardown_sdata(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int flushed; int i; /* free extra data */ ieee80211_free_keys(sdata); ieee80211_debugfs_remove_netdev(sdata); for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) __skb_queue_purge(&sdata->fragments[i].skb_list); sdata->fragment_next = 0; if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_rmc_free(sdata); flushed = sta_info_flush(local, sdata); WARN_ON(flushed); } static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb) { return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); } static const struct net_device_ops ieee80211_dataif_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_teardown_sdata, .ndo_start_xmit = ieee80211_subif_start_xmit, .ndo_set_multicast_list = ieee80211_set_multicast_list, .ndo_change_mtu = ieee80211_change_mtu, .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_netdev_select_queue, }; static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr; struct ieee80211_radiotap_header *rtap = (void *)skb->data; u8 *p; if (local->hw.queues < 4) return 0; if (skb->len < 4 || skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) return 0; /* doesn't matter, frame will be dropped */ hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); if (!ieee80211_is_data(hdr->frame_control)) { skb->priority = 7; return ieee802_1d_to_ac[skb->priority]; } if (!ieee80211_is_data_qos(hdr->frame_control)) { skb->priority = 0; return ieee802_1d_to_ac[skb->priority]; } p = ieee80211_get_qos_ctl(hdr); skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; return ieee80211_downgrade_queue(local, skb); } static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_teardown_sdata, .ndo_start_xmit = ieee80211_monitor_start_xmit, .ndo_set_multicast_list = ieee80211_set_multicast_list, .ndo_change_mtu = ieee80211_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_select_queue = ieee80211_monitor_select_queue, }; static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; } static void ieee80211_iface_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, work); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct sta_info *sta; struct ieee80211_ra_tid *ra_tid; if (!ieee80211_sdata_running(sdata)) return; if (local->scanning) return; /* * ieee80211_queue_work() should have picked up most cases, * here we'll pick the rest. 
*/ if (WARN(local->suspended, "interface work scheduled while going to suspend\n")) return; /* first process frames */ while ((skb = skb_dequeue(&sdata->skb_queue))) { struct ieee80211_mgmt *mgmt = (void *)skb->data; if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { ra_tid = (void *)&skb->cb; ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra, ra_tid->tid); } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) { ra_tid = (void *)&skb->cb; ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra, ra_tid->tid); } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK) { int len = skb->len; mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { switch (mgmt->u.action.u.addba_req.action_code) { case WLAN_ACTION_ADDBA_REQ: ieee80211_process_addba_request( local, sta, mgmt, len); break; case WLAN_ACTION_ADDBA_RESP: ieee80211_process_addba_resp(local, sta, mgmt, len); break; case WLAN_ACTION_DELBA: ieee80211_process_delba(sdata, sta, mgmt, len); break; default: WARN_ON(1); break; } } mutex_unlock(&local->sta_mtx); } else if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *)mgmt; /* * So the frame isn't mgmt, but frame_control * is at the right place anyway, of course, so * the if statement is correct. * * Warn if we have other data frame types here, * they must not get here. */ WARN_ON(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); WARN_ON(!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG))); /* * This was a fragment of a frame, received while * a block-ack session was active. That cannot be * right, so terminate the session. */ mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { u16 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; __ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_REQUIRE_SETUP, true); } mutex_unlock(&local->sta_mtx); } else switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_rx_queued_mgmt(sdata, skb); break; default: WARN(1, "frame for unexpected interface type"); break; } kfree_skb(skb); } /* then other type-dependent work */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_work(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_work(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_work(sdata); break; default: break; } } /* * Helper function to initialise an interface to a specific type. 
*/ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { /* clear type-dependent union */ memset(&sdata->u, 0, sizeof(sdata->u)); /* and set some type-dependent values */ sdata->vif.type = type; sdata->vif.p2p = false; sdata->dev->netdev_ops = &ieee80211_dataif_ops; sdata->wdev.iftype = type; sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); sdata->control_port_no_encrypt = false; /* only monitor differs */ sdata->dev->type = ARPHRD_ETHER; skb_queue_head_init(&sdata->skb_queue); INIT_WORK(&sdata->work, ieee80211_iface_work); switch (type) { case NL80211_IFTYPE_P2P_GO: type = NL80211_IFTYPE_AP; sdata->vif.type = type; sdata->vif.p2p = true; /* fall through */ case NL80211_IFTYPE_AP: skb_queue_head_init(&sdata->u.ap.ps_bc_buf); INIT_LIST_HEAD(&sdata->u.ap.vlans); break; case NL80211_IFTYPE_P2P_CLIENT: type = NL80211_IFTYPE_STATION; sdata->vif.type = type; sdata->vif.p2p = true; /* fall through */ case NL80211_IFTYPE_STATION: ieee80211_sta_setup_sdata(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_setup_sdata(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif)) ieee80211_mesh_init_sdata(sdata); break; case NL80211_IFTYPE_MONITOR: sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; sdata->dev->netdev_ops = &ieee80211_monitorif_ops; sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | MONITOR_FLAG_OTHER_BSS; break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_AP_VLAN: break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: BUG(); break; } ieee80211_debugfs_add_netdev(sdata); } static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { struct ieee80211_local *local = sdata->local; int ret, err; enum nl80211_iftype internal_type = type; bool p2p = false; ASSERT_RTNL(); if (!local->ops->change_interface) return -EBUSY; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: /* * Could maybe also all others here? * Just not sure how that interacts * with the RX/config path e.g. for * mesh. */ break; default: return -EBUSY; } switch (type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: /* * Could probably support everything * but WDS here (WDS do_open can fail * under memory pressure, which this * code isn't prepared to handle). */ break; case NL80211_IFTYPE_P2P_CLIENT: p2p = true; internal_type = NL80211_IFTYPE_STATION; break; case NL80211_IFTYPE_P2P_GO: p2p = true; internal_type = NL80211_IFTYPE_AP; break; default: return -EBUSY; } ret = ieee80211_check_concurrent_iface(sdata, internal_type); if (ret) return ret; ieee80211_do_stop(sdata, false); ieee80211_teardown_sdata(sdata->dev); ret = drv_change_interface(local, sdata, internal_type, p2p); if (ret) type = sdata->vif.type; ieee80211_setup_sdata(sdata, type); err = ieee80211_do_open(sdata->dev, false); WARN(err, "type change: do_open returned %d", err); return ret; } int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { int ret; ASSERT_RTNL(); if (type == ieee80211_vif_type_p2p(&sdata->vif)) return 0; /* Setting ad-hoc mode on non-IBSS channel is not supported. */ if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS && type == NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; if (ieee80211_sdata_running(sdata)) { ret = ieee80211_runtime_change_iftype(sdata, type); if (ret) return ret; } else { /* Purge and reset type-dependent state. 
*/ ieee80211_teardown_sdata(sdata->dev); ieee80211_setup_sdata(sdata, type); } /* reset some values that shouldn't be kept across type changes */ sdata->vif.bss_conf.basic_rates = ieee80211_mandatory_rates(sdata->local, sdata->local->hw.conf.channel->band); sdata->drop_unencrypted = 0; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = false; return 0; } static void ieee80211_assign_perm_addr(struct ieee80211_local *local, struct net_device *dev, enum nl80211_iftype type) { struct ieee80211_sub_if_data *sdata; u64 mask, start, addr, val, inc; u8 *m; u8 tmp_addr[ETH_ALEN]; int i; /* default ... something at least */ memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN); if (is_zero_ether_addr(local->hw.wiphy->addr_mask) && local->hw.wiphy->n_addresses <= 1) return; mutex_lock(&local->iflist_mtx); switch (type) { case NL80211_IFTYPE_MONITOR: /* doesn't matter */ break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_AP_VLAN: /* match up with an AP interface */ list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_AP) continue; memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN); break; } /* keep default if no AP interface present */ break; default: /* assign a new address if possible -- try n_addresses first */ for (i = 0; i < local->hw.wiphy->n_addresses; i++) { bool used = false; list_for_each_entry(sdata, &local->interfaces, list) { if (memcmp(local->hw.wiphy->addresses[i].addr, sdata->vif.addr, ETH_ALEN) == 0) { used = true; break; } } if (!used) { memcpy(dev->perm_addr, local->hw.wiphy->addresses[i].addr, ETH_ALEN); break; } } /* try mask if available */ if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) break; m = local->hw.wiphy->addr_mask; mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); if (__ffs64(mask) + hweight64(mask) != fls64(mask)) { /* not a contiguous mask ... not handled now! 
*/ printk(KERN_DEBUG "not contiguous\n"); break; } m = local->hw.wiphy->perm_addr; start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); inc = 1ULL<<__ffs64(mask); val = (start & mask); addr = (start & ~mask) | (val & mask); do { bool used = false; tmp_addr[5] = addr >> 0*8; tmp_addr[4] = addr >> 1*8; tmp_addr[3] = addr >> 2*8; tmp_addr[2] = addr >> 3*8; tmp_addr[1] = addr >> 4*8; tmp_addr[0] = addr >> 5*8; val += inc; list_for_each_entry(sdata, &local->interfaces, list) { if (memcmp(tmp_addr, sdata->vif.addr, ETH_ALEN) == 0) { used = true; break; } } if (!used) { memcpy(dev->perm_addr, tmp_addr, ETH_ALEN); break; } addr = (start & ~mask) | (val & mask); } while (addr != start); break; } mutex_unlock(&local->iflist_mtx); } int ieee80211_if_add(struct ieee80211_local *local, const char *name, struct net_device **new_dev, enum nl80211_iftype type, struct vif_params *params) { struct net_device *ndev; struct ieee80211_sub_if_data *sdata = NULL; int ret, i; ASSERT_RTNL(); ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size, name, ieee80211_if_setup, local->hw.queues, 1); if (!ndev) return -ENOMEM; dev_net_set(ndev, wiphy_net(local->hw.wiphy)); ndev->needed_headroom = local->tx_headroom + 4*6 /* four MAC addresses */ + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ + 6 /* mesh */ + 8 /* rfc1042/bridge tunnel */ - ETH_HLEN /* ethernet hard_header_len */ + IEEE80211_ENCRYPT_HEADROOM; ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) goto fail; ieee80211_assign_perm_addr(local, ndev, type); memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ sdata = netdev_priv(ndev); ndev->ieee80211_ptr = &sdata->wdev; memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); memcpy(sdata->name, ndev->name, IFNAMSIZ); /* initialise type-independent data */ sdata->wdev.wiphy = local->hw.wiphy; sdata->local = local; sdata->dev = ndev; #ifdef CONFIG_INET sdata->arp_filter_state = true; #endif for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) skb_queue_head_init(&sdata->fragments[i].skb_list); INIT_LIST_HEAD(&sdata->key_list); for (i = 0; i < IEEE80211_NUM_BANDS; i++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[i]; sdata->rc_rateidx_mask[i] = sband ? (1 << sband->n_bitrates) - 1 : 0; } /* setup type-dependent data */ ieee80211_setup_sdata(sdata, type); if (params) { ndev->ieee80211_ptr->use_4addr = params->use_4addr; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = params->use_4addr; } ret = register_netdevice(ndev); if (ret) goto fail; mutex_lock(&local->iflist_mtx); list_add_tail_rcu(&sdata->list, &local->interfaces); mutex_unlock(&local->iflist_mtx); if (new_dev) *new_dev = ndev; return 0; fail: free_netdev(ndev); return ret; } void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) { ASSERT_RTNL(); mutex_lock(&sdata->local->iflist_mtx); list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); synchronize_rcu(); unregister_netdevice(sdata->dev); } /* * Remove all interfaces, may only be called at hardware unregistration * time because it doesn't do RCU-safe list removals. 
*/ void ieee80211_remove_interfaces(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *tmp; LIST_HEAD(unreg_list); ASSERT_RTNL(); mutex_lock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); unregister_netdevice_queue(sdata->dev, &unreg_list); } mutex_unlock(&local->iflist_mtx); unregister_netdevice_many(&unreg_list); list_del(&unreg_list); } static u32 ieee80211_idle_off(struct ieee80211_local *local, const char *reason) { if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) return 0; #ifdef CONFIG_MAC80211_VERBOSE_DEBUG wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason); #endif local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; } static u32 ieee80211_idle_on(struct ieee80211_local *local) { if (local->hw.conf.flags & IEEE80211_CONF_IDLE) return 0; #ifdef CONFIG_MAC80211_VERBOSE_DEBUG wiphy_debug(local->hw.wiphy, "device now idle\n"); #endif drv_flush(local, false); local->hw.conf.flags |= IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; } u32 __ieee80211_recalc_idle(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; int count = 0; bool working = false, scanning = false, hw_roc = false; struct ieee80211_work *wk; unsigned int led_trig_start = 0, led_trig_stop = 0; #ifdef CONFIG_PROVE_LOCKING WARN_ON(debug_locks && !lockdep_rtnl_is_held() && !lockdep_is_held(&local->iflist_mtx)); #endif lockdep_assert_held(&local->mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) { sdata->vif.bss_conf.idle = true; continue; } sdata->old_idle = sdata->vif.bss_conf.idle; /* do not count disabled managed interfaces */ if (sdata->vif.type == NL80211_IFTYPE_STATION && !sdata->u.mgd.associated) { sdata->vif.bss_conf.idle = true; continue; } /* do not count unused IBSS interfaces */ if (sdata->vif.type == NL80211_IFTYPE_ADHOC && !sdata->u.ibss.ssid_len) { sdata->vif.bss_conf.idle = true; continue; } /* count everything else */ count++; } list_for_each_entry(wk, &local->work_list, list) { working = true; wk->sdata->vif.bss_conf.idle = false; } if (local->scan_sdata) { scanning = true; local->scan_sdata->vif.bss_conf.idle = false; } if (local->hw_roc_channel) hw_roc = true; list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->old_idle == sdata->vif.bss_conf.idle) continue; if (!ieee80211_sdata_running(sdata)) continue; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE); } if (working || scanning || hw_roc) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; if (count) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); if (hw_roc) return ieee80211_idle_off(local, "hw remain-on-channel"); if (working) return ieee80211_idle_off(local, "working"); if (scanning) return ieee80211_idle_off(local, "scanning"); if (!count) return ieee80211_idle_on(local); else return ieee80211_idle_off(local, "in use"); return 0; } void ieee80211_recalc_idle(struct ieee80211_local *local) { u32 chg; mutex_lock(&local->iflist_mtx); chg = __ieee80211_recalc_idle(local); mutex_unlock(&local->iflist_mtx); if (chg) ieee80211_hw_config(local, chg); } static int netdev_notify(struct notifier_block *nb, unsigned long state, void *ndev) { struct net_device *dev = ndev; struct ieee80211_sub_if_data *sdata; if (state != NETDEV_CHANGENAME) return 0; if 
(!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) return 0; if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) return 0; sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(sdata->name, dev->name, IFNAMSIZ); ieee80211_debugfs_rename_netdev(sdata); return 0; } static struct notifier_block mac80211_netdev_notifier = { .notifier_call = netdev_notify, }; int ieee80211_iface_init(void) { return register_netdevice_notifier(&mac80211_netdev_notifier); } void ieee80211_iface_exit(void) { unregister_netdevice_notifier(&mac80211_netdev_notifier); }
./CrossVul/dataset_final_sorted/CWE-264/c/good_3524_13
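Editor's note: ieee80211_assign_perm_addr() above only walks the derived address space when the wiphy's addr_mask is contiguous, which it tests with __ffs64(mask) + hweight64(mask) != fls64(mask). The sketch below restates that bit trick with compiler builtins standing in for the kernel helpers; it is an illustration, not kernel code: a mask is contiguous exactly when the index of its lowest set bit plus its popcount equals one past the index of its highest set bit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mask_is_contiguous(uint64_t mask)
{
    if (mask == 0)
        return false;                         /* ctz/clz are undefined on 0 */
    int lowest = __builtin_ctzll(mask);       /* kernel: __ffs64() */
    int bits   = __builtin_popcountll(mask);  /* kernel: hweight64() */
    int top    = 64 - __builtin_clzll(mask);  /* kernel: fls64() */
    return lowest + bits == top;
}

int main(void)
{
    printf("%d\n", mask_is_contiguous(0x0000000000ff0000ULL)); /* 1: one run  */
    printf("%d\n", mask_is_contiguous(0x0000000000f0f000ULL)); /* 0: two runs */
    return 0;
}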
crossvul-cpp_data_good_5619_0
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/user_namespace.h> #include <linux/proc_fs.h> #include <linux/highuid.h> #include <linux/cred.h> #include <linux/securebits.h> #include <linux/keyctl.h> #include <linux/key-type.h> #include <keys/user-type.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/ctype.h> #include <linux/projid.h> #include <linux/fs_struct.h> static struct kmem_cache *user_ns_cachep __read_mostly; static bool new_idmap_permitted(const struct file *file, struct user_namespace *ns, int cap_setid, struct uid_gid_map *map); static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns) { /* Start with the same capabilities as init but useless for doing * anything as the capabilities are bound to the new user namespace. */ cred->securebits = SECUREBITS_DEFAULT; cred->cap_inheritable = CAP_EMPTY_SET; cred->cap_permitted = CAP_FULL_SET; cred->cap_effective = CAP_FULL_SET; cred->cap_bset = CAP_FULL_SET; #ifdef CONFIG_KEYS key_put(cred->request_key_auth); cred->request_key_auth = NULL; #endif /* tgcred will be cleared in our caller because CLONE_THREAD won't be set */ cred->user_ns = user_ns; } /* * Create a new user namespace, deriving the creator from the user in the * passed credentials, and replacing that user with the new root user for the * new namespace. * * This is called by copy_creds(), which will finish setting the target task's * credentials. */ int create_user_ns(struct cred *new) { struct user_namespace *ns, *parent_ns = new->user_ns; kuid_t owner = new->euid; kgid_t group = new->egid; int ret; /* * Verify that we can not violate the policy of which files * may be accessed that is specified by the root directory, * by verifying that the root directory is at the root of the * mount namespace which allows all files to be accessed. */ if (current_chrooted()) return -EPERM; /* The creator needs a mapping in the parent user namespace * or else we won't be able to reasonably tell userspace who * created a user_namespace. */ if (!kuid_has_mapping(parent_ns, owner) || !kgid_has_mapping(parent_ns, group)) return -EPERM; ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL); if (!ns) return -ENOMEM; ret = proc_alloc_inum(&ns->proc_inum); if (ret) { kmem_cache_free(user_ns_cachep, ns); return ret; } atomic_set(&ns->count, 1); /* Leave the new->user_ns reference with the new user namespace. 
*/ ns->parent = parent_ns; ns->owner = owner; ns->group = group; set_cred_user_ns(new, ns); update_mnt_policy(ns); return 0; } int unshare_userns(unsigned long unshare_flags, struct cred **new_cred) { struct cred *cred; if (!(unshare_flags & CLONE_NEWUSER)) return 0; cred = prepare_creds(); if (!cred) return -ENOMEM; *new_cred = cred; return create_user_ns(cred); } void free_user_ns(struct user_namespace *ns) { struct user_namespace *parent; do { parent = ns->parent; proc_free_inum(ns->proc_inum); kmem_cache_free(user_ns_cachep, ns); ns = parent; } while (atomic_dec_and_test(&parent->count)); } EXPORT_SYMBOL(free_user_ns); static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) { unsigned idx, extents; u32 first, last, id2; id2 = id + count - 1; /* Find the matching extent */ extents = map->nr_extents; smp_read_barrier_depends(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].first; last = first + map->extent[idx].count - 1; if (id >= first && id <= last && (id2 >= first && id2 <= last)) break; } /* Map the id or note failure */ if (idx < extents) id = (id - first) + map->extent[idx].lower_first; else id = (u32) -1; return id; } static u32 map_id_down(struct uid_gid_map *map, u32 id) { unsigned idx, extents; u32 first, last; /* Find the matching extent */ extents = map->nr_extents; smp_read_barrier_depends(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].first; last = first + map->extent[idx].count - 1; if (id >= first && id <= last) break; } /* Map the id or note failure */ if (idx < extents) id = (id - first) + map->extent[idx].lower_first; else id = (u32) -1; return id; } static u32 map_id_up(struct uid_gid_map *map, u32 id) { unsigned idx, extents; u32 first, last; /* Find the matching extent */ extents = map->nr_extents; smp_read_barrier_depends(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].lower_first; last = first + map->extent[idx].count - 1; if (id >= first && id <= last) break; } /* Map the id or note failure */ if (idx < extents) id = (id - first) + map->extent[idx].first; else id = (u32) -1; return id; } /** * make_kuid - Map a user-namespace uid pair into a kuid. * @ns: User namespace that the uid is in * @uid: User identifier * * Maps a user-namespace uid pair into a kernel internal kuid, * and returns that kuid. * * When there is no mapping defined for the user-namespace uid * pair INVALID_UID is returned. Callers are expected to test * for and handle INVALID_UID being returned. INVALID_UID * may be tested for using uid_valid(). */ kuid_t make_kuid(struct user_namespace *ns, uid_t uid) { /* Map the uid to a global kernel uid */ return KUIDT_INIT(map_id_down(&ns->uid_map, uid)); } EXPORT_SYMBOL(make_kuid); /** * from_kuid - Create a uid from a kuid user-namespace pair. * @targ: The user namespace we want a uid in. * @kuid: The kernel internal uid to start with. * * Map @kuid into the user-namespace specified by @targ and * return the resulting uid. * * There is always a mapping into the initial user_namespace. * * If @kuid has no mapping in @targ (uid_t)-1 is returned. */ uid_t from_kuid(struct user_namespace *targ, kuid_t kuid) { /* Map the uid from a global kernel uid */ return map_id_up(&targ->uid_map, __kuid_val(kuid)); } EXPORT_SYMBOL(from_kuid); /** * from_kuid_munged - Create a uid from a kuid user-namespace pair. * @targ: The user namespace we want a uid in. * @kuid: The kernel internal uid to start with. * * Map @kuid into the user-namespace specified by @targ and * return the resulting uid. 
* * There is always a mapping into the initial user_namespace. * * Unlike from_kuid from_kuid_munged never fails and always * returns a valid uid. This makes from_kuid_munged appropriate * for use in syscalls like stat and getuid where failing the * system call and failing to provide a valid uid are not * options. * * If @kuid has no mapping in @targ overflowuid is returned. */ uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid) { uid_t uid; uid = from_kuid(targ, kuid); if (uid == (uid_t) -1) uid = overflowuid; return uid; } EXPORT_SYMBOL(from_kuid_munged); /** * make_kgid - Map a user-namespace gid pair into a kgid. * @ns: User namespace that the gid is in * @gid: group identifier * * Maps a user-namespace gid pair into a kernel internal kgid, * and returns that kgid. * * When there is no mapping defined for the user-namespace gid * pair INVALID_GID is returned. Callers are expected to test * for and handle INVALID_GID being returned. INVALID_GID may be * tested for using gid_valid(). */ kgid_t make_kgid(struct user_namespace *ns, gid_t gid) { /* Map the gid to a global kernel gid */ return KGIDT_INIT(map_id_down(&ns->gid_map, gid)); } EXPORT_SYMBOL(make_kgid); /** * from_kgid - Create a gid from a kgid user-namespace pair. * @targ: The user namespace we want a gid in. * @kgid: The kernel internal gid to start with. * * Map @kgid into the user-namespace specified by @targ and * return the resulting gid. * * There is always a mapping into the initial user_namespace. * * If @kgid has no mapping in @targ (gid_t)-1 is returned. */ gid_t from_kgid(struct user_namespace *targ, kgid_t kgid) { /* Map the gid from a global kernel gid */ return map_id_up(&targ->gid_map, __kgid_val(kgid)); } EXPORT_SYMBOL(from_kgid); /** * from_kgid_munged - Create a gid from a kgid user-namespace pair. * @targ: The user namespace we want a gid in. * @kgid: The kernel internal gid to start with. * * Map @kgid into the user-namespace specified by @targ and * return the resulting gid. * * There is always a mapping into the initial user_namespace. * * Unlike from_kgid from_kgid_munged never fails and always * returns a valid gid. This makes from_kgid_munged appropriate * for use in syscalls like stat and getgid where failing the * system call and failing to provide a valid gid are not options. * * If @kgid has no mapping in @targ overflowgid is returned. */ gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid) { gid_t gid; gid = from_kgid(targ, kgid); if (gid == (gid_t) -1) gid = overflowgid; return gid; } EXPORT_SYMBOL(from_kgid_munged); /** * make_kprojid - Map a user-namespace projid pair into a kprojid. * @ns: User namespace that the projid is in * @projid: Project identifier * * Maps a user-namespace projid pair into a kernel internal kprojid, * and returns that kprojid. * * When there is no mapping defined for the user-namespace projid * pair INVALID_PROJID is returned. Callers are expected to test * for and handle INVALID_PROJID being returned. INVALID_PROJID * may be tested for using projid_valid(). */ kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid) { /* Map the projid to a global kernel projid */ return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid)); } EXPORT_SYMBOL(make_kprojid); /** * from_kprojid - Create a projid from a kprojid user-namespace pair. * @targ: The user namespace we want a projid in. * @kprojid: The kernel internal project identifier to start with. * * Map @kprojid into the user-namespace specified by @targ and * return the resulting projid. 
* * There is always a mapping into the initial user_namespace. * * If @kprojid has no mapping in @targ (projid_t)-1 is returned. */ projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid) { /* Map the projid from a global kernel projid */ return map_id_up(&targ->projid_map, __kprojid_val(kprojid)); } EXPORT_SYMBOL(from_kprojid); /** * from_kprojid_munged - Create a projid from a kprojid user-namespace pair. * @targ: The user namespace we want a projid in. * @kprojid: The kernel internal projid to start with. * * Map @kprojid into the user-namespace specified by @targ and * return the resulting projid. * * There is always a mapping into the initial user_namespace. * * Unlike from_kprojid from_kprojid_munged never fails and always * returns a valid projid. This makes from_kprojid_munged * appropriate for use in syscalls like stat where * failing the system call and failing to provide a valid projid are * not options. * * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned. */ projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid) { projid_t projid; projid = from_kprojid(targ, kprojid); if (projid == (projid_t) -1) projid = OVERFLOW_PROJID; return projid; } EXPORT_SYMBOL(from_kprojid_munged); static int uid_m_show(struct seq_file *seq, void *v) { struct user_namespace *ns = seq->private; struct uid_gid_extent *extent = v; struct user_namespace *lower_ns; uid_t lower; lower_ns = seq_user_ns(seq); if ((lower_ns == ns) && lower_ns->parent) lower_ns = lower_ns->parent; lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first)); seq_printf(seq, "%10u %10u %10u\n", extent->first, lower, extent->count); return 0; } static int gid_m_show(struct seq_file *seq, void *v) { struct user_namespace *ns = seq->private; struct uid_gid_extent *extent = v; struct user_namespace *lower_ns; gid_t lower; lower_ns = seq_user_ns(seq); if ((lower_ns == ns) && lower_ns->parent) lower_ns = lower_ns->parent; lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first)); seq_printf(seq, "%10u %10u %10u\n", extent->first, lower, extent->count); return 0; } static int projid_m_show(struct seq_file *seq, void *v) { struct user_namespace *ns = seq->private; struct uid_gid_extent *extent = v; struct user_namespace *lower_ns; projid_t lower; lower_ns = seq_user_ns(seq); if ((lower_ns == ns) && lower_ns->parent) lower_ns = lower_ns->parent; lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first)); seq_printf(seq, "%10u %10u %10u\n", extent->first, lower, extent->count); return 0; } static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map) { struct uid_gid_extent *extent = NULL; loff_t pos = *ppos; if (pos < map->nr_extents) extent = &map->extent[pos]; return extent; } static void *uid_m_start(struct seq_file *seq, loff_t *ppos) { struct user_namespace *ns = seq->private; return m_start(seq, ppos, &ns->uid_map); } static void *gid_m_start(struct seq_file *seq, loff_t *ppos) { struct user_namespace *ns = seq->private; return m_start(seq, ppos, &ns->gid_map); } static void *projid_m_start(struct seq_file *seq, loff_t *ppos) { struct user_namespace *ns = seq->private; return m_start(seq, ppos, &ns->projid_map); } static void *m_next(struct seq_file *seq, void *v, loff_t *pos) { (*pos)++; return seq->op->start(seq, pos); } static void m_stop(struct seq_file *seq, void *v) { return; } struct seq_operations proc_uid_seq_operations = { .start = uid_m_start, .stop = m_stop, .next = m_next, .show = uid_m_show, }; struct seq_operations 
proc_gid_seq_operations = { .start = gid_m_start, .stop = m_stop, .next = m_next, .show = gid_m_show, }; struct seq_operations proc_projid_seq_operations = { .start = projid_m_start, .stop = m_stop, .next = m_next, .show = projid_m_show, }; static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent *extent) { u32 upper_first, lower_first, upper_last, lower_last; unsigned idx; upper_first = extent->first; lower_first = extent->lower_first; upper_last = upper_first + extent->count - 1; lower_last = lower_first + extent->count - 1; for (idx = 0; idx < new_map->nr_extents; idx++) { u32 prev_upper_first, prev_lower_first; u32 prev_upper_last, prev_lower_last; struct uid_gid_extent *prev; prev = &new_map->extent[idx]; prev_upper_first = prev->first; prev_lower_first = prev->lower_first; prev_upper_last = prev_upper_first + prev->count - 1; prev_lower_last = prev_lower_first + prev->count - 1; /* Does the upper range intersect a previous extent? */ if ((prev_upper_first <= upper_last) && (prev_upper_last >= upper_first)) return true; /* Does the lower range intersect a previous extent? */ if ((prev_lower_first <= lower_last) && (prev_lower_last >= lower_first)) return true; } return false; } static DEFINE_MUTEX(id_map_mutex); static ssize_t map_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos, int cap_setid, struct uid_gid_map *map, struct uid_gid_map *parent_map) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct uid_gid_map new_map; unsigned idx; struct uid_gid_extent *extent = NULL; unsigned long page = 0; char *kbuf, *pos, *next_line; ssize_t ret = -EINVAL; /* * The id_map_mutex serializes all writes to any given map. * * Any map is only ever written once. * * An id map fits within 1 cache line on most architectures. * * On read nothing needs to be done unless you are on an * architecture with a crazy cache coherency model like alpha. * * There is a one time data dependency between reading the * count of the extents and the values of the extents. The * desired behavior is to see the values of the extents that * were written before the count of the extents. * * To achieve this smp_wmb() is used to guarantee the write * order and smp_read_barrier_depends() guarantees that we * don't have crazy architectures returning stale data. * */ mutex_lock(&id_map_mutex); ret = -EPERM; /* Only allow one successful write to the map */ if (map->nr_extents != 0) goto out; /* Require the appropriate privilege CAP_SETUID or CAP_SETGID * over the user namespace in order to set the id mapping. 
*/ if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) goto out; /* Get a buffer */ ret = -ENOMEM; page = __get_free_page(GFP_TEMPORARY); kbuf = (char *) page; if (!page) goto out; /* Only allow <= page size writes at the beginning of the file */ ret = -EINVAL; if ((*ppos != 0) || (count >= PAGE_SIZE)) goto out; /* Slurp in the user data */ ret = -EFAULT; if (copy_from_user(kbuf, buf, count)) goto out; kbuf[count] = '\0'; /* Parse the user data */ ret = -EINVAL; pos = kbuf; new_map.nr_extents = 0; for (;pos; pos = next_line) { extent = &new_map.extent[new_map.nr_extents]; /* Find the end of line and ensure I don't look past it */ next_line = strchr(pos, '\n'); if (next_line) { *next_line = '\0'; next_line++; if (*next_line == '\0') next_line = NULL; } pos = skip_spaces(pos); extent->first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->lower_first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->count = simple_strtoul(pos, &pos, 10); if (*pos && !isspace(*pos)) goto out; /* Verify there is no trailing junk on the line */ pos = skip_spaces(pos); if (*pos != '\0') goto out; /* Verify we have been given valid starting values */ if ((extent->first == (u32) -1) || (extent->lower_first == (u32) -1)) goto out; /* Verify count is not zero and does not cause the extent to wrap */ if ((extent->first + extent->count) <= extent->first) goto out; if ((extent->lower_first + extent->count) <= extent->lower_first) goto out; /* Do the ranges in extent overlap any previous extents? */ if (mappings_overlap(&new_map, extent)) goto out; new_map.nr_extents++; /* Fail if the file contains too many extents */ if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) && (next_line != NULL)) goto out; } /* Be very certain the new map actually exists */ if (new_map.nr_extents == 0) goto out; ret = -EPERM; /* Validate the user is allowed to use the user ids mapped to. */ if (!new_idmap_permitted(file, ns, cap_setid, &new_map)) goto out; /* Map the lower ids from the parent user namespace to the * kernel global id space. */ for (idx = 0; idx < new_map.nr_extents; idx++) { u32 lower_first; extent = &new_map.extent[idx]; lower_first = map_id_range_down(parent_map, extent->lower_first, extent->count); /* Fail if we can not map the specified extent to * the kernel global id space. 
*/ if (lower_first == (u32) -1) goto out; extent->lower_first = lower_first; } /* Install the map */ memcpy(map->extent, new_map.extent, new_map.nr_extents*sizeof(new_map.extent[0])); smp_wmb(); map->nr_extents = new_map.nr_extents; *ppos = count; ret = count; out: mutex_unlock(&id_map_mutex); if (page) free_page(page); return ret; } ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct user_namespace *seq_ns = seq_user_ns(seq); if (!ns->parent) return -EPERM; if ((seq_ns != ns) && (seq_ns != ns->parent)) return -EPERM; return map_write(file, buf, size, ppos, CAP_SETUID, &ns->uid_map, &ns->parent->uid_map); } ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct user_namespace *seq_ns = seq_user_ns(seq); if (!ns->parent) return -EPERM; if ((seq_ns != ns) && (seq_ns != ns->parent)) return -EPERM; return map_write(file, buf, size, ppos, CAP_SETGID, &ns->gid_map, &ns->parent->gid_map); } ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct user_namespace *seq_ns = seq_user_ns(seq); if (!ns->parent) return -EPERM; if ((seq_ns != ns) && (seq_ns != ns->parent)) return -EPERM; /* Anyone can set any valid project id, no capability needed */ return map_write(file, buf, size, ppos, -1, &ns->projid_map, &ns->parent->projid_map); } static bool new_idmap_permitted(const struct file *file, struct user_namespace *ns, int cap_setid, struct uid_gid_map *new_map) { /* Allow mapping to your own filesystem ids */ if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) { u32 id = new_map->extent[0].lower_first; if (cap_setid == CAP_SETUID) { kuid_t uid = make_kuid(ns->parent, id); if (uid_eq(uid, file->f_cred->fsuid)) return true; } else if (cap_setid == CAP_SETGID) { kgid_t gid = make_kgid(ns->parent, id); if (gid_eq(gid, file->f_cred->fsgid)) return true; } } /* Allow anyone to set a mapping that doesn't require privilege */ if (!cap_valid(cap_setid)) return true; /* Allow the specified ids if we have the appropriate capability * (CAP_SETUID or CAP_SETGID) over the parent user namespace. * And the opener of the id file also had the appropriate capability. */ if (ns_capable(ns->parent, cap_setid) && file_ns_capable(file, ns->parent, cap_setid)) return true; return false; } static void *userns_get(struct task_struct *task) { struct user_namespace *user_ns; rcu_read_lock(); user_ns = get_user_ns(__task_cred(task)->user_ns); rcu_read_unlock(); return user_ns; } static void userns_put(void *ns) { put_user_ns(ns); } static int userns_install(struct nsproxy *nsproxy, void *ns) { struct user_namespace *user_ns = ns; struct cred *cred; /* Don't allow gaining capabilities by reentering * the same user namespace. 
*/ if (user_ns == current_user_ns()) return -EINVAL; /* Threaded processes may not enter a different user namespace */ if (atomic_read(&current->mm->mm_users) > 1) return -EINVAL; if (current->fs->users != 1) return -EINVAL; if (!ns_capable(user_ns, CAP_SYS_ADMIN)) return -EPERM; cred = prepare_creds(); if (!cred) return -ENOMEM; put_user_ns(cred->user_ns); set_cred_user_ns(cred, get_user_ns(user_ns)); return commit_creds(cred); } static unsigned int userns_inum(void *ns) { struct user_namespace *user_ns = ns; return user_ns->proc_inum; } const struct proc_ns_operations userns_operations = { .name = "user", .type = CLONE_NEWUSER, .get = userns_get, .put = userns_put, .install = userns_install, .inum = userns_inum, }; static __init int user_namespaces_init(void) { user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); return 0; } module_init(user_namespaces_init);
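/*
 * Editor's note: a minimal, hypothetical userspace sketch of the interface
 * map_write() above implements. The map is one or more "first lower_first
 * count" triples, delivered in a single write() of less than PAGE_SIZE at
 * file offset 0 (the handler rejects *ppos != 0 and count >= PAGE_SIZE, and
 * caps the map at UID_GID_MAP_MAX_EXTENTS lines). The pid and the mapping
 * values below are illustrative assumptions only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static int write_uid_map_example(pid_t pid)
{
	/* Map uid 0 inside the namespace to uid 100000 outside, 1 id wide. */
	const char *map = "0 100000 1\n";
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)pid);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* The whole map must arrive in one write() starting at offset 0. */
	if (write(fd, map, strlen(map)) != (ssize_t)strlen(map)) {
		close(fd);
		return -1;
	}
	return close(fd);
}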
./CrossVul/dataset_final_sorted/CWE-264/c/good_5619_0
crossvul-cpp_data_good_5192_0
/* * AppArmor security module * * This file contains AppArmor LSM hooks. * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/lsm_hooks.h> #include <linux/moduleparam.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/ptrace.h> #include <linux/ctype.h> #include <linux/sysctl.h> #include <linux/audit.h> #include <linux/user_namespace.h> #include <net/sock.h> #include "include/apparmor.h" #include "include/apparmorfs.h" #include "include/audit.h" #include "include/capability.h" #include "include/context.h" #include "include/file.h" #include "include/ipc.h" #include "include/path.h" #include "include/policy.h" #include "include/procattr.h" /* Flag indicating whether initialization completed */ int apparmor_initialized __initdata; /* * LSM hook functions */ /* * free the associated aa_task_cxt and put its profiles */ static void apparmor_cred_free(struct cred *cred) { aa_free_task_context(cred_cxt(cred)); cred_cxt(cred) = NULL; } /* * allocate the apparmor part of blank credentials */ static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp) { /* freed by apparmor_cred_free */ struct aa_task_cxt *cxt = aa_alloc_task_context(gfp); if (!cxt) return -ENOMEM; cred_cxt(cred) = cxt; return 0; } /* * prepare new aa_task_cxt for modification by prepare_cred block */ static int apparmor_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { /* freed by apparmor_cred_free */ struct aa_task_cxt *cxt = aa_alloc_task_context(gfp); if (!cxt) return -ENOMEM; aa_dup_task_context(cxt, cred_cxt(old)); cred_cxt(new) = cxt; return 0; } /* * transfer the apparmor data to a blank set of creds */ static void apparmor_cred_transfer(struct cred *new, const struct cred *old) { const struct aa_task_cxt *old_cxt = cred_cxt(old); struct aa_task_cxt *new_cxt = cred_cxt(new); aa_dup_task_context(new_cxt, old_cxt); } static int apparmor_ptrace_access_check(struct task_struct *child, unsigned int mode) { return aa_ptrace(current, child, mode); } static int apparmor_ptrace_traceme(struct task_struct *parent) { return aa_ptrace(parent, current, PTRACE_MODE_ATTACH); } /* Derived from security/commoncap.c:cap_capget */ static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { struct aa_profile *profile; const struct cred *cred; rcu_read_lock(); cred = __task_cred(target); profile = aa_cred_profile(cred); /* * cap_capget is stacked ahead of this and will * initialize effective and permitted. 
*/ if (!unconfined(profile) && !COMPLAIN_MODE(profile)) { *effective = cap_intersect(*effective, profile->caps.allow); *permitted = cap_intersect(*permitted, profile->caps.allow); } rcu_read_unlock(); return 0; } static int apparmor_capable(const struct cred *cred, struct user_namespace *ns, int cap, int audit) { struct aa_profile *profile; int error = 0; profile = aa_cred_profile(cred); if (!unconfined(profile)) error = aa_capable(profile, cap, audit); return error; } /** * common_perm - basic common permission check wrapper fn for paths * @op: operation being checked * @path: path to check permission of (NOT NULL) * @mask: requested permissions mask * @cond: conditional info for the permission request (NOT NULL) * * Returns: %0 else error code if error or permission denied */ static int common_perm(int op, const struct path *path, u32 mask, struct path_cond *cond) { struct aa_profile *profile; int error = 0; profile = __aa_current_profile(); if (!unconfined(profile)) error = aa_path_perm(op, profile, path, 0, mask, cond); return error; } /** * common_perm_dir_dentry - common permission wrapper when path is dir, dentry * @op: operation being checked * @dir: directory of the dentry (NOT NULL) * @dentry: dentry to check (NOT NULL) * @mask: requested permissions mask * @cond: conditional info for the permission request (NOT NULL) * * Returns: %0 else error code if error or permission denied */ static int common_perm_dir_dentry(int op, const struct path *dir, struct dentry *dentry, u32 mask, struct path_cond *cond) { struct path path = { dir->mnt, dentry }; return common_perm(op, &path, mask, cond); } /** * common_perm_path - common permission wrapper when mnt, dentry * @op: operation being checked * @path: location to check (NOT NULL) * @mask: requested permissions mask * * Returns: %0 else error code if error or permission denied */ static inline int common_perm_path(int op, const struct path *path, u32 mask) { struct path_cond cond = { d_backing_inode(path->dentry)->i_uid, d_backing_inode(path->dentry)->i_mode }; if (!mediated_filesystem(path->dentry)) return 0; return common_perm(op, path, mask, &cond); } /** * common_perm_rm - common permission wrapper for operations doing rm * @op: operation being checked * @dir: directory that the dentry is in (NOT NULL) * @dentry: dentry being rm'd (NOT NULL) * @mask: requested permission mask * * Returns: %0 else error code if error or permission denied */ static int common_perm_rm(int op, const struct path *dir, struct dentry *dentry, u32 mask) { struct inode *inode = d_backing_inode(dentry); struct path_cond cond = { }; if (!inode || !mediated_filesystem(dentry)) return 0; cond.uid = inode->i_uid; cond.mode = inode->i_mode; return common_perm_dir_dentry(op, dir, dentry, mask, &cond); } /** * common_perm_create - common permission wrapper for operations doing create * @op: operation being checked * @dir: directory that dentry will be created in (NOT NULL) * @dentry: dentry to create (NOT NULL) * @mask: request permission mask * @mode: created file mode * * Returns: %0 else error code if error or permission denied */ static int common_perm_create(int op, const struct path *dir, struct dentry *dentry, u32 mask, umode_t mode) { struct path_cond cond = { current_fsuid(), mode }; if (!mediated_filesystem(dir->dentry)) return 0; return common_perm_dir_dentry(op, dir, dentry, mask, &cond); } static int apparmor_path_unlink(const struct path *dir, struct dentry *dentry) { return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE); } static int 
apparmor_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode) { return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE, S_IFDIR); } static int apparmor_path_rmdir(const struct path *dir, struct dentry *dentry) { return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE); } static int apparmor_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode); } static int apparmor_path_truncate(const struct path *path) { return common_perm_path(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE); } static int apparmor_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name) { return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE, S_IFLNK); } static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) { struct aa_profile *profile; int error = 0; if (!mediated_filesystem(old_dentry)) return 0; profile = aa_current_profile(); if (!unconfined(profile)) error = aa_path_link(profile, old_dentry, new_dir, new_dentry); return error; } static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) { struct aa_profile *profile; int error = 0; if (!mediated_filesystem(old_dentry)) return 0; profile = aa_current_profile(); if (!unconfined(profile)) { struct path old_path = { old_dir->mnt, old_dentry }; struct path new_path = { new_dir->mnt, new_dentry }; struct path_cond cond = { d_backing_inode(old_dentry)->i_uid, d_backing_inode(old_dentry)->i_mode }; error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0, MAY_READ | AA_MAY_META_READ | MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_DELETE, &cond); if (!error) error = aa_path_perm(OP_RENAME_DEST, profile, &new_path, 0, MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_CREATE, &cond); } return error; } static int apparmor_path_chmod(const struct path *path, umode_t mode) { return common_perm_path(OP_CHMOD, path, AA_MAY_CHMOD); } static int apparmor_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { return common_perm_path(OP_CHOWN, path, AA_MAY_CHOWN); } static int apparmor_inode_getattr(const struct path *path) { return common_perm_path(OP_GETATTR, path, AA_MAY_META_READ); } static int apparmor_file_open(struct file *file, const struct cred *cred) { struct aa_file_cxt *fcxt = file->f_security; struct aa_profile *profile; int error = 0; if (!mediated_filesystem(file->f_path.dentry)) return 0; /* If in exec, permission is handled by bprm hooks. * Cache permissions granted by the previous exec check, with * implicit read and executable mmap which are required to * actually execute the image. 
*/ if (current->in_execve) { fcxt->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP; return 0; } profile = aa_cred_profile(cred); if (!unconfined(profile)) { struct inode *inode = file_inode(file); struct path_cond cond = { inode->i_uid, inode->i_mode }; error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0, aa_map_file_to_perms(file), &cond); /* todo cache full allowed permissions set and state */ fcxt->allow = aa_map_file_to_perms(file); } return error; } static int apparmor_file_alloc_security(struct file *file) { /* freed by apparmor_file_free_security */ file->f_security = aa_alloc_file_context(GFP_KERNEL); if (!file->f_security) return -ENOMEM; return 0; } static void apparmor_file_free_security(struct file *file) { struct aa_file_cxt *cxt = file->f_security; aa_free_file_context(cxt); } static int common_file_perm(int op, struct file *file, u32 mask) { struct aa_file_cxt *fcxt = file->f_security; struct aa_profile *profile, *fprofile = aa_cred_profile(file->f_cred); int error = 0; BUG_ON(!fprofile); if (!file->f_path.mnt || !mediated_filesystem(file->f_path.dentry)) return 0; profile = __aa_current_profile(); /* revalidate access, if task is unconfined, or the cached cred * doesn't match or if the request is for more permissions than * was granted. * * Note: the test for !unconfined(fprofile) is to handle file * delegation from unconfined tasks */ if (!unconfined(profile) && !unconfined(fprofile) && ((fprofile != profile) || (mask & ~fcxt->allow))) error = aa_file_perm(op, profile, file, mask); return error; } static int apparmor_file_permission(struct file *file, int mask) { return common_file_perm(OP_FPERM, file, mask); } static int apparmor_file_lock(struct file *file, unsigned int cmd) { u32 mask = AA_MAY_LOCK; if (cmd == F_WRLCK) mask |= MAY_WRITE; return common_file_perm(OP_FLOCK, file, mask); } static int common_mmap(int op, struct file *file, unsigned long prot, unsigned long flags) { int mask = 0; if (!file || !file->f_security) return 0; if (prot & PROT_READ) mask |= MAY_READ; /* * Private mappings don't require write perms since they don't * write back to the files */ if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE)) mask |= MAY_WRITE; if (prot & PROT_EXEC) mask |= AA_EXEC_MMAP; return common_file_perm(op, file, mask); } static int apparmor_mmap_file(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) { return common_mmap(OP_FMMAP, file, prot, flags); } static int apparmor_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { return common_mmap(OP_FMPROT, vma->vm_file, prot, !(vma->vm_flags & VM_SHARED) ? 
MAP_PRIVATE : 0); } static int apparmor_getprocattr(struct task_struct *task, char *name, char **value) { int error = -ENOENT; /* released below */ const struct cred *cred = get_task_cred(task); struct aa_task_cxt *cxt = cred_cxt(cred); struct aa_profile *profile = NULL; if (strcmp(name, "current") == 0) profile = aa_get_newest_profile(cxt->profile); else if (strcmp(name, "prev") == 0 && cxt->previous) profile = aa_get_newest_profile(cxt->previous); else if (strcmp(name, "exec") == 0 && cxt->onexec) profile = aa_get_newest_profile(cxt->onexec); else error = -EINVAL; if (profile) error = aa_getprocattr(profile, value); aa_put_profile(profile); put_cred(cred); return error; } static int apparmor_setprocattr(struct task_struct *task, char *name, void *value, size_t size) { struct common_audit_data sa; struct apparmor_audit_data aad = {0,}; char *command, *largs = NULL, *args = value; size_t arg_size; int error; if (size == 0) return -EINVAL; /* task can only write its own attributes */ if (current != task) return -EACCES; /* AppArmor requires that the buffer must be null terminated atm */ if (args[size - 1] != '\0') { /* null terminate */ largs = args = kmalloc(size + 1, GFP_KERNEL); if (!args) return -ENOMEM; memcpy(args, value, size); args[size] = '\0'; } error = -EINVAL; args = strim(args); command = strsep(&args, " "); if (!args) goto out; args = skip_spaces(args); if (!*args) goto out; arg_size = size - (args - (char *) value); if (strcmp(name, "current") == 0) { if (strcmp(command, "changehat") == 0) { error = aa_setprocattr_changehat(args, arg_size, !AA_DO_TEST); } else if (strcmp(command, "permhat") == 0) { error = aa_setprocattr_changehat(args, arg_size, AA_DO_TEST); } else if (strcmp(command, "changeprofile") == 0) { error = aa_setprocattr_changeprofile(args, !AA_ONEXEC, !AA_DO_TEST); } else if (strcmp(command, "permprofile") == 0) { error = aa_setprocattr_changeprofile(args, !AA_ONEXEC, AA_DO_TEST); } else goto fail; } else if (strcmp(name, "exec") == 0) { if (strcmp(command, "exec") == 0) error = aa_setprocattr_changeprofile(args, AA_ONEXEC, !AA_DO_TEST); else goto fail; } else /* only support the "current" and "exec" process attributes */ goto fail; if (!error) error = size; out: kfree(largs); return error; fail: sa.type = LSM_AUDIT_DATA_NONE; sa.aad = &aad; aad.profile = aa_current_profile(); aad.op = OP_SETPROCATTR; aad.info = name; aad.error = error = -EINVAL; aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL); goto out; } static int apparmor_task_setrlimit(struct task_struct *task, unsigned int resource, struct rlimit *new_rlim) { struct aa_profile *profile = __aa_current_profile(); int error = 0; if (!unconfined(profile)) error = aa_task_setrlimit(profile, task, resource, new_rlim); return error; } static struct security_hook_list apparmor_hooks[] = { LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check), LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme), LSM_HOOK_INIT(capget, apparmor_capget), LSM_HOOK_INIT(capable, apparmor_capable), LSM_HOOK_INIT(path_link, apparmor_path_link), LSM_HOOK_INIT(path_unlink, apparmor_path_unlink), LSM_HOOK_INIT(path_symlink, apparmor_path_symlink), LSM_HOOK_INIT(path_mkdir, apparmor_path_mkdir), LSM_HOOK_INIT(path_rmdir, apparmor_path_rmdir), LSM_HOOK_INIT(path_mknod, apparmor_path_mknod), LSM_HOOK_INIT(path_rename, apparmor_path_rename), LSM_HOOK_INIT(path_chmod, apparmor_path_chmod), LSM_HOOK_INIT(path_chown, apparmor_path_chown), LSM_HOOK_INIT(path_truncate, apparmor_path_truncate), LSM_HOOK_INIT(inode_getattr, 
apparmor_inode_getattr), LSM_HOOK_INIT(file_open, apparmor_file_open), LSM_HOOK_INIT(file_permission, apparmor_file_permission), LSM_HOOK_INIT(file_alloc_security, apparmor_file_alloc_security), LSM_HOOK_INIT(file_free_security, apparmor_file_free_security), LSM_HOOK_INIT(mmap_file, apparmor_mmap_file), LSM_HOOK_INIT(file_mprotect, apparmor_file_mprotect), LSM_HOOK_INIT(file_lock, apparmor_file_lock), LSM_HOOK_INIT(getprocattr, apparmor_getprocattr), LSM_HOOK_INIT(setprocattr, apparmor_setprocattr), LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank), LSM_HOOK_INIT(cred_free, apparmor_cred_free), LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare), LSM_HOOK_INIT(cred_transfer, apparmor_cred_transfer), LSM_HOOK_INIT(bprm_set_creds, apparmor_bprm_set_creds), LSM_HOOK_INIT(bprm_committing_creds, apparmor_bprm_committing_creds), LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds), LSM_HOOK_INIT(bprm_secureexec, apparmor_bprm_secureexec), LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit), }; /* * AppArmor sysfs module parameters */ static int param_set_aabool(const char *val, const struct kernel_param *kp); static int param_get_aabool(char *buffer, const struct kernel_param *kp); #define param_check_aabool param_check_bool static const struct kernel_param_ops param_ops_aabool = { .flags = KERNEL_PARAM_OPS_FL_NOARG, .set = param_set_aabool, .get = param_get_aabool }; static int param_set_aauint(const char *val, const struct kernel_param *kp); static int param_get_aauint(char *buffer, const struct kernel_param *kp); #define param_check_aauint param_check_uint static const struct kernel_param_ops param_ops_aauint = { .set = param_set_aauint, .get = param_get_aauint }; static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp); static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp); #define param_check_aalockpolicy param_check_bool static const struct kernel_param_ops param_ops_aalockpolicy = { .flags = KERNEL_PARAM_OPS_FL_NOARG, .set = param_set_aalockpolicy, .get = param_get_aalockpolicy }; static int param_set_audit(const char *val, struct kernel_param *kp); static int param_get_audit(char *buffer, struct kernel_param *kp); static int param_set_mode(const char *val, struct kernel_param *kp); static int param_get_mode(char *buffer, struct kernel_param *kp); /* Flag values, also controllable via /sys/module/apparmor/parameters * We define special types as we want to do additional mediation. */ /* AppArmor global enforcement switch - complain, enforce, kill */ enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE; module_param_call(mode, param_set_mode, param_get_mode, &aa_g_profile_mode, S_IRUSR | S_IWUSR); /* Debug mode */ bool aa_g_debug; module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR); /* Audit mode */ enum audit_mode aa_g_audit; module_param_call(audit, param_set_audit, param_get_audit, &aa_g_audit, S_IRUSR | S_IWUSR); /* Determines if audit header is included in audited messages. 
This * provides more context if the audit daemon is not running */ bool aa_g_audit_header = 1; module_param_named(audit_header, aa_g_audit_header, aabool, S_IRUSR | S_IWUSR); /* lock out loading/removal of policy * TODO: add in at boot loading of policy, which is the only way to * load policy, if lock_policy is set */ bool aa_g_lock_policy; module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy, S_IRUSR | S_IWUSR); /* Syscall logging mode */ bool aa_g_logsyscall; module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR); /* Maximum pathname length before accesses will start getting rejected */ unsigned int aa_g_path_max = 2 * PATH_MAX; module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR); /* Determines how paranoid loading of policy is and how much verification * on the loaded policy is done. */ bool aa_g_paranoid_load = 1; module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUSR | S_IWUSR); /* Boot time disable flag */ static bool apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE; module_param_named(enabled, apparmor_enabled, bool, S_IRUGO); static int __init apparmor_enabled_setup(char *str) { unsigned long enabled; int error = kstrtoul(str, 0, &enabled); if (!error) apparmor_enabled = enabled ? 1 : 0; return 1; } __setup("apparmor=", apparmor_enabled_setup); /* set global flag turning off the ability to load policy */ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (aa_g_lock_policy) return -EACCES; return param_set_bool(val, kp); } static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_bool(buffer, kp); } static int param_set_aabool(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_set_bool(val, kp); } static int param_get_aabool(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_bool(buffer, kp); } static int param_set_aauint(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_set_uint(val, kp); } static int param_get_aauint(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_uint(buffer, kp); } static int param_get_audit(char *buffer, struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]); } static int param_set_audit(const char *val, struct kernel_param *kp) { int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; if (!val) return -EINVAL; for (i = 0; i < AUDIT_MAX_INDEX; i++) { if (strcmp(val, audit_mode_names[i]) == 0) { aa_g_audit = i; return 0; } } return -EINVAL; } static int param_get_mode(char *buffer, struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]); } static int param_set_mode(const char *val, struct kernel_param *kp) { int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; if (!val) return -EINVAL; for (i = 0; i < APPARMOR_MODE_NAMES_MAX_INDEX; i++) { if (strcmp(val, aa_profile_mode_names[i]) == 0) { aa_g_profile_mode = i; return 0; } } return -EINVAL; } /* * AppArmor init functions */ /** * set_init_cxt - set 
a task context and profile on the first task. * * TODO: allow setting an alternate profile than unconfined */ static int __init set_init_cxt(void) { struct cred *cred = (struct cred *)current->real_cred; struct aa_task_cxt *cxt; cxt = aa_alloc_task_context(GFP_KERNEL); if (!cxt) return -ENOMEM; cxt->profile = aa_get_profile(root_ns->unconfined); cred_cxt(cred) = cxt; return 0; } static int __init apparmor_init(void) { int error; if (!apparmor_enabled || !security_module_enable("apparmor")) { aa_info_message("AppArmor disabled by boot time parameter"); apparmor_enabled = 0; return 0; } error = aa_alloc_root_ns(); if (error) { AA_ERROR("Unable to allocate default profile namespace\n"); goto alloc_out; } error = set_init_cxt(); if (error) { AA_ERROR("Failed to set context on init task\n"); aa_free_root_ns(); goto alloc_out; } security_add_hooks(apparmor_hooks, ARRAY_SIZE(apparmor_hooks)); /* Report that AppArmor successfully initialized */ apparmor_initialized = 1; if (aa_g_profile_mode == APPARMOR_COMPLAIN) aa_info_message("AppArmor initialized: complain mode enabled"); else if (aa_g_profile_mode == APPARMOR_KILL) aa_info_message("AppArmor initialized: kill mode enabled"); else aa_info_message("AppArmor initialized"); return error; alloc_out: aa_destroy_aafs(); apparmor_enabled = 0; return error; } security_initcall(apparmor_init);
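/*
 * Editor's note: a minimal, hypothetical userspace sketch of the string
 * protocol that apparmor_setprocattr() above parses: a command word
 * ("changehat", "changeprofile", ...) and its argument separated by a
 * space, written to /proc/self/attr/current (the "exec" command goes to
 * /proc/self/attr/exec instead). The profile name is an illustrative
 * assumption; real applications normally go through libapparmor rather
 * than writing this file directly.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int change_profile_example(const char *profile)
{
	char buf[256];
	int fd, n, ret = -1;

	/* Command and argument separated by one space, as strsep()'d above. */
	n = snprintf(buf, sizeof(buf), "changeprofile %s", profile);
	if (n < 0 || n >= (int)sizeof(buf))
		return -1;
	fd = open("/proc/self/attr/current", O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, buf, (size_t)n) == (ssize_t)n)
		ret = 0;
	close(fd);
	return ret;
}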
./CrossVul/dataset_final_sorted/CWE-264/c/good_5192_0
crossvul-cpp_data_good_1513_2
/* * PCI Backend - Handles the virtual fields in the configuration space headers. * * Author: Ryan Wilson <hap9@epoch.ncsc.mil> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/pci.h> #include "pciback.h" #include "conf_space.h" struct pci_cmd_info { u16 val; }; struct pci_bar_info { u32 val; u32 len_val; int which; }; #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) /* Bits guests are allowed to control in permissive mode. */ #define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) static void *command_init(struct pci_dev *dev, int offset) { struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); int err; if (!cmd) return ERR_PTR(-ENOMEM); err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); if (err) { kfree(cmd); return ERR_PTR(err); } return cmd; } static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) { int ret = pci_read_config_word(dev, offset, value); const struct pci_cmd_info *cmd = data; *value &= PCI_COMMAND_GUEST; *value |= cmd->val & ~PCI_COMMAND_GUEST; return ret; } static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) { struct xen_pcibk_dev_data *dev_data; int err; u16 val; struct pci_cmd_info *cmd = data; dev_data = pci_get_drvdata(dev); if (!pci_is_enabled(dev) && is_enable_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: enable\n", pci_name(dev)); err = pci_enable_device(dev); if (err) return err; if (dev_data) dev_data->enable_intx = 1; } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: disable\n", pci_name(dev)); pci_disable_device(dev); if (dev_data) dev_data->enable_intx = 0; } if (!dev->is_busmaster && is_master_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n", pci_name(dev)); pci_set_master(dev); } if (value & PCI_COMMAND_INVALIDATE) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: enable memory-write-invalidate\n", pci_name(dev)); err = pci_set_mwi(dev); if (err) { pr_warn("%s: cannot enable memory-write-invalidate (%d)\n", pci_name(dev), err); value &= ~PCI_COMMAND_INVALIDATE; } } cmd->val = value; if (!permissive && (!dev_data || !dev_data->permissive)) return 0; /* Only allow the guest to control certain bits. */ err = pci_read_config_word(dev, offset, &val); if (err || val == value) return err; value &= PCI_COMMAND_GUEST; value |= val & ~PCI_COMMAND_GUEST; return pci_write_config_word(dev, offset, value); } static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { pr_warn(DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } /* A write to obtain the length must happen as a 32-bit write. * This does not (yet) support writing individual bytes */ if (value == ~PCI_ROM_ADDRESS_ENABLE) bar->which = 1; else { u32 tmpval; pci_read_config_dword(dev, offset, &tmpval); if (tmpval != bar->val && value == bar->val) { /* Allow restoration of bar value. */ pci_write_config_dword(dev, offset, bar->val); } bar->which = 0; } /* Do we need to support enabling/disabling the rom address here? 
*/ return 0; } /* For the BARs, only allow writes which write ~0 or * the correct resource information * (Needed for when the driver probes the resource usage) */ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { pr_warn(DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } /* A write to obtain the length must happen as a 32-bit write. * This does not (yet) support writing individual bytes */ if (value == ~0) bar->which = 1; else { u32 tmpval; pci_read_config_dword(dev, offset, &tmpval); if (tmpval != bar->val && value == bar->val) { /* Allow restoration of bar value. */ pci_write_config_dword(dev, offset, bar->val); } bar->which = 0; } return 0; } static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { pr_warn(DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } *value = bar->which ? bar->len_val : bar->val; return 0; } static inline void read_dev_bar(struct pci_dev *dev, struct pci_bar_info *bar_info, int offset, u32 len_mask) { int pos; struct resource *res = dev->resource; if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1) pos = PCI_ROM_RESOURCE; else { pos = (offset - PCI_BASE_ADDRESS_0) / 4; if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64))) { bar_info->val = res[pos - 1].start >> 32; bar_info->len_val = res[pos - 1].end >> 32; return; } } bar_info->val = res[pos].start | (res[pos].flags & PCI_REGION_FLAG_MASK); bar_info->len_val = resource_size(&res[pos]); } static void *bar_init(struct pci_dev *dev, int offset) { struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); if (!bar) return ERR_PTR(-ENOMEM); read_dev_bar(dev, bar, offset, ~0); bar->which = 0; return bar; } static void *rom_init(struct pci_dev *dev, int offset) { struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); if (!bar) return ERR_PTR(-ENOMEM); read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); bar->which = 0; return bar; } static void bar_reset(struct pci_dev *dev, int offset, void *data) { struct pci_bar_info *bar = data; bar->which = 0; } static void bar_release(struct pci_dev *dev, int offset, void *data) { kfree(data); } static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset, u16 *value, void *data) { *value = dev->vendor; return 0; } static int xen_pcibk_read_device(struct pci_dev *dev, int offset, u16 *value, void *data) { *value = dev->device; return 0; } static int interrupt_read(struct pci_dev *dev, int offset, u8 * value, void *data) { *value = (u8) dev->irq; return 0; } static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data) { u8 cur_value; int err; err = pci_read_config_byte(dev, offset, &cur_value); if (err) goto out; if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START) || value == PCI_BIST_START) err = pci_write_config_byte(dev, offset, value); out: return err; } static const struct config_field header_common[] = { { .offset = PCI_VENDOR_ID, .size = 2, .u.w.read = xen_pcibk_read_vendor, }, { .offset = PCI_DEVICE_ID, .size = 2, .u.w.read = xen_pcibk_read_device, }, { .offset = PCI_COMMAND, .size = 2, .init = command_init, .release = bar_release, .u.w.read = command_read, .u.w.write = command_write, }, { .offset = PCI_INTERRUPT_LINE, .size = 1, .u.b.read = interrupt_read, }, { .offset = 
PCI_INTERRUPT_PIN, .size = 1, .u.b.read = xen_pcibk_read_config_byte, }, { /* Any side effects of letting driver domain control cache line? */ .offset = PCI_CACHE_LINE_SIZE, .size = 1, .u.b.read = xen_pcibk_read_config_byte, .u.b.write = xen_pcibk_write_config_byte, }, { .offset = PCI_LATENCY_TIMER, .size = 1, .u.b.read = xen_pcibk_read_config_byte, }, { .offset = PCI_BIST, .size = 1, .u.b.read = xen_pcibk_read_config_byte, .u.b.write = bist_write, }, {} }; #define CFG_FIELD_BAR(reg_offset) \ { \ .offset = reg_offset, \ .size = 4, \ .init = bar_init, \ .reset = bar_reset, \ .release = bar_release, \ .u.dw.read = bar_read, \ .u.dw.write = bar_write, \ } #define CFG_FIELD_ROM(reg_offset) \ { \ .offset = reg_offset, \ .size = 4, \ .init = rom_init, \ .reset = bar_reset, \ .release = bar_release, \ .u.dw.read = bar_read, \ .u.dw.write = rom_write, \ } static const struct config_field header_0[] = { CFG_FIELD_BAR(PCI_BASE_ADDRESS_0), CFG_FIELD_BAR(PCI_BASE_ADDRESS_1), CFG_FIELD_BAR(PCI_BASE_ADDRESS_2), CFG_FIELD_BAR(PCI_BASE_ADDRESS_3), CFG_FIELD_BAR(PCI_BASE_ADDRESS_4), CFG_FIELD_BAR(PCI_BASE_ADDRESS_5), CFG_FIELD_ROM(PCI_ROM_ADDRESS), {} }; static const struct config_field header_1[] = { CFG_FIELD_BAR(PCI_BASE_ADDRESS_0), CFG_FIELD_BAR(PCI_BASE_ADDRESS_1), CFG_FIELD_ROM(PCI_ROM_ADDRESS1), {} }; int xen_pcibk_config_header_add_fields(struct pci_dev *dev) { int err; err = xen_pcibk_config_add_fields(dev, header_common); if (err) goto out; switch (dev->hdr_type) { case PCI_HEADER_TYPE_NORMAL: err = xen_pcibk_config_add_fields(dev, header_0); break; case PCI_HEADER_TYPE_BRIDGE: err = xen_pcibk_config_add_fields(dev, header_1); break; default: err = -EINVAL; pr_err("%s: Unsupported header type %d!\n", pci_name(dev), dev->hdr_type); break; } out: return err; }
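/*
 * Editor's note: a self-contained sketch of the BAR sizing handshake that
 * bar_write()/bar_read() above emulate for the guest. Writing ~0 to a BAR
 * flips the backend into "length" mode (bar->which = 1), so the next read
 * returns bar->len_val instead of the address; writing the saved address
 * back restores normal reads. cfg_read32()/cfg_write32() are hypothetical
 * config-space accessors standing in for whatever the guest actually uses.
 */
#include <stdint.h>

uint32_t cfg_read32(int offset);            /* assumed config-space read */
void cfg_write32(int offset, uint32_t val); /* assumed config-space write */

static uint32_t probe_bar_length(int offset)
{
	uint32_t saved = cfg_read32(offset);
	uint32_t len;

	cfg_write32(offset, ~0u);    /* bar_write() sets bar->which = 1 */
	len = cfg_read32(offset);    /* bar_read() now returns bar->len_val */
	cfg_write32(offset, saved);  /* restoring bar->val clears 'which' */

	/*
	 * Note: in this backend version len_val comes from read_dev_bar()'s
	 * resource_size(), i.e. the size itself; real hardware instead
	 * returns an encoded mask that the guest must decode.
	 */
	return len;
}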
./CrossVul/dataset_final_sorted/CWE-264/c/good_1513_2
crossvul-cpp_data_bad_2159_2
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include "irq.h" #include "mmu.h" #include "cpuid.h" #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include <linux/mod_devicetable.h> #include <linux/ftrace_event.h> #include <linux/slab.h> #include <linux/tboot.h> #include <linux/hrtimer.h> #include "kvm_cache_regs.h" #include "x86.h" #include <asm/io.h> #include <asm/desc.h> #include <asm/vmx.h> #include <asm/virtext.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/xcr.h> #include <asm/perf_event.h> #include <asm/debugreg.h> #include <asm/kexec.h> #include "trace.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) #define __ex_clear(x, reg) \ ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static const struct x86_cpu_id vmx_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_VMX), {} }; MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); static bool __read_mostly enable_vpid = 1; module_param_named(vpid, enable_vpid, bool, 0444); static bool __read_mostly flexpriority_enabled = 1; module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); static bool __read_mostly enable_ept = 1; module_param_named(ept, enable_ept, bool, S_IRUGO); static bool __read_mostly enable_unrestricted_guest = 1; module_param_named(unrestricted_guest, enable_unrestricted_guest, bool, S_IRUGO); static bool __read_mostly enable_ept_ad_bits = 1; module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); static bool __read_mostly emulate_invalid_guest_state = true; module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly vmm_exclusive = 1; module_param(vmm_exclusive, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); static bool __read_mostly enable_apicv = 1; module_param(enable_apicv, bool, S_IRUGO); static bool __read_mostly enable_shadow_vmcs = 1; module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); /* * If nested=1, nested virtualization is supported, i.e., guests may use * VMX and be a hypervisor for its own guests. If nested=0, guests may not * use VMX instructions. 
*/ static bool __read_mostly nested = 0; module_param(nested, bool, S_IRUGO); #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT) #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 /* * These 2 parameters are used to config the controls for Pause-Loop Exiting: * ple_gap: upper bound on the amount of time between two successive * executions of PAUSE in a loop. Also indicate if ple enabled. * According to test, this time is usually smaller than 128 cycles. * ple_window: upper bound on the amount of time a guest is allowed to execute * in a PAUSE loop. Tests indicate that most spinlocks are held for * less than 2^12 cycles * Time is measured based on a counter that runs at the same rate as the TSC, * refer SDM volume 3b section 21.6.13 & 22.1.3. */ #define KVM_VMX_DEFAULT_PLE_GAP 128 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \ INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP; module_param(ple_gap, int, S_IRUGO); static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; module_param(ple_window, int, S_IRUGO); /* Default doubles per-vcpu window every exit. */ static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW; module_param(ple_window_grow, int, S_IRUGO); /* Default resets per-vcpu window every exit to ple_window. */ static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK; module_param(ple_window_shrink, int, S_IRUGO); /* Default is to compute the maximum so we can never overflow. */ static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; #define NR_AUTOLOAD_MSRS 8 #define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; u32 abort; char data[0]; }; /* * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs * loaded on this CPU (so we can clear them if the CPU goes down). */ struct loaded_vmcs { struct vmcs *vmcs; int cpu; int launched; struct list_head loaded_vmcss_on_cpu_link; }; struct shared_msr_entry { unsigned index; u64 data; u64 mask; }; /* * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a * single nested guest (L2), hence the name vmcs12. Any VMX implementation has * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is * stored in guest memory specified by VMPTRLD, but is opaque to the guest, * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. * More than one of these structures may exist, if L1 runs multiple L2 guests. * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the * underlying hardware which will be used to run L2. * This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). 
* If there are changes in this struct, VMCS12_REVISION must be changed. */ typedef u64 natural_width; struct __packed vmcs12 { /* According to the Intel spec, a VMCS region must start with the * following two fields. Then follow implementation-specific data. */ u32 revision_id; u32 abort; u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ u32 padding[7]; /* room for future expansion */ u64 io_bitmap_a; u64 io_bitmap_b; u64 msr_bitmap; u64 vm_exit_msr_store_addr; u64 vm_exit_msr_load_addr; u64 vm_entry_msr_load_addr; u64 tsc_offset; u64 virtual_apic_page_addr; u64 apic_access_addr; u64 ept_pointer; u64 guest_physical_address; u64 vmcs_link_pointer; u64 guest_ia32_debugctl; u64 guest_ia32_pat; u64 guest_ia32_efer; u64 guest_ia32_perf_global_ctrl; u64 guest_pdptr0; u64 guest_pdptr1; u64 guest_pdptr2; u64 guest_pdptr3; u64 guest_bndcfgs; u64 host_ia32_pat; u64 host_ia32_efer; u64 host_ia32_perf_global_ctrl; u64 padding64[8]; /* room for future expansion */ /* * To allow migration of L1 (complete with its L2 guests) between * machines of different natural widths (32 or 64 bit), we cannot have * unsigned long fields with no explicit size. We use u64 (aliased * natural_width) instead. Luckily, x86 is little-endian. */ natural_width cr0_guest_host_mask; natural_width cr4_guest_host_mask; natural_width cr0_read_shadow; natural_width cr4_read_shadow; natural_width cr3_target_value0; natural_width cr3_target_value1; natural_width cr3_target_value2; natural_width cr3_target_value3; natural_width exit_qualification; natural_width guest_linear_address; natural_width guest_cr0; natural_width guest_cr3; natural_width guest_cr4; natural_width guest_es_base; natural_width guest_cs_base; natural_width guest_ss_base; natural_width guest_ds_base; natural_width guest_fs_base; natural_width guest_gs_base; natural_width guest_ldtr_base; natural_width guest_tr_base; natural_width guest_gdtr_base; natural_width guest_idtr_base; natural_width guest_dr7; natural_width guest_rsp; natural_width guest_rip; natural_width guest_rflags; natural_width guest_pending_dbg_exceptions; natural_width guest_sysenter_esp; natural_width guest_sysenter_eip; natural_width host_cr0; natural_width host_cr3; natural_width host_cr4; natural_width host_fs_base; natural_width host_gs_base; natural_width host_tr_base; natural_width host_gdtr_base; natural_width host_idtr_base; natural_width host_ia32_sysenter_esp; natural_width host_ia32_sysenter_eip; natural_width host_rsp; natural_width host_rip; natural_width paddingl[8]; /* room for future expansion */ u32 pin_based_vm_exec_control; u32 cpu_based_vm_exec_control; u32 exception_bitmap; u32 page_fault_error_code_mask; u32 page_fault_error_code_match; u32 cr3_target_count; u32 vm_exit_controls; u32 vm_exit_msr_store_count; u32 vm_exit_msr_load_count; u32 vm_entry_controls; u32 vm_entry_msr_load_count; u32 vm_entry_intr_info_field; u32 vm_entry_exception_error_code; u32 vm_entry_instruction_len; u32 tpr_threshold; u32 secondary_vm_exec_control; u32 vm_instruction_error; u32 vm_exit_reason; u32 vm_exit_intr_info; u32 vm_exit_intr_error_code; u32 idt_vectoring_info_field; u32 idt_vectoring_error_code; u32 vm_exit_instruction_len; u32 vmx_instruction_info; u32 guest_es_limit; u32 guest_cs_limit; u32 guest_ss_limit; u32 guest_ds_limit; u32 guest_fs_limit; u32 guest_gs_limit; u32 guest_ldtr_limit; u32 guest_tr_limit; u32 guest_gdtr_limit; u32 guest_idtr_limit; u32 guest_es_ar_bytes; u32 guest_cs_ar_bytes; u32 guest_ss_ar_bytes; u32 guest_ds_ar_bytes; u32 guest_fs_ar_bytes; u32
guest_gs_ar_bytes; u32 guest_ldtr_ar_bytes; u32 guest_tr_ar_bytes; u32 guest_interruptibility_info; u32 guest_activity_state; u32 guest_sysenter_cs; u32 host_ia32_sysenter_cs; u32 vmx_preemption_timer_value; u32 padding32[7]; /* room for future expansion */ u16 virtual_processor_id; u16 guest_es_selector; u16 guest_cs_selector; u16 guest_ss_selector; u16 guest_ds_selector; u16 guest_fs_selector; u16 guest_gs_selector; u16 guest_ldtr_selector; u16 guest_tr_selector; u16 host_es_selector; u16 host_cs_selector; u16 host_ss_selector; u16 host_ds_selector; u16 host_fs_selector; u16 host_gs_selector; u16 host_tr_selector; }; /* * VMCS12_REVISION is an arbitrary id that should be changed if the content or * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. */ #define VMCS12_REVISION 0x11e57ed0 /* * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region * and any VMCS region. Although only sizeof(struct vmcs12) is used by the * current implementation, 4K are reserved to avoid future complications. */ #define VMCS12_SIZE 0x1000 /* Used to remember the last vmcs02 used for some recently used vmcs12s */ struct vmcs02_list { struct list_head list; gpa_t vmptr; struct loaded_vmcs vmcs02; }; /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. */ struct nested_vmx { /* Has the level1 guest done vmxon? */ bool vmxon; gpa_t vmxon_ptr; /* The guest-physical address of the current VMCS L1 keeps for L2 */ gpa_t current_vmptr; /* The host-usable pointer to the above */ struct page *current_vmcs12_page; struct vmcs12 *current_vmcs12; struct vmcs *current_shadow_vmcs; /* * Indicates if the shadow vmcs must be updated with the * data held by vmcs12 */ bool sync_shadow_vmcs; /* vmcs02_list cache of VMCSs recently used to run L2 guests */ struct list_head vmcs02_pool; int vmcs02_num; u64 vmcs01_tsc_offset; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; /* * Guest pages referred to in vmcs02 with host-physical pointers, so * we must keep them pinned while L2 runs.
*/ struct page *apic_access_page; struct page *virtual_apic_page; u64 msr_ia32_feature_control; struct hrtimer preemption_timer; bool preemption_timer_expired; /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ u64 vmcs01_debugctl; }; #define POSTED_INTR_ON 0 /* Posted-Interrupt Descriptor */ struct pi_desc { u32 pir[8]; /* Posted interrupt requested */ u32 control; /* bit 0 of control is outstanding notification bit */ u32 rsvd[7]; } __aligned(64); static bool pi_test_and_set_on(struct pi_desc *pi_desc) { return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static bool pi_test_and_clear_on(struct pi_desc *pi_desc) { return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) { return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; bool nmi_known_unmasked; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; struct shared_msr_entry *guest_msrs; int nmsrs; int save_nmsrs; unsigned long host_idt_base; #ifdef CONFIG_X86_64 u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. For a nested * guest (L2), it points to a different VMCS. */ struct loaded_vmcs vmcs01; struct loaded_vmcs *loaded_vmcs; bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { unsigned nr; struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; } msr_autoload; struct { int loaded; u16 fs_sel, gs_sel, ldt_sel; #ifdef CONFIG_X86_64 u16 ds_sel, es_sel; #endif int gs_ldt_reload_needed; int fs_reload_needed; u64 msr_host_bndcfgs; unsigned long vmcs_host_cr4; /* May not match real cr4 */ } host_state; struct { int vm86_active; ulong save_rflags; struct kvm_segment segs[8]; } rmode; struct { u32 bitmask; /* 4 bits per segment (1 bit per field) */ struct kvm_save_segment { u16 selector; unsigned long base; u32 limit; u32 ar; } seg[8]; } segment_cache; int vpid; bool emulation_required; /* Support for vnmi-less CPUs */ int soft_vnmi_blocked; ktime_t entry_time; s64 vnmi_blocked_time; u32 exit_reason; bool rdtscp_enabled; /* Posted interrupt descriptor */ struct pi_desc pi_desc; /* Support for a guest hypervisor (nested VMX) */ struct nested_vmx nested; /* Dynamic PLE window. */ int ple_window; bool ple_window_dirty; }; enum segment_cache_field { SEG_FIELD_SEL = 0, SEG_FIELD_BASE = 1, SEG_FIELD_LIMIT = 2, SEG_FIELD_AR = 3, SEG_FIELD_NR = 4 }; static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_vmx, vcpu); } #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) #define FIELD(number, name) [number] = VMCS12_OFFSET(name) #define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \ [number##_HIGH] = VMCS12_OFFSET(name)+4 static unsigned long shadow_read_only_fields[] = { /* * We do NOT shadow fields that are modified when L0 * traps and emulates any vmx instruction (e.g. VMPTRLD, * VMXON...) executed by L1. * For example, VM_INSTRUCTION_ERROR is read * by L1 if a vmx instruction fails (part of the error path). * Note the code assumes this logic. If for some reason * we start shadowing these fields then we need to * force a shadow sync when L0 emulates vmx instructions * (e.g. 
force a sync if VM_INSTRUCTION_ERROR is modified * by nested_vmx_failValid) */ VM_EXIT_REASON, VM_EXIT_INTR_INFO, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_INFO_FIELD, IDT_VECTORING_ERROR_CODE, VM_EXIT_INTR_ERROR_CODE, EXIT_QUALIFICATION, GUEST_LINEAR_ADDRESS, GUEST_PHYSICAL_ADDRESS }; static int max_shadow_read_only_fields = ARRAY_SIZE(shadow_read_only_fields); static unsigned long shadow_read_write_fields[] = { TPR_THRESHOLD, GUEST_RIP, GUEST_RSP, GUEST_CR0, GUEST_CR3, GUEST_CR4, GUEST_INTERRUPTIBILITY_INFO, GUEST_RFLAGS, GUEST_CS_SELECTOR, GUEST_CS_AR_BYTES, GUEST_CS_LIMIT, GUEST_CS_BASE, GUEST_ES_BASE, GUEST_BNDCFGS, CR0_GUEST_HOST_MASK, CR0_READ_SHADOW, CR4_READ_SHADOW, TSC_OFFSET, EXCEPTION_BITMAP, CPU_BASED_VM_EXEC_CONTROL, VM_ENTRY_EXCEPTION_ERROR_CODE, VM_ENTRY_INTR_INFO_FIELD, VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE, HOST_FS_BASE, HOST_GS_BASE, HOST_FS_SELECTOR, HOST_GS_SELECTOR }; static int max_shadow_read_write_fields = ARRAY_SIZE(shadow_read_write_fields); static const unsigned short vmcs_field_to_offset_table[] = { FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id), FIELD(GUEST_ES_SELECTOR, guest_es_selector), FIELD(GUEST_CS_SELECTOR, guest_cs_selector), FIELD(GUEST_SS_SELECTOR, guest_ss_selector), FIELD(GUEST_DS_SELECTOR, guest_ds_selector), FIELD(GUEST_FS_SELECTOR, guest_fs_selector), FIELD(GUEST_GS_SELECTOR, guest_gs_selector), FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector), FIELD(GUEST_TR_SELECTOR, guest_tr_selector), FIELD(HOST_ES_SELECTOR, host_es_selector), FIELD(HOST_CS_SELECTOR, host_cs_selector), FIELD(HOST_SS_SELECTOR, host_ss_selector), FIELD(HOST_DS_SELECTOR, host_ds_selector), FIELD(HOST_FS_SELECTOR, host_fs_selector), FIELD(HOST_GS_SELECTOR, host_gs_selector), FIELD(HOST_TR_SELECTOR, host_tr_selector), FIELD64(IO_BITMAP_A, io_bitmap_a), FIELD64(IO_BITMAP_B, io_bitmap_b), FIELD64(MSR_BITMAP, msr_bitmap), FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr), FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr), FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr), FIELD64(TSC_OFFSET, tsc_offset), FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), FIELD64(APIC_ACCESS_ADDR, apic_access_addr), FIELD64(EPT_POINTER, ept_pointer), FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), FIELD64(GUEST_IA32_PAT, guest_ia32_pat), FIELD64(GUEST_IA32_EFER, guest_ia32_efer), FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl), FIELD64(GUEST_PDPTR0, guest_pdptr0), FIELD64(GUEST_PDPTR1, guest_pdptr1), FIELD64(GUEST_PDPTR2, guest_pdptr2), FIELD64(GUEST_PDPTR3, guest_pdptr3), FIELD64(GUEST_BNDCFGS, guest_bndcfgs), FIELD64(HOST_IA32_PAT, host_ia32_pat), FIELD64(HOST_IA32_EFER, host_ia32_efer), FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl), FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control), FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control), FIELD(EXCEPTION_BITMAP, exception_bitmap), FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask), FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match), FIELD(CR3_TARGET_COUNT, cr3_target_count), FIELD(VM_EXIT_CONTROLS, vm_exit_controls), FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count), FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count), FIELD(VM_ENTRY_CONTROLS, vm_entry_controls), FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count), FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field), FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, 
vm_entry_exception_error_code), FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len), FIELD(TPR_THRESHOLD, tpr_threshold), FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control), FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error), FIELD(VM_EXIT_REASON, vm_exit_reason), FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info), FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code), FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field), FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code), FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len), FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info), FIELD(GUEST_ES_LIMIT, guest_es_limit), FIELD(GUEST_CS_LIMIT, guest_cs_limit), FIELD(GUEST_SS_LIMIT, guest_ss_limit), FIELD(GUEST_DS_LIMIT, guest_ds_limit), FIELD(GUEST_FS_LIMIT, guest_fs_limit), FIELD(GUEST_GS_LIMIT, guest_gs_limit), FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit), FIELD(GUEST_TR_LIMIT, guest_tr_limit), FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit), FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit), FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes), FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes), FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes), FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes), FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes), FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes), FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes), FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes), FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info), FIELD(GUEST_ACTIVITY_STATE, guest_activity_state), FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs), FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs), FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value), FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask), FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask), FIELD(CR0_READ_SHADOW, cr0_read_shadow), FIELD(CR4_READ_SHADOW, cr4_read_shadow), FIELD(CR3_TARGET_VALUE0, cr3_target_value0), FIELD(CR3_TARGET_VALUE1, cr3_target_value1), FIELD(CR3_TARGET_VALUE2, cr3_target_value2), FIELD(CR3_TARGET_VALUE3, cr3_target_value3), FIELD(EXIT_QUALIFICATION, exit_qualification), FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address), FIELD(GUEST_CR0, guest_cr0), FIELD(GUEST_CR3, guest_cr3), FIELD(GUEST_CR4, guest_cr4), FIELD(GUEST_ES_BASE, guest_es_base), FIELD(GUEST_CS_BASE, guest_cs_base), FIELD(GUEST_SS_BASE, guest_ss_base), FIELD(GUEST_DS_BASE, guest_ds_base), FIELD(GUEST_FS_BASE, guest_fs_base), FIELD(GUEST_GS_BASE, guest_gs_base), FIELD(GUEST_LDTR_BASE, guest_ldtr_base), FIELD(GUEST_TR_BASE, guest_tr_base), FIELD(GUEST_GDTR_BASE, guest_gdtr_base), FIELD(GUEST_IDTR_BASE, guest_idtr_base), FIELD(GUEST_DR7, guest_dr7), FIELD(GUEST_RSP, guest_rsp), FIELD(GUEST_RIP, guest_rip), FIELD(GUEST_RFLAGS, guest_rflags), FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions), FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp), FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip), FIELD(HOST_CR0, host_cr0), FIELD(HOST_CR3, host_cr3), FIELD(HOST_CR4, host_cr4), FIELD(HOST_FS_BASE, host_fs_base), FIELD(HOST_GS_BASE, host_gs_base), FIELD(HOST_TR_BASE, host_tr_base), FIELD(HOST_GDTR_BASE, host_gdtr_base), FIELD(HOST_IDTR_BASE, host_idtr_base), FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp), FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip), FIELD(HOST_RSP, host_rsp), FIELD(HOST_RIP, host_rip), }; static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table); static inline short vmcs_field_to_offset(unsigned long field) { if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0) return -1; return vmcs_field_to_offset_table[field]; } 
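/*
 * Editor's note: a self-contained sketch of the lookup pattern behind
 * FIELD()/FIELD64() and vmcs_field_to_offset() above: a sparse table maps
 * architectural field encodings to offsetof() positions inside the software
 * struct, with 0 (the offset of revision_id, which no field maps to) doing
 * double duty as "no entry". The demo struct and encodings are made up for
 * illustration only.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_vmcs {
	uint32_t revision_id;	/* offset 0 is never a valid field offset */
	uint64_t guest_rip;
	uint32_t exit_reason;
};

enum { DEMO_GUEST_RIP = 1, DEMO_EXIT_REASON = 2, DEMO_MAX_FIELD = 3 };

static const unsigned short demo_offset_table[DEMO_MAX_FIELD] = {
	[DEMO_GUEST_RIP]   = offsetof(struct demo_vmcs, guest_rip),
	[DEMO_EXIT_REASON] = offsetof(struct demo_vmcs, exit_reason),
};

/* Mirrors vmcs_field_to_offset(): -1 for unknown or out-of-range fields. */
static short demo_field_to_offset(unsigned long field)
{
	if (field >= DEMO_MAX_FIELD || demo_offset_table[field] == 0)
		return -1;
	return demo_offset_table[field];
}

/* Example use: read guest_rip (a known 64-bit field) through the table. */
static uint64_t demo_read_guest_rip(const struct demo_vmcs *v)
{
	short off = demo_field_to_offset(DEMO_GUEST_RIP);

	if (off < 0)
		return 0; /* a real caller would signal a failed VMREAD */
	return *(const uint64_t *)((const char *)v + off);
}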
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) { return to_vmx(vcpu)->nested.current_vmcs12; } static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) { struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT); if (is_error_page(page)) return NULL; return page; } static void nested_release_page(struct page *page) { kvm_release_page_dirty(page); } static void nested_release_page_clean(struct page *page) { kvm_release_page_clean(page); } static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); static u64 construct_eptp(unsigned long root_hpa); static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); static bool vmx_mpx_supported(void); static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static bool guest_state_valid(struct kvm_vcpu *vcpu); static u32 vmx_segment_access_rights(struct kvm_segment *var); static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu); static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); static int alloc_identity_pagetable(struct kvm *kvm); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); /* * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it. */ static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu); static DEFINE_PER_CPU(struct desc_ptr, host_gdt); static unsigned long *vmx_io_bitmap_a; static unsigned long *vmx_io_bitmap_b; static unsigned long *vmx_msr_bitmap_legacy; static unsigned long *vmx_msr_bitmap_longmode; static unsigned long *vmx_msr_bitmap_legacy_x2apic; static unsigned long *vmx_msr_bitmap_longmode_x2apic; static unsigned long *vmx_vmread_bitmap; static unsigned long *vmx_vmwrite_bitmap; static bool cpu_has_load_ia32_efer; static bool cpu_has_load_perf_global_ctrl; static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); static DEFINE_SPINLOCK(vmx_vpid_lock); static struct vmcs_config { int size; int order; u32 revision_id; u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; } vmcs_config; static struct vmx_capability { u32 ept; u32 vpid; } vmx_capability; #define VMX_SEGMENT_FIELD(seg) \ [VCPU_SREG_##seg] = { \ .selector = GUEST_##seg##_SELECTOR, \ .base = GUEST_##seg##_BASE, \ .limit = GUEST_##seg##_LIMIT, \ .ar_bytes = GUEST_##seg##_AR_BYTES, \ } static const struct kvm_vmx_segment_field { unsigned selector; unsigned base; unsigned limit; unsigned ar_bytes; } kvm_vmx_segment_fields[] = { VMX_SEGMENT_FIELD(CS), VMX_SEGMENT_FIELD(DS), VMX_SEGMENT_FIELD(ES), VMX_SEGMENT_FIELD(FS), VMX_SEGMENT_FIELD(GS), VMX_SEGMENT_FIELD(SS), VMX_SEGMENT_FIELD(TR), VMX_SEGMENT_FIELD(LDTR), }; static u64 host_efer; static void ept_save_pdptrs(struct kvm_vcpu *vcpu); /* * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it * away by decrementing the array size. 
*/ static const u32 vmx_msr_index[] = { #ifdef CONFIG_X86_64 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, #endif MSR_EFER, MSR_TSC_AUX, MSR_STAR, }; static inline bool is_page_fault(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); } static inline bool is_no_device(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); } static inline bool is_invalid_opcode(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); } static inline bool is_external_interrupt(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); } static inline bool is_machine_check(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; } static inline bool cpu_has_vmx_tpr_shadow(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; } static inline bool vm_need_tpr_shadow(struct kvm *kvm) { return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)); } static inline bool cpu_has_secondary_exec_ctrls(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; } static inline bool cpu_has_vmx_virtualize_apic_accesses(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } static inline bool cpu_has_vmx_virtualize_x2apic_mode(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } static inline bool cpu_has_vmx_apic_register_virt(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_APIC_REGISTER_VIRT; } static inline bool cpu_has_vmx_virtual_intr_delivery(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; } static inline bool cpu_has_vmx_posted_intr(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; } static inline bool cpu_has_vmx_apicv(void) { return cpu_has_vmx_apic_register_virt() && cpu_has_vmx_virtual_intr_delivery() && cpu_has_vmx_posted_intr(); } static inline bool cpu_has_vmx_flexpriority(void) { return cpu_has_vmx_tpr_shadow() && cpu_has_vmx_virtualize_apic_accesses(); } static inline bool cpu_has_vmx_ept_execute_only(void) { return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT; } static inline bool cpu_has_vmx_eptp_uncacheable(void) { return vmx_capability.ept & VMX_EPTP_UC_BIT; } static inline bool cpu_has_vmx_eptp_writeback(void) { return vmx_capability.ept & VMX_EPTP_WB_BIT; } static inline bool cpu_has_vmx_ept_2m_page(void) { return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_1g_page(void) { return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_4levels(void) { return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; } static inline bool cpu_has_vmx_ept_ad_bits(void) { return vmx_capability.ept & VMX_EPT_AD_BIT; } static inline bool cpu_has_vmx_invept_context(void) { return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT; } static inline bool cpu_has_vmx_invept_global(void) { 
return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; } static inline bool cpu_has_vmx_invvpid_single(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; } static inline bool cpu_has_vmx_invvpid_global(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; } static inline bool cpu_has_vmx_ept(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_EPT; } static inline bool cpu_has_vmx_unrestricted_guest(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_UNRESTRICTED_GUEST; } static inline bool cpu_has_vmx_ple(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PAUSE_LOOP_EXITING; } static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm) { return flexpriority_enabled && irqchip_in_kernel(kvm); } static inline bool cpu_has_vmx_vpid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_VPID; } static inline bool cpu_has_vmx_rdtscp(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_RDTSCP; } static inline bool cpu_has_vmx_invpcid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_INVPCID; } static inline bool cpu_has_virtual_nmis(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; } static inline bool cpu_has_vmx_wbinvd_exit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_WBINVD_EXITING; } static inline bool cpu_has_vmx_shadow_vmcs(void) { u64 vmx_msr; rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); /* check if the cpu supports writing r/o exit information fields */ if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) return false; return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_SHADOW_VMCS; } static inline bool report_flexpriority(void) { return flexpriority_enabled; } static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) { return vmcs12->cpu_based_vm_exec_control & bit; } static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) { return (vmcs12->cpu_based_vm_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && (vmcs12->secondary_vm_exec_control & bit); } static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; } static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER; } static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); } static inline bool is_exception(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); } static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification); static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification); static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { int i; for (i = 0; i < vmx->nmsrs; ++i) if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) return i; return -1; } static inline void __invvpid(int ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; u64 rsvd : 48; u64 gva; } operand = { vpid, 0, gva }; asm volatile (__ex(ASM_VMX_INVVPID) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:" : : "a"(&operand), "c"(ext) : "cc", "memory"); } static inline void __invept(int ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; } operand = {eptp, gpa}; asm volatile 
(__ex(ASM_VMX_INVEPT) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:\n" : : "a" (&operand), "c" (ext) : "cc", "memory"); } static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) { int i; i = __find_msr_index(vmx, msr); if (i >= 0) return &vmx->guest_msrs[i]; return NULL; } static void vmcs_clear(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", vmcs, phys_addr); } static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) { vmcs_clear(loaded_vmcs->vmcs); loaded_vmcs->cpu = -1; loaded_vmcs->launched = 0; } static void vmcs_load(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0" : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n", vmcs, phys_addr); } #ifdef CONFIG_KEXEC /* * This bitmap is used to indicate whether the vmclear * operation is enabled on each cpu. All bits are cleared (disabled) * by default. */ static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; static inline void crash_enable_local_vmclear(int cpu) { cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); } static inline void crash_disable_local_vmclear(int cpu) { cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); } static inline int crash_local_vmclear_enabled(int cpu) { return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); } static void crash_vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v; if (!crash_local_vmclear_enabled(cpu)) return; list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) vmcs_clear(v->vmcs); } #else static inline void crash_enable_local_vmclear(int cpu) { } static inline void crash_disable_local_vmclear(int cpu) { } #endif /* CONFIG_KEXEC */ static void __loaded_vmcs_clear(void *arg) { struct loaded_vmcs *loaded_vmcs = arg; int cpu = raw_smp_processor_id(); if (loaded_vmcs->cpu != cpu) return; /* vcpu migration can race with cpu offline */ if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) per_cpu(current_vmcs, cpu) = NULL; crash_disable_local_vmclear(cpu); list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); /* * The update of loaded_vmcs->loaded_vmcss_on_cpu_link must be visible * before loaded_vmcs->cpu is set to -1 in loaded_vmcs_init. Otherwise, * another cpu could see cpu == -1 first and then add the vmcs into its * percpu list before it is deleted.
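 * The smp_wmb() below provides that ordering; it pairs with the
 * smp_rmb() in vmx_vcpu_load().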
*/ smp_wmb(); loaded_vmcs_init(loaded_vmcs); crash_enable_local_vmclear(cpu); } static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) { int cpu = loaded_vmcs->cpu; if (cpu != -1) smp_call_function_single(cpu, __loaded_vmcs_clear, loaded_vmcs, 1); } static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx) { if (vmx->vpid == 0) return; if (cpu_has_vmx_invvpid_single()) __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0); } static inline void vpid_sync_vcpu_global(void) { if (cpu_has_vmx_invvpid_global()) __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0); } static inline void vpid_sync_context(struct vcpu_vmx *vmx) { if (cpu_has_vmx_invvpid_single()) vpid_sync_vcpu_single(vmx); else vpid_sync_vcpu_global(); } static inline void ept_sync_global(void) { if (cpu_has_vmx_invept_global()) __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0); } static inline void ept_sync_context(u64 eptp) { if (enable_ept) { if (cpu_has_vmx_invept_context()) __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0); else ept_sync_global(); } } static __always_inline unsigned long vmcs_readl(unsigned long field) { unsigned long value; asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") : "=a"(value) : "d"(field) : "cc"); return value; } static __always_inline u16 vmcs_read16(unsigned long field) { return vmcs_readl(field); } static __always_inline u32 vmcs_read32(unsigned long field) { return vmcs_readl(field); } static __always_inline u64 vmcs_read64(unsigned long field) { #ifdef CONFIG_X86_64 return vmcs_readl(field); #else return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32); #endif } static noinline void vmwrite_error(unsigned long field, unsigned long value) { printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); dump_stack(); } static void vmcs_writel(unsigned long field, unsigned long value) { u8 error; asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" : "=q"(error) : "a"(value), "d"(field) : "cc"); if (unlikely(error)) vmwrite_error(field, value); } static void vmcs_write16(unsigned long field, u16 value) { vmcs_writel(field, value); } static void vmcs_write32(unsigned long field, u32 value) { vmcs_writel(field, value); } static void vmcs_write64(unsigned long field, u64 value) { vmcs_writel(field, value); #ifndef CONFIG_X86_64 asm volatile (""); vmcs_writel(field+1, value >> 32); #endif } static void vmcs_clear_bits(unsigned long field, u32 mask) { vmcs_writel(field, vmcs_readl(field) & ~mask); } static void vmcs_set_bits(unsigned long field, u32 mask) { vmcs_writel(field, vmcs_readl(field) | mask); } static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_ENTRY_CONTROLS, val); vmx->vm_entry_controls_shadow = val; } static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_entry_controls_shadow != val) vm_entry_controls_init(vmx, val); } static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_entry_controls_shadow; } static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); } static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); } static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_EXIT_CONTROLS, val); vmx->vm_exit_controls_shadow = val; } static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_exit_controls_shadow != val) 
vm_exit_controls_init(vmx, val); } static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_exit_controls_shadow; } static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); } static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); } static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) { vmx->segment_cache.bitmask = 0; } static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, unsigned field) { bool ret; u32 mask = 1 << (seg * SEG_FIELD_NR + field); if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); vmx->segment_cache.bitmask = 0; } ret = vmx->segment_cache.bitmask & mask; vmx->segment_cache.bitmask |= mask; return ret; } static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) { u16 *p = &vmx->segment_cache.seg[seg].selector; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); return *p; } static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) { ulong *p = &vmx->segment_cache.seg[seg].base; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); return *p; } static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].limit; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); return *p; } static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].ar; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); return *p; } static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | (1u << NM_VECTOR) | (1u << DB_VECTOR); if ((vcpu->guest_debug & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) eb |= 1u << BP_VECTOR; if (to_vmx(vcpu)->rmode.vm86_active) eb = ~0; if (enable_ept) eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ if (vcpu->fpu_active) eb &= ~(1u << NM_VECTOR); /* When we are running a nested L2 guest and L1 specified for it a * certain exception bitmap, we must trap the same exceptions and pass * them to L1. When running L2, we will only handle the exceptions * specified above if L1 did not want them. 
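 * That is why vmcs12's exception bitmap is OR'ed into eb below: an
 * exception L1 asked to intercept always causes an exit, and the nested
 * exit logic then decides whether it is reflected to L1 or handled by L0.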
*/ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; vmcs_write32(EXCEPTION_BITMAP, eb); } static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { vm_entry_controls_clearbit(vmx, entry); vm_exit_controls_clearbit(vmx, exit); } static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); return; } break; } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == m->nr) return; --m->nr; m->guest[i] = m->guest[m->nr]; m->host[i] = m->host[m->nr]; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit, unsigned long guest_val_vmcs, unsigned long host_val_vmcs, u64 guest_val, u64 host_val) { vmcs_write64(guest_val_vmcs, guest_val); vmcs_write64(host_val_vmcs, host_val); vm_entry_controls_setbit(vmx, entry); vm_exit_controls_setbit(vmx, exit); } static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER, GUEST_IA32_EFER, HOST_IA32_EFER, guest_val, host_val); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, GUEST_IA32_PERF_GLOBAL_CTRL, HOST_IA32_PERF_GLOBAL_CTRL, guest_val, host_val); return; } break; } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == NR_AUTOLOAD_MSRS) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; } else if (i == m->nr) { ++m->nr; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } m->guest[i].index = msr; m->guest[i].value = guest_val; m->host[i].index = msr; m->host[i].value = host_val; } static void reload_tss(void) { /* * VT restores TR but not its size. Useless. 
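 * Note (added): the host TSS descriptor in the GDT is left marked busy
 * while TR references it, so its type is reset to 9 (available TSS)
 * below; the ltr in load_TR_desc() would fault on a busy descriptor.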
*/ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); struct desc_struct *descs; descs = (void *)gdt->address; descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ load_TR_desc(); } static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { u64 guest_efer; u64 ignore_bits; guest_efer = vmx->vcpu.arch.efer; /* * NX is emulated; LMA and LME handled by hardware; SCE meaningless * outside long mode */ ignore_bits = EFER_NX | EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; vmx->guest_msrs[efer_offset].data = guest_efer; vmx->guest_msrs[efer_offset].mask = ~ignore_bits; clear_atomic_switch_msr(vmx, MSR_EFER); /* On ept, can't emulate nx, and must switch nx atomically */ if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) { guest_efer = vmx->vcpu.arch.efer; if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; } return true; } static unsigned long segment_base(u16 selector) { struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); struct desc_struct *d; unsigned long table_base; unsigned long v; if (!(selector & ~3)) return 0; table_base = gdt->address; if (selector & 4) { /* from ldt */ u16 ldt_selector = kvm_read_ldt(); if (!(ldt_selector & ~3)) return 0; table_base = segment_base(ldt_selector); } d = (struct desc_struct *)(table_base + (selector & ~7)); v = get_desc_base(d); #ifdef CONFIG_X86_64 if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11)) v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32; #endif return v; } static inline unsigned long kvm_read_tr_base(void) { u16 tr; asm("str %0" : "=g"(tr)); return segment_base(tr); } static void vmx_save_host_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int i; if (vmx->host_state.loaded) return; vmx->host_state.loaded = 1; /* * Set host fs and gs selectors. Unfortunately, 22.2.3 does not * allow segment selectors with cpl > 0 or ti == 1. 
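 * Note (added): selectors that fail this test (RPL != 0 or TI == 1) are
 * written to the VMCS as 0 below and reloaded by hand in
 * __vmx_load_host_state() via the *_reload_needed flags.
 */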
*/ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; savesegment(fs, vmx->host_state.fs_sel); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; } else { vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } savesegment(gs, vmx->host_state.gs_sel); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { vmcs_write16(HOST_GS_SELECTOR, 0); vmx->host_state.gs_ldt_reload_needed = 1; } #ifdef CONFIG_X86_64 savesegment(ds, vmx->host_state.ds_sel); savesegment(es, vmx->host_state.es_sel); #endif #ifdef CONFIG_X86_64 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); #else vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); #endif #ifdef CONFIG_X86_64 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); if (is_long_mode(&vmx->vcpu)) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (boot_cpu_has(X86_FEATURE_MPX)) rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); for (i = 0; i < vmx->save_nmsrs; ++i) kvm_set_shared_msr(vmx->guest_msrs[i].index, vmx->guest_msrs[i].data, vmx->guest_msrs[i].mask); } static void __vmx_load_host_state(struct vcpu_vmx *vmx) { if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); #ifdef CONFIG_X86_64 load_gs_index(vmx->host_state.gs_sel); #else loadsegment(gs, vmx->host_state.gs_sel); #endif } if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); #ifdef CONFIG_X86_64 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } #endif reload_tss(); #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif if (vmx->host_state.msr_host_bndcfgs) wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); /* * If the FPU is not active (through the host task or * the guest vcpu), then restore the cr0.TS bit. */ if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) stts(); load_gdt(this_cpu_ptr(&host_gdt)); } static void vmx_load_host_state(struct vcpu_vmx *vmx) { preempt_disable(); __vmx_load_host_state(vmx); preempt_enable(); } /* * Switches to specified vcpu, until a matching vcpu_put(), but assumes * vcpu mutex is already taken. */ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); if (!vmm_exclusive) kvm_cpu_vmxon(phys_addr); else if (vmx->loaded_vmcs->cpu != cpu) loaded_vmcs_clear(vmx->loaded_vmcs); if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; vmcs_load(vmx->loaded_vmcs->vmcs); } if (vmx->loaded_vmcs->cpu != cpu) { struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); unsigned long sysenter_esp; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); local_irq_disable(); crash_disable_local_vmclear(cpu); /* * Read loaded_vmcs->cpu should be before fetching * loaded_vmcs->loaded_vmcss_on_cpu_link. * See the comments in __loaded_vmcs_clear(). 
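 * The smp_rmb() below pairs with the smp_wmb() in __loaded_vmcs_clear();
 * observing loaded_vmcs->cpu == -1 therefore guarantees the VMCS is no
 * longer on another cpu's list before it is added to this cpu's list.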
*/ smp_rmb(); list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, &per_cpu(loaded_vmcss_on_cpu, cpu)); crash_enable_local_vmclear(cpu); local_irq_enable(); /* * Linux uses per-cpu TSS and GDT, so set these when switching * processors. */ vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ vmx->loaded_vmcs->cpu = cpu; } } static void vmx_vcpu_put(struct kvm_vcpu *vcpu) { __vmx_load_host_state(to_vmx(vcpu)); if (!vmm_exclusive) { __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs); vcpu->cpu = -1; kvm_cpu_vmxoff(); } } static void vmx_fpu_activate(struct kvm_vcpu *vcpu) { ulong cr0; if (vcpu->fpu_active) return; vcpu->fpu_active = 1; cr0 = vmcs_readl(GUEST_CR0); cr0 &= ~(X86_CR0_TS | X86_CR0_MP); cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP); vmcs_writel(GUEST_CR0, cr0); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; if (is_guest_mode(vcpu)) vcpu->arch.cr0_guest_owned_bits &= ~get_vmcs12(vcpu)->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); /* * Return the cr0 value that a nested guest would read. This is a combination * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by * its hypervisor (cr0_read_shadow). */ static inline unsigned long nested_read_cr0(struct vmcs12 *fields) { return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) | (fields->cr0_read_shadow & fields->cr0_guest_host_mask); } static inline unsigned long nested_read_cr4(struct vmcs12 *fields) { return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) | (fields->cr4_read_shadow & fields->cr4_guest_host_mask); } static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) { /* Note that there is no vcpu->fpu_active = 0 here. The caller must * set this *before* calling this function. */ vmx_decache_cr0_guest_bits(vcpu); vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = 0; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); if (is_guest_mode(vcpu)) { /* * L1's specified read shadow might not contain the TS bit, * so now that we turned on shadowing of this bit, we need to * set this bit of the shadow. Like in nested_vmx_run we need * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet * up-to-date here because we just decached cr0.TS (and we'll * only update vmcs12->guest_cr0 on nested exit). 
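 * Note (added): only the TS bit of vmcs12->guest_cr0 is patched up
 * below; the rest of the field stays stale until the next nested vmexit
 * recomputes it.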
*/ struct vmcs12 *vmcs12 = get_vmcs12(vcpu); vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) | (vcpu->arch.cr0 & X86_CR0_TS); vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); } else vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); } static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags, save_rflags; if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); rflags = vmcs_readl(GUEST_RFLAGS); if (to_vmx(vcpu)->rmode.vm86_active) { rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; save_rflags = to_vmx(vcpu)->rmode.save_rflags; rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; } to_vmx(vcpu)->rflags = rflags; } return to_vmx(vcpu)->rflags; } static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); to_vmx(vcpu)->rflags = rflags; if (to_vmx(vcpu)->rmode.vm86_active) { to_vmx(vcpu)->rmode.save_rflags = rflags; rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; } vmcs_writel(GUEST_RFLAGS, rflags); } static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) { u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); int ret = 0; if (interruptibility & GUEST_INTR_STATE_STI) ret |= KVM_X86_SHADOW_INT_STI; if (interruptibility & GUEST_INTR_STATE_MOV_SS) ret |= KVM_X86_SHADOW_INT_MOV_SS; return ret; } static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) { u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); u32 interruptibility = interruptibility_old; interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); if (mask & KVM_X86_SHADOW_INT_MOV_SS) interruptibility |= GUEST_INTR_STATE_MOV_SS; else if (mask & KVM_X86_SHADOW_INT_STI) interruptibility |= GUEST_INTR_STATE_STI; if ((interruptibility != interruptibility_old)) vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); } static void skip_emulated_instruction(struct kvm_vcpu *vcpu) { unsigned long rip; rip = kvm_rip_read(vcpu); rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_rip_write(vcpu, rip); /* skipping an emulated instruction also counts */ vmx_set_interrupt_shadow(vcpu, 0); } /* * KVM wants to inject page-faults which it got to the guest. This function * checks whether in a nested guest, we need to inject them to L1 or L2. 
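 * The test is simply vmcs12->exception_bitmap: if L1 never asked to
 * intercept this vector, the exception is delivered straight to L2 and
 * no nested vmexit is synthesized.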
*/ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; } static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code, bool reinject) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info = nr | INTR_INFO_VALID_MASK; if (!reinject && is_guest_mode(vcpu) && nested_vmx_check_exception(vcpu, nr)) return; if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; } if (vmx->rmode.vm86_active) { int inc_eip = 0; if (kvm_exception_is_soft(nr)) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } if (kvm_exception_is_soft(nr)) { vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); intr_info |= INTR_TYPE_SOFT_EXCEPTION; } else intr_info |= INTR_TYPE_HARD_EXCEPTION; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); } static bool vmx_rdtscp_supported(void) { return cpu_has_vmx_rdtscp(); } static bool vmx_invpcid_supported(void) { return cpu_has_vmx_invpcid() && enable_ept; } /* * Swap MSR entry in host/guest MSR entry array. */ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) { struct shared_msr_entry tmp; tmp = vmx->guest_msrs[to]; vmx->guest_msrs[to] = vmx->guest_msrs[from]; vmx->guest_msrs[from] = tmp; } static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) { unsigned long *msr_bitmap; if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic; else msr_bitmap = vmx_msr_bitmap_legacy_x2apic; } else { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode; else msr_bitmap = vmx_msr_bitmap_legacy; } vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); } /* * Set up the vmcs to automatically save and restore system * msrs. Don't touch the 64-bit msrs if the guest is in legacy * mode, as fiddling with msrs is very expensive. */ static void setup_msrs(struct vcpu_vmx *vmx) { int save_nmsrs, index; save_nmsrs = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) { index = __find_msr_index(vmx, MSR_SYSCALL_MASK); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_LSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_CSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_TSC_AUX); if (index >= 0 && vmx->rdtscp_enabled) move_msr_up(vmx, index, save_nmsrs++); /* * MSR_STAR is only needed on long mode guests, and only * if efer.sce is enabled. 
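 * (On Intel hardware SYSCALL/SYSRET are available only in long mode, and
 * both are gated by EFER.SCE, so MSR_STAR is dead weight otherwise.)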
*/ index = __find_msr_index(vmx, MSR_STAR); if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) move_msr_up(vmx, index, save_nmsrs++); } #endif index = __find_msr_index(vmx, MSR_EFER); if (index >= 0 && update_transition_efer(vmx, index)) move_msr_up(vmx, index, save_nmsrs++); vmx->save_nmsrs = save_nmsrs; if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(&vmx->vcpu); } /* * reads and returns guest's timestamp counter "register" * guest_tsc = host_tsc + tsc_offset -- 21.3 */ static u64 guest_read_tsc(void) { u64 host_tsc, tsc_offset; rdtscll(host_tsc); tsc_offset = vmcs_read64(TSC_OFFSET); return host_tsc + tsc_offset; } /* * Like guest_read_tsc, but always returns L1's notion of the timestamp * counter, even if a nested guest (L2) is currently running. */ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) { u64 tsc_offset; tsc_offset = is_guest_mode(vcpu) ? to_vmx(vcpu)->nested.vmcs01_tsc_offset : vmcs_read64(TSC_OFFSET); return host_tsc + tsc_offset; } /* * Engage any workarounds for mis-matched TSC rates. Currently limited to * software catchup for faster rates on slower CPUs. */ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) { if (!scale) return; if (user_tsc_khz > tsc_khz) { vcpu->arch.tsc_catchup = 1; vcpu->arch.tsc_always_catchup = 1; } else WARN(1, "user requested TSC rate below hardware speed\n"); } static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu) { return vmcs_read64(TSC_OFFSET); } /* * writes 'offset' into guest's timestamp counter offset register */ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { if (is_guest_mode(vcpu)) { /* * We're here if L1 chose not to trap WRMSR to TSC. According * to the spec, this should set L1's TSC; The offset that L1 * set for L2 remains unchanged, and still needs to be added * to the newly set TSC to get L2's TSC. */ struct vmcs12 *vmcs12; to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; /* recalculate vmcs02.TSC_OFFSET: */ vmcs12 = get_vmcs12(vcpu); vmcs_write64(TSC_OFFSET, offset + (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? vmcs12->tsc_offset : 0)); } else { trace_kvm_write_tsc_offset(vcpu->vcpu_id, vmcs_read64(TSC_OFFSET), offset); vmcs_write64(TSC_OFFSET, offset); } } static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host) { u64 offset = vmcs_read64(TSC_OFFSET); vmcs_write64(TSC_OFFSET, offset + adjustment); if (is_guest_mode(vcpu)) { /* Even when running L2, the adjustment needs to apply to L1 */ to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; } else trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset, offset + adjustment); } static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) { return target_tsc - native_read_tsc(); } static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31))); } /* * nested_vmx_allowed() checks whether a guest should be allowed to use VMX * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for * all guests if the "nested" module option is off, and can also be disabled * for a single guest by disabling its VMX cpuid bit. */ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) { return nested && guest_cpuid_has_vmx(vcpu); } /* * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be * returned for the various VMX controls MSRs when nested VMX is enabled. 
* The same values should also be used to verify that vmcs12 control fields are * valid during nested entry from L1 to L2. * Each of these control msrs has a low and high 32-bit half: A low bit is on * if the corresponding bit in the (32-bit) control field *must* be on, and a * bit in the high half is on if the corresponding bit in the control field * may be on. See also vmx_control_verify(). * TODO: allow these variables to be modified (downgraded) by module options * or other means. */ static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high; static u32 nested_vmx_true_procbased_ctls_low; static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high; static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; static u32 nested_vmx_true_exit_ctls_low; static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; static u32 nested_vmx_true_entry_ctls_low; static u32 nested_vmx_misc_low, nested_vmx_misc_high; static u32 nested_vmx_ept_caps; static __init void nested_vmx_setup_ctls_msrs(void) { /* * Note that as a general rule, the high half of the MSRs (bits in * the control fields which may be 1) should be initialized by the * intersection of the underlying hardware's MSR (i.e., features which * can be supported) and the list of features we want to expose - * because they are known to be properly supported in our code. * Also, usually, the low half of the MSRs (bits which must be 1) can * be set to 0, meaning that L1 may turn off any of these bits. The * reason is that if one of these bits is necessary, it will appear * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control * fields of vmcs01 and vmcs02, will turn these bits off - and * nested_vmx_exit_handled() will not pass related exits to L1. * These rules have exceptions below. */ /* pin-based controls */ rdmsr(MSR_IA32_VMX_PINBASED_CTLS, nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high); nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | PIN_BASED_VMX_PREEMPTION_TIMER; /* exit controls */ rdmsr(MSR_IA32_VMX_EXIT_CTLS, nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_exit_ctls_high &= #ifdef CONFIG_X86_64 VM_EXIT_HOST_ADDR_SPACE_SIZE | #endif VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; if (vmx_mpx_supported()) nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; /* We support free control of debug control saving. */ nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low & ~VM_EXIT_SAVE_DEBUG_CONTROLS; /* entry controls */ rdmsr(MSR_IA32_VMX_ENTRY_CTLS, nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_entry_ctls_high &= #ifdef CONFIG_X86_64 VM_ENTRY_IA32E_MODE | #endif VM_ENTRY_LOAD_IA32_PAT; nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); if (vmx_mpx_supported()) nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; /* We support free control of debug control loading. 
*/ nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low & ~VM_ENTRY_LOAD_DEBUG_CONTROLS; /* cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; nested_vmx_procbased_ctls_high &= CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; /* * We can allow some features even when not supported by the * hardware. For example, L1 can specify an MSR bitmap - and we * can use it to avoid exits to L1 - even when L0 runs L2 * without MSR bitmaps. */ nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | CPU_BASED_USE_MSR_BITMAPS; /* We support free control of CR3 access interception. */ nested_vmx_true_procbased_ctls_low = nested_vmx_procbased_ctls_low & ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); /* secondary cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high); nested_vmx_secondary_ctls_low = 0; nested_vmx_secondary_ctls_high &= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_WBINVD_EXITING; if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | VMX_EPT_INVEPT_BIT; nested_vmx_ept_caps &= vmx_capability.ept; /* * For nested guests, we don't do anything specific * for single context invalidation. Hence, only advertise * support for global context invalidation. */ nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT; } else nested_vmx_ept_caps = 0; /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high); nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA; nested_vmx_misc_low |= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | VMX_MISC_ACTIVITY_HLT; nested_vmx_misc_high = 0; } static inline bool vmx_control_verify(u32 control, u32 low, u32 high) { /* * Bits 0 in high must be 0, and bits 1 in low must be 1. */ return ((control & high) | low) == control; } static inline u64 vmx_control_msr(u32 low, u32 high) { return low | ((u64)high << 32); } /* Returns 0 on success, non-0 otherwise. */ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { switch (msr_index) { case MSR_IA32_VMX_BASIC: /* * This MSR reports some information about VMX support. We * should return information about the VMX we emulate for the * guest, and the VMCS structure we give it - not about the * VMX support of the underlying hardware. 
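 * Hence the value below advertises VMCS12_REVISION and VMCS12_SIZE for
 * our software-defined vmcs12 layout, the write-back memory type, and
 * VMX_BASIC_TRUE_CTLS so that L1 consults the TRUE_*_CTLS MSRs.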
*/ *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); break; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high); break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *pdata = vmx_control_msr(nested_vmx_true_procbased_ctls_low, nested_vmx_procbased_ctls_high); break; case MSR_IA32_VMX_PROCBASED_CTLS: *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: *pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low, nested_vmx_exit_ctls_high); break; case MSR_IA32_VMX_EXIT_CTLS: *pdata = vmx_control_msr(nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: *pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low, nested_vmx_entry_ctls_high); break; case MSR_IA32_VMX_ENTRY_CTLS: *pdata = vmx_control_msr(nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); break; case MSR_IA32_VMX_MISC: *pdata = vmx_control_msr(nested_vmx_misc_low, nested_vmx_misc_high); break; /* * These MSRs specify bits which the guest must keep fixed (on or off) * while L1 is in VMXON mode (in L1's root mode, or running an L2). * We picked the standard core2 setting. */ #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) #define VMXON_CR4_ALWAYSON X86_CR4_VMXE case MSR_IA32_VMX_CR0_FIXED0: *pdata = VMXON_CR0_ALWAYSON; break; case MSR_IA32_VMX_CR0_FIXED1: *pdata = -1ULL; break; case MSR_IA32_VMX_CR4_FIXED0: *pdata = VMXON_CR4_ALWAYSON; break; case MSR_IA32_VMX_CR4_FIXED1: *pdata = -1ULL; break; case MSR_IA32_VMX_VMCS_ENUM: *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */ break; case MSR_IA32_VMX_PROCBASED_CTLS2: *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high); break; case MSR_IA32_VMX_EPT_VPID_CAP: /* Currently, no nested vpid support */ *pdata = nested_vmx_ept_caps; break; default: return 1; } return 0; } /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { u64 data; struct shared_msr_entry *msr; if (!pdata) { printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); return -EINVAL; } switch (msr_index) { #ifdef CONFIG_X86_64 case MSR_FS_BASE: data = vmcs_readl(GUEST_FS_BASE); break; case MSR_GS_BASE: data = vmcs_readl(GUEST_GS_BASE); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(to_vmx(vcpu)); data = to_vmx(vcpu)->msr_guest_kernel_gs_base; break; #endif case MSR_EFER: return kvm_get_msr_common(vcpu, msr_index, pdata); case MSR_IA32_TSC: data = guest_read_tsc(); break; case MSR_IA32_SYSENTER_CS: data = vmcs_read32(GUEST_SYSENTER_CS); break; case MSR_IA32_SYSENTER_EIP: data = vmcs_readl(GUEST_SYSENTER_EIP); break; case MSR_IA32_SYSENTER_ESP: data = vmcs_readl(GUEST_SYSENTER_ESP); break; case MSR_IA32_BNDCFGS: if (!vmx_mpx_supported()) return 1; data = vmcs_read64(GUEST_BNDCFGS); break; case MSR_IA32_FEATURE_CONTROL: if (!nested_vmx_allowed(vcpu)) return 1; data = to_vmx(vcpu)->nested.msr_ia32_feature_control; break; case MSR_IA32_VMX_BASIC ... 
MSR_IA32_VMX_VMFUNC: if (!nested_vmx_allowed(vcpu)) return 1; return vmx_get_vmx_msr(vcpu, msr_index, pdata); case MSR_TSC_AUX: if (!to_vmx(vcpu)->rdtscp_enabled) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(to_vmx(vcpu), msr_index); if (msr) { data = msr->data; break; } return kvm_get_msr_common(vcpu, msr_index, pdata); } *pdata = data; return 0; } static void vmx_leave_nested(struct kvm_vcpu *vcpu); /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr; int ret = 0; u32 msr_index = msr_info->index; u64 data = msr_info->data; switch (msr_index) { case MSR_EFER: ret = kvm_set_msr_common(vcpu, msr_info); break; #ifdef CONFIG_X86_64 case MSR_FS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_FS_BASE, data); break; case MSR_GS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_GS_BASE, data); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(vmx); vmx->msr_guest_kernel_gs_base = data; break; #endif case MSR_IA32_SYSENTER_CS: vmcs_write32(GUEST_SYSENTER_CS, data); break; case MSR_IA32_SYSENTER_EIP: vmcs_writel(GUEST_SYSENTER_EIP, data); break; case MSR_IA32_SYSENTER_ESP: vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_BNDCFGS: if (!vmx_mpx_supported()) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) return 1; vmcs_write64(GUEST_IA32_PAT, data); vcpu->arch.pat = data; break; } ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_TSC_ADJUST: ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_FEATURE_CONTROL: if (!nested_vmx_allowed(vcpu) || (to_vmx(vcpu)->nested.msr_ia32_feature_control & FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) return 1; vmx->nested.msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) vmx_leave_nested(vcpu); break; case MSR_IA32_VMX_BASIC ...
MSR_IA32_VMX_VMFUNC: return 1; /* they are read-only */ case MSR_TSC_AUX: if (!vmx->rdtscp_enabled) return 1; /* Check reserved bit, higher 32 bits should be zero */ if ((data >> 32) != 0) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(vmx, msr_index); if (msr) { msr->data = data; if (msr - vmx->guest_msrs < vmx->save_nmsrs) { preempt_disable(); kvm_set_shared_msr(msr->index, msr->data, msr->mask); preempt_enable(); } break; } ret = kvm_set_msr_common(vcpu, msr_info); } return ret; } static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); switch (reg) { case VCPU_REGS_RSP: vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); break; case VCPU_REGS_RIP: vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); break; case VCPU_EXREG_PDPTR: if (enable_ept) ept_save_pdptrs(vcpu); break; default: break; } } static __init int cpu_has_kvm_support(void) { return cpu_has_vmx(); } static __init int vmx_disabled_by_bios(void) { u64 msr; rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); if (msr & FEATURE_CONTROL_LOCKED) { /* launched w/ TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && tboot_enabled()) return 1; /* launched w/o TXT and VMX only enabled w/ TXT */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && !tboot_enabled()) { printk(KERN_WARNING "kvm: disable TXT in the BIOS or " "activate TXT before enabling KVM\n"); return 1; } /* launched w/o TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && !tboot_enabled()) return 1; } return 0; } static void kvm_cpu_vmxon(u64 addr) { asm volatile (ASM_VMX_VMXON_RAX : : "a"(&addr), "m"(addr) : "memory", "cc"); } static int hardware_enable(void) { int cpu = raw_smp_processor_id(); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); u64 old, test_bits; if (read_cr4() & X86_CR4_VMXE) return -EBUSY; INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); /* * Now we can enable the vmclear operation in kdump * since the loaded_vmcss_on_cpu list on this cpu * has been initialized. * * Though the cpu is not in VMX operation yet, enabling the vmclear * operation here is safe because the loaded_vmcss_on_cpu list is * still empty. */ crash_enable_local_vmclear(cpu); rdmsrl(MSR_IA32_FEATURE_CONTROL, old); test_bits = FEATURE_CONTROL_LOCKED; test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; if (tboot_enabled()) test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; if ((old & test_bits) != test_bits) { /* enable and lock */ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); } write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ if (vmm_exclusive) { kvm_cpu_vmxon(phys_addr); ept_sync_global(); } native_store_gdt(this_cpu_ptr(&host_gdt)); return 0; } static void vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v, *n; list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) __loaded_vmcs_clear(v); } /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() * tricks.
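 * The __ex() wrapper attaches a fixup entry so that a fault taken by
 * VMXOFF (e.g. when VMX has already been torn down during reboot or
 * kexec) is swallowed instead of crashing the machine.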
*/ static void kvm_cpu_vmxoff(void) { asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); } static void hardware_disable(void) { if (vmm_exclusive) { vmclear_local_loaded_vmcss(); kvm_cpu_vmxoff(); } write_cr4(read_cr4() & ~X86_CR4_VMXE); } static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) { u32 vmx_msr_low, vmx_msr_high; u32 ctl = ctl_min | ctl_opt; rdmsr(msr, vmx_msr_low, vmx_msr_high); ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ /* Ensure minimum (required) set of control bits are supported. */ if (ctl_min & ~ctl) return -EIO; *result = ctl; return 0; } static __init bool allow_1_setting(u32 msr, u32 ctl) { u32 vmx_msr_low, vmx_msr_high; rdmsr(msr, vmx_msr_low, vmx_msr_high); return vmx_msr_high & ctl; } static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) { u32 vmx_msr_low, vmx_msr_high; u32 min, opt, min2, opt2; u32 _pin_based_exec_control = 0; u32 _cpu_based_exec_control = 0; u32 _cpu_based_2nd_exec_control = 0; u32 _vmexit_control = 0; u32 _vmentry_control = 0; min = CPU_BASED_HLT_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_MWAIT_EXITING | CPU_BASED_MONITOR_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_RDPMC_EXITING; opt = CPU_BASED_TPR_SHADOW | CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, &_cpu_based_exec_control) < 0) return -EIO; #ifdef CONFIG_X86_64 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & ~CPU_BASED_CR8_STORE_EXITING; #endif if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { min2 = 0; opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_WBINVD_EXITING | SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_PAUSE_LOOP_EXITING | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_SHADOW_VMCS; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) return -EIO; } #ifndef CONFIG_X86_64 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; #endif if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_2nd_exec_control &= ~( SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT enabled */ _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_INVLPG_EXITING); rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, vmx_capability.ept, vmx_capability.vpid); } min = VM_EXIT_SAVE_DEBUG_CONTROLS; #ifdef CONFIG_X86_64 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; #endif opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT | VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, &_vmexit_control) < 0) return -EIO; min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; opt = PIN_BASED_VIRTUAL_NMIS | 
PIN_BASED_POSTED_INTR; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, &_pin_based_exec_control) < 0) return -EIO; if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) || !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT)) _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; min = VM_ENTRY_LOAD_DEBUG_CONTROLS; opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, &_vmentry_control) < 0) return -EIO; rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) return -EIO; #ifdef CONFIG_X86_64 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ if (vmx_msr_high & (1u<<16)) return -EIO; #endif /* Require Write-Back (WB) memory type for VMCS accesses. */ if (((vmx_msr_high >> 18) & 15) != 6) return -EIO; vmcs_conf->size = vmx_msr_high & 0x1fff; vmcs_conf->order = get_order(vmcs_config.size); vmcs_conf->revision_id = vmx_msr_low; vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; vmcs_conf->vmexit_ctrl = _vmexit_control; vmcs_conf->vmentry_ctrl = _vmentry_control; cpu_has_load_ia32_efer = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_EFER) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_EFER); cpu_has_load_perf_global_ctrl = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); /* * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL * but due to errata below it can't be used. The workaround is to use * the msr load mechanism to switch IA32_PERF_GLOBAL_CTRL. * * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32] * * AAK155 (model 26) * AAP115 (model 30) * AAT100 (model 37) * BC86,AAY89,BD102 (model 44) * BA97 (model 46) * */ if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) { switch (boot_cpu_data.x86_model) { case 26: case 30: case 37: case 44: case 46: cpu_has_load_perf_global_ctrl = false; printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " "does not work properly.
Using workaround\n"); break; default: break; } } return 0; } static struct vmcs *alloc_vmcs_cpu(int cpu) { int node = cpu_to_node(cpu); struct page *pages; struct vmcs *vmcs; pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order); if (!pages) return NULL; vmcs = page_address(pages); memset(vmcs, 0, vmcs_config.size); vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ return vmcs; } static struct vmcs *alloc_vmcs(void) { return alloc_vmcs_cpu(raw_smp_processor_id()); } static void free_vmcs(struct vmcs *vmcs) { free_pages((unsigned long)vmcs, vmcs_config.order); } /* * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded */ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) { if (!loaded_vmcs->vmcs) return; loaded_vmcs_clear(loaded_vmcs); free_vmcs(loaded_vmcs->vmcs); loaded_vmcs->vmcs = NULL; } static void free_kvm_area(void) { int cpu; for_each_possible_cpu(cpu) { free_vmcs(per_cpu(vmxarea, cpu)); per_cpu(vmxarea, cpu) = NULL; } } static void init_vmcs_shadow_fields(void) { int i, j; /* No checks for read only fields yet */ for (i = j = 0; i < max_shadow_read_write_fields; i++) { switch (shadow_read_write_fields[i]) { case GUEST_BNDCFGS: if (!vmx_mpx_supported()) continue; break; default: break; } if (j < i) shadow_read_write_fields[j] = shadow_read_write_fields[i]; j++; } max_shadow_read_write_fields = j; /* shadowed fields guest access without vmexit */ for (i = 0; i < max_shadow_read_write_fields; i++) { clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap); clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap); } for (i = 0; i < max_shadow_read_only_fields; i++) clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap); } static __init int alloc_kvm_area(void) { int cpu; for_each_possible_cpu(cpu) { struct vmcs *vmcs; vmcs = alloc_vmcs_cpu(cpu); if (!vmcs) { free_kvm_area(); return -ENOMEM; } per_cpu(vmxarea, cpu) = vmcs; } return 0; } static __init int hardware_setup(void) { if (setup_vmcs_config(&vmcs_config) < 0) return -EIO; if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); if (!cpu_has_vmx_vpid()) enable_vpid = 0; if (!cpu_has_vmx_shadow_vmcs()) enable_shadow_vmcs = 0; if (enable_shadow_vmcs) init_vmcs_shadow_fields(); if (!cpu_has_vmx_ept() || !cpu_has_vmx_ept_4levels()) { enable_ept = 0; enable_unrestricted_guest = 0; enable_ept_ad_bits = 0; } if (!cpu_has_vmx_ept_ad_bits()) enable_ept_ad_bits = 0; if (!cpu_has_vmx_unrestricted_guest()) enable_unrestricted_guest = 0; if (!cpu_has_vmx_flexpriority()) { flexpriority_enabled = 0; /* * set_apic_access_page_addr() is used to reload apic access * page upon invalidation. No need to do anything if the * processor does not have the APIC_ACCESS_ADDR VMCS field. 
*/ kvm_x86_ops->set_apic_access_page_addr = NULL; } if (!cpu_has_vmx_tpr_shadow()) kvm_x86_ops->update_cr8_intercept = NULL; if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); if (!cpu_has_vmx_ple()) ple_gap = 0; if (!cpu_has_vmx_apicv()) enable_apicv = 0; if (enable_apicv) kvm_x86_ops->update_cr8_intercept = NULL; else { kvm_x86_ops->hwapic_irr_update = NULL; kvm_x86_ops->deliver_posted_interrupt = NULL; kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy; } if (nested) nested_vmx_setup_ctls_msrs(); return alloc_kvm_area(); } static __exit void hardware_unsetup(void) { free_kvm_area(); } static bool emulation_required(struct kvm_vcpu *vcpu) { return emulate_invalid_guest_state && !guest_state_valid(vcpu); } static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save) { if (!emulate_invalid_guest_state) { /* * CS and SS RPL should be equal during guest entry according * to VMX spec, but in reality it is not always so. Since vcpu * is in the middle of the transition from real mode to * protected mode it is safe to assume that RPL 0 is a good * default value. */ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) save->selector &= ~SELECTOR_RPL_MASK; save->dpl = save->selector & SELECTOR_RPL_MASK; save->s = 1; } vmx_set_segment(vcpu, save, seg); } static void enter_pmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Update the real mode segment cache. It may not be up to date if a * segment register was written while the vcpu was in guest mode. */ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 0; vmx_segment_cache_clear(vmx); vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); flags = vmcs_readl(GUEST_RFLAGS); flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); update_exception_bitmap(vcpu); fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); } static void fix_rmode_seg(int seg, struct kvm_segment *save) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; struct kvm_segment var = *save; var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; if (!emulate_invalid_guest_state) { var.selector = var.base >> 4; var.base = var.base & 0xffff0; var.limit = 0xffff; var.g = 0; var.db = 0; var.present = 1; var.s = 1; var.l = 0; var.unusable = 0; var.type = 0x3; var.avl = 0; if (save->base & 0xf) printk_once(KERN_WARNING "kvm: segment base is not " "paragraph aligned when entering " "protected mode (seg=%d)", seg); } vmcs_write16(sf->selector, var.selector); vmcs_write32(sf->base, var.base); vmcs_write32(sf->limit, var.limit); vmcs_write32(sf->ar_bytes, 
vmx_segment_access_rights(&var)); } static void enter_rmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 1; /* * Very old userspace does not call KVM_SET_TSS_ADDR before entering * vcpu. Warn the user that an update is overdue. */ if (!vcpu->kvm->arch.tss_addr) printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be " "called before entering vcpu\n"); vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); flags = vmcs_readl(GUEST_RFLAGS); vmx->rmode.save_rflags = flags; flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); update_exception_bitmap(vcpu); fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); kvm_mmu_reset_context(vcpu); } static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); if (!msr) return; /* * Force kernel_gs_base reloading before EFER changes, as control * of this msr depends on is_long_mode(). */ vmx_load_host_state(to_vmx(vcpu)); vcpu->arch.efer = efer; if (efer & EFER_LMA) { vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer; } else { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer & ~EFER_LME; } setup_msrs(vmx); } #ifdef CONFIG_X86_64 static void enter_lmode(struct kvm_vcpu *vcpu) { u32 guest_tr_ar; vmx_segment_cache_clear(to_vmx(vcpu)); guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", __func__); vmcs_write32(GUEST_TR_AR_BYTES, (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS); } vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); } static void exit_lmode(struct kvm_vcpu *vcpu) { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); } #endif static void vmx_flush_tlb(struct kvm_vcpu *vcpu) { vpid_sync_context(to_vmx(vcpu)); if (enable_ept) { if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); } } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; vcpu->arch.cr0 &= ~cr0_guest_owned_bits; vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; } static void vmx_decache_cr3(struct kvm_vcpu *vcpu) { if (enable_ept && is_paging(vcpu)) vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); } static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; vcpu->arch.cr4 &= ~cr4_guest_owned_bits; vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; } static void ept_load_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty)) return; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); } } static void ept_save_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); } __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); } static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | (CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } else if (!is_paging(vcpu)) { /* From nonpaging to paging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } if (!(cr0 & X86_CR0_WP)) *hw_cr0 &= ~X86_CR0_WP; } static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long hw_cr0; hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); if (enable_unrestricted_guest) hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) enter_pmode(vcpu); if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) enter_rmode(vcpu); } #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) enter_lmode(vcpu); if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) exit_lmode(vcpu); } 
#endif if (enable_ept) ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); if (!vcpu->fpu_active) hw_cr0 |= X86_CR0_TS | X86_CR0_MP; vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; /* depends on vcpu->arch.cr0 to be set to a new value */ vmx->emulation_required = emulation_required(vcpu); } static u64 construct_eptp(unsigned long root_hpa) { u64 eptp; /* TODO write the value reading from MSR */ eptp = VMX_EPT_DEFAULT_MT | VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; if (enable_ept_ad_bits) eptp |= VMX_EPT_AD_ENABLE_BIT; eptp |= (root_hpa & PAGE_MASK); return eptp; } static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { unsigned long guest_cr3; u64 eptp; guest_cr3 = cr3; if (enable_ept) { eptp = construct_eptp(cr3); vmcs_write64(EPT_POINTER, eptp); if (is_paging(vcpu) || is_guest_mode(vcpu)) guest_cr3 = kvm_read_cr3(vcpu); else guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; ept_load_pdptrs(vcpu); } vmx_flush_tlb(vcpu); vmcs_writel(GUEST_CR3, guest_cr3); } static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ? KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); if (cr4 & X86_CR4_VMXE) { /* * To use VMXON (and later other VMX instructions), a guest * must first be able to turn on cr4.VMXE (see handle_vmon()). * So basically the check on whether to allow nested VMX * is here. */ if (!nested_vmx_allowed(vcpu)) return 1; } if (to_vmx(vcpu)->nested.vmxon && ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) return 1; vcpu->arch.cr4 = cr4; if (enable_ept) { if (!is_paging(vcpu)) { hw_cr4 &= ~X86_CR4_PAE; hw_cr4 |= X86_CR4_PSE; /* * SMEP/SMAP is disabled if CPU is in non-paging mode * in hardware. However KVM always uses paging mode to * emulate guest non-paging mode with TDP. * To emulate this behavior, SMEP/SMAP needs to be * manually disabled when guest switches to non-paging * mode. */ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP); } else if (!(cr4 & X86_CR4_PAE)) { hw_cr4 &= ~X86_CR4_PAE; } } vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); return 0; } static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 ar; if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { *var = vmx->rmode.segs[seg]; if (seg == VCPU_SREG_TR || var->selector == vmx_read_guest_seg_selector(vmx, seg)) return; var->base = vmx_read_guest_seg_base(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); return; } var->base = vmx_read_guest_seg_base(vmx, seg); var->limit = vmx_read_guest_seg_limit(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); ar = vmx_read_guest_seg_ar(vmx, seg); var->unusable = (ar >> 16) & 1; var->type = ar & 15; var->s = (ar >> 4) & 1; var->dpl = (ar >> 5) & 3; /* * Some userspaces do not preserve unusable property. Since usable * segment has to be present according to VMX spec we can use present * property to amend userspace bug by making unusable segment always * nonpresent. vmx_segment_access_rights() already marks nonpresent * segment as unusable. 
*/ var->present = !var->unusable; var->avl = (ar >> 12) & 1; var->l = (ar >> 13) & 1; var->db = (ar >> 14) & 1; var->g = (ar >> 15) & 1; } static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment s; if (to_vmx(vcpu)->rmode.vm86_active) { vmx_get_segment(vcpu, &s, seg); return s.base; } return vmx_read_guest_seg_base(to_vmx(vcpu), seg); } static int vmx_get_cpl(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (unlikely(vmx->rmode.vm86_active)) return 0; else { int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); return AR_DPL(ar); } } static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; if (var->unusable || !var->present) ar = 1 << 16; else { ar = var->type & 15; ar |= (var->s & 1) << 4; ar |= (var->dpl & 3) << 5; ar |= (var->present & 1) << 7; ar |= (var->avl & 1) << 12; ar |= (var->l & 1) << 13; ar |= (var->db & 1) << 14; ar |= (var->g & 1) << 15; } return ar; } static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; vmx_segment_cache_clear(vmx); if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { vmx->rmode.segs[seg] = *var; if (seg == VCPU_SREG_TR) vmcs_write16(sf->selector, var->selector); else if (var->s) fix_rmode_seg(seg, &vmx->rmode.segs[seg]); goto out; } vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); /* * Fix the "Accessed" bit in AR field of segment registers for older * qemu binaries. * IA32 arch specifies that at the time of processor reset the * "Accessed" bit in the AR field of segment registers is 1. And qemu * is setting it to 0 in the userland code. This causes invalid guest * state vmexit when "unrestricted guest" mode is turned on. * Fix for this setup issue in cpu_reset is being pushed in the qemu * tree. Newer qemu binaries with that qemu fix would not need this * kvm hack. 
*/ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) var->type |= 0x1; /* Accessed */ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); out: vmx->emulation_required = emulation_required(vcpu); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); *db = (ar >> 14) & 1; *l = (ar >> 13) & 1; } static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_IDTR_LIMIT); dt->address = vmcs_readl(GUEST_IDTR_BASE); } static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_IDTR_LIMIT, dt->size); vmcs_writel(GUEST_IDTR_BASE, dt->address); } static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_GDTR_LIMIT); dt->address = vmcs_readl(GUEST_GDTR_BASE); } static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_GDTR_LIMIT, dt->size); vmcs_writel(GUEST_GDTR_BASE, dt->address); } static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; u32 ar; vmx_get_segment(vcpu, &var, seg); var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; ar = vmx_segment_access_rights(&var); if (var.base != (var.selector << 4)) return false; if (var.limit != 0xffff) return false; if (ar != 0xf3) return false; return true; } static bool code_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment cs; unsigned int cs_rpl; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs_rpl = cs.selector & SELECTOR_RPL_MASK; if (cs.unusable) return false; if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; if (cs.type & AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; } else { if (cs.dpl != cs_rpl) return false; } if (!cs.present) return false; /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ return true; } static bool stack_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ss; unsigned int ss_rpl; vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); ss_rpl = ss.selector & SELECTOR_RPL_MASK; if (ss.unusable) return true; if (ss.type != 3 && ss.type != 7) return false; if (!ss.s) return false; if (ss.dpl != ss_rpl) /* DPL != RPL */ return false; if (!ss.present) return false; return true; } static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; unsigned int rpl; vmx_get_segment(vcpu, &var, seg); rpl = var.selector & SELECTOR_RPL_MASK; if (var.unusable) return true; if (!var.s) return false; if (!var.present) return false; if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) { if (var.dpl < rpl) /* DPL < RPL */ return false; } /* TODO: Add other members to kvm_segment_field to allow checking for other access * rights flags */ return true; } static bool tr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment tr; vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); if (tr.unusable) return false; if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ return false; if (!tr.present) return false; return true; } static bool ldtr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ldtr; vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); if (ldtr.unusable) return true; if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) return false; if (!ldtr.present) return false; return true; } static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) { struct 
kvm_segment cs, ss; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); return ((cs.selector & SELECTOR_RPL_MASK) == (ss.selector & SELECTOR_RPL_MASK)); } /* * Check if guest state is valid. Returns true if valid, false if * not. * We assume that registers are always usable */ static bool guest_state_valid(struct kvm_vcpu *vcpu) { if (enable_unrestricted_guest) return true; /* real mode guest state checks */ if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) return false; } else { /* protected mode guest state checks */ if (!cs_ss_rpl_check(vcpu)) return false; if (!code_segment_valid(vcpu)) return false; if (!stack_segment_valid(vcpu)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_GS)) return false; if (!tr_valid(vcpu)) return false; if (!ldtr_valid(vcpu)) return false; } /* TODO: * - Add checks on RIP * - Add checks on RFLAGS */ return true; } static int init_rmode_tss(struct kvm *kvm) { gfn_t fn; u16 data = 0; int idx, r; idx = srcu_read_lock(&kvm->srcu); fn = kvm->arch.tss_addr >> PAGE_SHIFT; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; r = kvm_write_guest_page(kvm, fn++, &data, TSS_IOPB_BASE_OFFSET, sizeof(u16)); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = ~0; r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, sizeof(u8)); out: srcu_read_unlock(&kvm->srcu, idx); return r; } static int init_rmode_identity_map(struct kvm *kvm) { int i, idx, r = 0; pfn_t identity_map_pfn; u32 tmp; if (!enable_ept) return 0; /* Protect kvm->arch.ept_identity_pagetable_done. 
*/ mutex_lock(&kvm->slots_lock); if (likely(kvm->arch.ept_identity_pagetable_done)) goto out2; identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; r = alloc_identity_pagetable(kvm); if (r < 0) goto out2; idx = srcu_read_lock(&kvm->srcu); r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); if (r < 0) goto out; /* Set up identity-mapping pagetable for EPT in real mode */ for (i = 0; i < PT32_ENT_PER_PAGE; i++) { tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); r = kvm_write_guest_page(kvm, identity_map_pfn, &tmp, i * sizeof(tmp), sizeof(tmp)); if (r < 0) goto out; } kvm->arch.ept_identity_pagetable_done = true; out: srcu_read_unlock(&kvm->srcu, idx); out2: mutex_unlock(&kvm->slots_lock); return r; } static void seg_setup(int seg) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; unsigned int ar; vmcs_write16(sf->selector, 0); vmcs_writel(sf->base, 0); vmcs_write32(sf->limit, 0xffff); ar = 0x93; if (seg == VCPU_SREG_CS) ar |= 0x08; /* code segment */ vmcs_write32(sf->ar_bytes, ar); } static int alloc_apic_access_page(struct kvm *kvm) { struct page *page; struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) goto out; kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; kvm_userspace_mem.flags = 0; kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE; kvm_userspace_mem.memory_size = PAGE_SIZE; r = __kvm_set_memory_region(kvm, &kvm_userspace_mem); if (r) goto out; page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); if (is_error_page(page)) { r = -EFAULT; goto out; } /* * Do not pin the page in memory, so that memory hot-unplug * is able to migrate it. */ put_page(page); kvm->arch.apic_access_page_done = true; out: mutex_unlock(&kvm->slots_lock); return r; } static int alloc_identity_pagetable(struct kvm *kvm) { /* Called with kvm->slots_lock held. */ struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; BUG_ON(kvm->arch.ept_identity_pagetable_done); kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; kvm_userspace_mem.flags = 0; kvm_userspace_mem.guest_phys_addr = kvm->arch.ept_identity_map_addr; kvm_userspace_mem.memory_size = PAGE_SIZE; r = __kvm_set_memory_region(kvm, &kvm_userspace_mem); return r; } static void allocate_vpid(struct vcpu_vmx *vmx) { int vpid; vmx->vpid = 0; if (!enable_vpid) return; spin_lock(&vmx_vpid_lock); vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); if (vpid < VMX_NR_VPIDS) { vmx->vpid = vpid; __set_bit(vpid, vmx_vpid_bitmap); } spin_unlock(&vmx_vpid_lock); } static void free_vpid(struct vcpu_vmx *vmx) { if (!enable_vpid) return; spin_lock(&vmx_vpid_lock); if (vmx->vpid != 0) __clear_bit(vmx->vpid, vmx_vpid_bitmap); spin_unlock(&vmx_vpid_lock); } #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
*/ if (msr <= 0x1fff) { if (type & MSR_TYPE_R) /* read-low */ __clear_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_W) /* write-low */ __clear_bit(msr, msr_bitmap + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R) /* read-high */ __clear_bit(msr, msr_bitmap + 0x400 / f); if (type & MSR_TYPE_W) /* write-high */ __clear_bit(msr, msr_bitmap + 0xc00 / f); } } static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R) /* read-low */ __set_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_W) /* write-low */ __set_bit(msr, msr_bitmap + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R) /* read-high */ __set_bit(msr, msr_bitmap + 0x400 / f); if (type & MSR_TYPE_W) /* write-high */ __set_bit(msr, msr_bitmap + 0xc00 / f); } } static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) { if (!longmode_only) __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr, MSR_TYPE_R | MSR_TYPE_W); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr, MSR_TYPE_R | MSR_TYPE_W); } static void vmx_enable_intercept_msr_read_x2apic(u32 msr) { __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_R); __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_R); } static void vmx_disable_intercept_msr_read_x2apic(u32 msr) { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_R); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_R); } static void vmx_disable_intercept_msr_write_x2apic(u32 msr) { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, MSR_TYPE_W); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, MSR_TYPE_W); } static int vmx_vm_has_apicv(struct kvm *kvm) { return enable_apicv && irqchip_in_kernel(kvm); } /* * Send interrupt to vcpu via posted interrupt way. * 1. If target vcpu is running(non-root mode), send posted interrupt * notification to vcpu and hardware will sync PIR to vIRR atomically. * 2. If target vcpu isn't running(root mode), kick it to pick up the * interrupt from PIR in next vmentry. */ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); int r; if (pi_test_and_set_pir(vector, &vmx->pi_desc)) return; r = pi_test_and_set_on(&vmx->pi_desc); kvm_make_request(KVM_REQ_EVENT, vcpu); #ifdef CONFIG_SMP if (!r && (vcpu->mode == IN_GUEST_MODE)) apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), POSTED_INTR_VECTOR); else #endif kvm_vcpu_kick(vcpu); } static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (!pi_test_and_clear_on(&vmx->pi_desc)) return; kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); } static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu) { return; } /* * Set up the vmcs's constant host-state fields, i.e., host-state fields that * will not change in the lifetime of the guest. * Note that host-state that does change is set elsewhere. E.g., host-state * that is set differently for each CPU is set in vmx_vcpu_load(), not here. 
*/ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) { u32 low32, high32; unsigned long tmpl; struct desc_ptr dt; unsigned long cr4; vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ /* Save the most likely value for this task's CR4 in the VMCS. */ cr4 = read_cr4(); vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ vmx->host_state.vmcs_host_cr4 = cr4; vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ #ifdef CONFIG_X86_64 /* * Load null selectors, so we can avoid reloading them in * __vmx_load_host_state(), in case userspace uses the null selectors * too (the expected case). */ vmcs_write16(HOST_DS_SELECTOR, 0); vmcs_write16(HOST_ES_SELECTOR, 0); #else vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #endif vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ native_store_idt(&dt); vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ vmx->host_idt_base = dt.address; vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32); rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { rdmsr(MSR_IA32_CR_PAT, low32, high32); vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); } } static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) { vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; if (enable_ept) vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; if (is_guest_mode(&vmx->vcpu)) vmx->vcpu.arch.cr4_guest_owned_bits &= ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); } static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) { u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; return pin_based_exec_ctrl; } static u32 vmx_exec_control(struct vcpu_vmx *vmx) { u32 exec_control = vmcs_config.cpu_based_exec_ctrl; if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) exec_control &= ~CPU_BASED_MOV_DR_EXITING; if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { exec_control &= ~CPU_BASED_TPR_SHADOW; #ifdef CONFIG_X86_64 exec_control |= CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING; #endif } if (!enable_ept) exec_control |= CPU_BASED_CR3_STORE_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_INVLPG_EXITING; return exec_control; } static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) { u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; if (!enable_ept) { exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; enable_unrestricted_guest = 0; /* Enabling INVPCID for non-EPT guests may cause a performance regression. 
*/ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; } if (!enable_unrestricted_guest) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; if (!ple_gap) exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD (handle_vmptrld). We can NOT enable shadow_vmcs here because we don't have yet a current VMCS12 */ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; return exec_control; } static void ept_set_mmio_spte_mask(void) { /* * EPT Misconfigurations can be generated if the value of bits 2:0 * of an EPT paging-structure entry is 110b (write/execute). * Also, magic bits (0x3ull << 62) is set to quickly identify mmio * spte. */ kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull); } /* * Sets up the vmcs for emulated real mode. */ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { #ifdef CONFIG_X86_64 unsigned long a; #endif int i; /* I/O */ vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); if (enable_shadow_vmcs) { vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); } if (cpu_has_vmx_msr_bitmap()) vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ /* Control */ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); if (cpu_has_secondary_exec_ctrls()) { vmcs_write32(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control(vmx)); } if (vmx_vm_has_apicv(vmx->vcpu.kvm)) { vmcs_write64(EOI_EXIT_BITMAP0, 0); vmcs_write64(EOI_EXIT_BITMAP1, 0); vmcs_write64(EOI_EXIT_BITMAP2, 0); vmcs_write64(EOI_EXIT_BITMAP3, 0); vmcs_write16(GUEST_INTR_STATUS, 0); vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); } if (ple_gap) { vmcs_write32(PLE_GAP, ple_gap); vmx->ple_window = ple_window; vmx->ple_window_dirty = true; } vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ vmx_set_constant_host_state(vmx); #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ rdmsrl(MSR_GS_BASE, a); vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ #else vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ #endif vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { u32 msr_low, msr_high; u64 host_pat; rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); host_pat = msr_low | ((u64) msr_high << 32); /* Write the default value to follow the host PAT */ vmcs_write64(GUEST_IA32_PAT, host_pat); /* Keep arch.pat in sync with GUEST_IA32_PAT */ vmx->vcpu.arch.pat = host_pat; } for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { u32 index = vmx_msr_index[i]; u32 data_low, data_high; int j = vmx->nmsrs; if (rdmsr_safe(index, &data_low, &data_high) < 0) continue; if (wrmsr_safe(index, data_low, data_high) < 0) continue; 
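/*
 * The rdmsr_safe()/wrmsr_safe() probes above filter vmx_msr_index[]
 * down to the MSRs this host can actually read and write back; only
 * those survive into the guest_msrs[] slot filled in below.
 */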
vmx->guest_msrs[j].index = i; vmx->guest_msrs[j].data = 0; vmx->guest_msrs[j].mask = -1ull; ++vmx->nmsrs; } vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); set_cr4_guest_host_mask(vmx); return 0; } static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct msr_data apic_base_msr; vmx->rmode.vm86_active = 0; vmx->soft_vnmi_blocked = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(&vmx->vcpu, 0); apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_bsp(&vmx->vcpu)) apic_base_msr.data |= MSR_IA32_APICBASE_BSP; apic_base_msr.host_initiated = true; kvm_set_apic_base(&vmx->vcpu, &apic_base_msr); vmx_segment_cache_clear(vmx); seg_setup(VCPU_SREG_CS); vmcs_write16(GUEST_CS_SELECTOR, 0xf000); vmcs_write32(GUEST_CS_BASE, 0xffff0000); seg_setup(VCPU_SREG_DS); seg_setup(VCPU_SREG_ES); seg_setup(VCPU_SREG_FS); seg_setup(VCPU_SREG_GS); seg_setup(VCPU_SREG_SS); vmcs_write16(GUEST_TR_SELECTOR, 0); vmcs_writel(GUEST_TR_BASE, 0); vmcs_write32(GUEST_TR_LIMIT, 0xffff); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); vmcs_write16(GUEST_LDTR_SELECTOR, 0); vmcs_writel(GUEST_LDTR_BASE, 0); vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); vmcs_write32(GUEST_SYSENTER_CS, 0); vmcs_writel(GUEST_SYSENTER_ESP, 0); vmcs_writel(GUEST_SYSENTER_EIP, 0); vmcs_writel(GUEST_RFLAGS, 0x02); kvm_rip_write(vcpu, 0xfff0); vmcs_writel(GUEST_GDTR_BASE, 0); vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); vmcs_writel(GUEST_IDTR_BASE, 0); vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); /* Special registers */ vmcs_write64(GUEST_IA32_DEBUGCTL, 0); setup_msrs(vmx); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ if (cpu_has_vmx_tpr_shadow()) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); if (vm_need_tpr_shadow(vmx->vcpu.kvm)) vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, __pa(vmx->vcpu.arch.apic->regs)); vmcs_write32(TPR_THRESHOLD, 0); } kvm_vcpu_reload_apic_access_page(vcpu); if (vmx_vm_has_apicv(vcpu->kvm)) memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); if (vmx->vpid != 0) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ vmx_set_cr4(&vmx->vcpu, 0); vmx_set_efer(&vmx->vcpu, 0); vmx_fpu_activate(&vmx->vcpu); update_exception_bitmap(&vmx->vcpu); vpid_sync_context(vmx); } /* * In nested virtualization, check if L1 asked to exit on external interrupts. * For most existing hypervisors, this will always return true. 
*/ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_EXT_INTR_MASK; } /* * In nested virtualization, check if L1 has set * VM_EXIT_ACK_INTR_ON_EXIT */ static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_ACK_INTR_ON_EXIT; } static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING; } static void enable_irq_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } static void enable_nmi_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; if (!cpu_has_virtual_nmis() || vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { enable_irq_window(vcpu); return; } cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } static void vmx_inject_irq(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); uint32_t intr; int irq = vcpu->arch.interrupt.nr; trace_kvm_inj_virq(irq); ++vcpu->stat.irq_injections; if (vmx->rmode.vm86_active) { int inc_eip = 0; if (vcpu->arch.interrupt.soft) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } intr = irq | INTR_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { intr |= INTR_TYPE_SOFT_INTR; vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); } else intr |= INTR_TYPE_EXT_INTR; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); } static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (is_guest_mode(vcpu)) return; if (!cpu_has_virtual_nmis()) { /* * Tracking the NMI-blocked state in software is built upon * finding the next open IRQ window. This, in turn, depends on * well-behaving guests: They have to keep IRQs disabled at * least as long as the NMI handler runs. Otherwise we may * cause NMI nesting, maybe breaking the guest. But as this is * highly unlikely, we can live with the residual risk. 
*/ vmx->soft_vnmi_blocked = 1; vmx->vnmi_blocked_time = 0; } ++vcpu->stat.nmi_injections; vmx->nmi_known_unmasked = false; if (vmx->rmode.vm86_active) { if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); } static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) { if (!cpu_has_virtual_nmis()) return to_vmx(vcpu)->soft_vnmi_blocked; if (to_vmx(vcpu)->nmi_known_unmasked) return false; return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; } static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (!cpu_has_virtual_nmis()) { if (vmx->soft_vnmi_blocked != masked) { vmx->soft_vnmi_blocked = masked; vmx->vnmi_blocked_time = 0; } } else { vmx->nmi_known_unmasked = !masked; if (masked) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); } } static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) { if (to_vmx(vcpu)->nested.nested_run_pending) return 0; if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) return 0; return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | GUEST_INTR_STATE_NMI)); } static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) { return (!to_vmx(vcpu)->nested.nested_run_pending && vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); } static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) { int ret; struct kvm_userspace_memory_region tss_mem = { .slot = TSS_PRIVATE_MEMSLOT, .guest_phys_addr = addr, .memory_size = PAGE_SIZE * 3, .flags = 0, }; ret = kvm_set_memory_region(kvm, &tss_mem); if (ret) return ret; kvm->arch.tss_addr = addr; return init_rmode_tss(kvm); } static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) { switch (vec) { case BP_VECTOR: /* * Update instruction length as we may reinject the exception * from user space while in guest debugging mode. */ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; /* fall through */ case DB_VECTOR: if (vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) return false; /* fall through */ case DE_VECTOR: case OF_VECTOR: case BR_VECTOR: case UD_VECTOR: case DF_VECTOR: case SS_VECTOR: case GP_VECTOR: case MF_VECTOR: return true; break; } return false; } static int handle_rmode_exception(struct kvm_vcpu *vcpu, int vec, u32 err_code) { /* * Instruction with address size override prefix opcode 0x67 * Cause the #SS fault with 0 error code in VM86 mode. */ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; return kvm_emulate_halt(vcpu); } return 1; } return 0; } /* * Forward all other exceptions that are valid in real mode. * FIXME: Breaks guest debugging in real mode, needs to be fixed with * the required debugging infrastructure rework. */ kvm_queue_exception(vcpu, vec); return 1; } /* * Trigger machine check on the host. We assume all the MSRs are already set up * by the CPU and that we still run on the same CPU as the MCE occurred on. 
* We pass a fake environment to the machine check handler because we want * the guest to be always treated like user space, no matter what context * it used internally. */ static void kvm_machine_check(void) { #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, }; do_machine_check(&regs, 0); #endif } static int handle_machine_check(struct kvm_vcpu *vcpu) { /* already handled by vcpu_run */ return 1; } static int handle_exception(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 intr_info, ex_no, error_code; unsigned long cr2, rip, dr6; u32 vect_info; enum emulation_result er; vect_info = vmx->idt_vectoring_info; intr_info = vmx->exit_intr_info; if (is_machine_check(intr_info)) return handle_machine_check(vcpu); if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) return 1; /* already handled by vmx_vcpu_run() */ if (is_no_device(intr_info)) { vmx_fpu_activate(vcpu); return 1; } if (is_invalid_opcode(intr_info)) { er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; } error_code = 0; if (intr_info & INTR_INFO_DELIVER_CODE_MASK) error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); /* * The #PF with PFEC.RSVD = 1 indicates the guest is accessing * MMIO, it is better to report an internal error. * See the comments in vmx_handle_exit. */ if ((vect_info & VECTORING_INFO_VALID_MASK) && !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = vect_info; vcpu->run->internal.data[1] = intr_info; return 0; } if (is_page_fault(intr_info)) { /* EPT won't cause page fault directly */ BUG_ON(enable_ept); cr2 = vmcs_readl(EXIT_QUALIFICATION); trace_kvm_page_fault(cr2, error_code); if (kvm_event_needs_reinjection(vcpu)) kvm_mmu_unprotect_page_virt(vcpu, cr2); return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0); } ex_no = intr_info & INTR_INFO_VECTOR_MASK; if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) return handle_rmode_exception(vcpu, ex_no, error_code); switch (ex_no) { case DB_VECTOR: dr6 = vmcs_readl(EXIT_QUALIFICATION); if (!(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; if (!(dr6 & ~DR6_RESERVED)) /* icebp */ skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); return 1; } kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); /* fall through */ case BP_VECTOR: /* * Update instruction length as we may reinject #BP from * user space while in guest debugging mode. Reading it for * #DB as well causes no harm, it is not used in that case. 
*/ vmx->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_run->exit_reason = KVM_EXIT_DEBUG; rip = kvm_rip_read(vcpu); kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; kvm_run->debug.arch.exception = ex_no; break; default: kvm_run->exit_reason = KVM_EXIT_EXCEPTION; kvm_run->ex.exception = ex_no; kvm_run->ex.error_code = error_code; break; } return 0; } static int handle_external_interrupt(struct kvm_vcpu *vcpu) { ++vcpu->stat.irq_exits; return 1; } static int handle_triple_fault(struct kvm_vcpu *vcpu) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } static int handle_io(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int size, in, string; unsigned port; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); string = (exit_qualification & 16) != 0; in = (exit_qualification & 8) != 0; ++vcpu->stat.io_exits; if (string || in) return emulate_instruction(vcpu, 0) == EMULATE_DONE; port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; skip_emulated_instruction(vcpu); return kvm_fast_pio_out(vcpu, size, port); } static void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) { /* * Patch in the VMCALL instruction: */ hypercall[0] = 0x0f; hypercall[1] = 0x01; hypercall[2] = 0xc1; } static bool nested_cr0_valid(struct vmcs12 *vmcs12, unsigned long val) { unsigned long always_on = VMXON_CR0_ALWAYSON; if (nested_vmx_secondary_ctls_high & SECONDARY_EXEC_UNRESTRICTED_GUEST && nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) always_on &= ~(X86_CR0_PE | X86_CR0_PG); return (val & always_on) == always_on; } /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned long orig_val = val; /* * We get here when L2 changed cr0 in a way that did not change * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), * but did change L0 shadowed bits. So we first calculate the * effective cr0 value that L1 would like to write into the * hardware. It consists of the L2-owned bits from the new * value combined with the L1-owned bits from L1's guest_cr0. */ val = (val & ~vmcs12->cr0_guest_host_mask) | (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); if (!nested_cr0_valid(vmcs12, val)) return 1; if (kvm_set_cr0(vcpu, val)) return 1; vmcs_writel(CR0_READ_SHADOW, orig_val); return 0; } else { if (to_vmx(vcpu)->nested.vmxon && ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)) return 1; return kvm_set_cr0(vcpu, val); } } static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned long orig_val = val; /* analogously to handle_set_cr0 */ val = (val & ~vmcs12->cr4_guest_host_mask) | (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); if (kvm_set_cr4(vcpu, val)) return 1; vmcs_writel(CR4_READ_SHADOW, orig_val); return 0; } else return kvm_set_cr4(vcpu, val); } /* called to set cr0 as appropriate for a clts instruction exit. */ static void handle_clts(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) { /* * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS * but we did (!fpu_active). We need to keep GUEST_CR0.TS on, * just pretend it's off (also in arch.cr0 for fpu_activate). 
*/ vmcs_writel(CR0_READ_SHADOW, vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS); vcpu->arch.cr0 &= ~X86_CR0_TS; } else vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); } static int handle_cr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification, val; int cr; int reg; int err; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); cr = exit_qualification & 15; reg = (exit_qualification >> 8) & 15; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ val = kvm_register_readl(vcpu, reg); trace_kvm_cr_write(cr, val); switch (cr) { case 0: err = handle_set_cr0(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 3: err = kvm_set_cr3(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 4: err = handle_set_cr4(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 8: { u8 cr8_prev = kvm_get_cr8(vcpu); u8 cr8 = (u8)val; err = kvm_set_cr8(vcpu, cr8); kvm_complete_insn_gp(vcpu, err); if (irqchip_in_kernel(vcpu->kvm)) return 1; if (cr8_prev <= cr8) return 1; vcpu->run->exit_reason = KVM_EXIT_SET_TPR; return 0; } } break; case 2: /* clts */ handle_clts(vcpu); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); skip_emulated_instruction(vcpu); vmx_fpu_activate(vcpu); return 1; case 1: /*mov from cr*/ switch (cr) { case 3: val = kvm_read_cr3(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); skip_emulated_instruction(vcpu); return 1; case 8: val = kvm_get_cr8(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); skip_emulated_instruction(vcpu); return 1; } break; case 3: /* lmsw */ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); kvm_lmsw(vcpu, val); skip_emulated_instruction(vcpu); return 1; default: break; } vcpu->run->exit_reason = 0; vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", (int)(exit_qualification >> 4) & 3, cr); return 0; } static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int dr, reg; /* Do not handle if the CPL > 0, will trigger GP on re-entry */ if (!kvm_require_cpl(vcpu, 0)) return 1; dr = vmcs_readl(GUEST_DR7); if (dr & DR7_GD) { /* * As the vm-exit takes precedence over the debug trap, we * need to emulate the latter, either for the host or the * guest debugging itself. */ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; vcpu->run->debug.arch.dr7 = dr; vcpu->run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + vmcs_readl(GUEST_RIP); vcpu->run->debug.arch.exception = DB_VECTOR; vcpu->run->exit_reason = KVM_EXIT_DEBUG; return 0; } else { vcpu->arch.dr7 &= ~DR7_GD; vcpu->arch.dr6 |= DR6_BD | DR6_RTM; vmcs_writel(GUEST_DR7, vcpu->arch.dr7); kvm_queue_exception(vcpu, DB_VECTOR); return 1; } } if (vcpu->guest_debug == 0) { u32 cpu_based_vm_exec_control; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); /* * No more DR vmexits; force a reload of the debug registers * and reenter on this instruction. The next vmexit will * retrieve the full state of the debug registers. 
*/ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; return 1; } exit_qualification = vmcs_readl(EXIT_QUALIFICATION); dr = exit_qualification & DEBUG_REG_ACCESS_NUM; reg = DEBUG_REG_ACCESS_REG(exit_qualification); if (exit_qualification & TYPE_MOV_FROM_DR) { unsigned long val; if (kvm_get_dr(vcpu, dr, &val)) return 1; kvm_register_write(vcpu, reg, val); } else if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) return 1; skip_emulated_instruction(vcpu); return 1; } static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) { return vcpu->arch.dr6; } static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) { } static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; get_debugreg(vcpu->arch.db[0], 0); get_debugreg(vcpu->arch.db[1], 1); get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); get_debugreg(vcpu->arch.dr6, 6); vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) { vmcs_writel(GUEST_DR7, val); } static int handle_cpuid(struct kvm_vcpu *vcpu) { kvm_emulate_cpuid(vcpu); return 1; } static int handle_rdmsr(struct kvm_vcpu *vcpu) { u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data; if (vmx_get_msr(vcpu, ecx, &data)) { trace_kvm_msr_read_ex(ecx); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_read(ecx, data); /* FIXME: handling of bits 32:63 of rax, rdx */ vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; skip_emulated_instruction(vcpu); return 1; } static int handle_wrmsr(struct kvm_vcpu *vcpu) { struct msr_data msr; u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); msr.data = data; msr.index = ecx; msr.host_initiated = false; if (vmx_set_msr(vcpu, &msr) != 0) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_write(ecx, data); skip_emulated_instruction(vcpu); return 1; } static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) { kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } static int handle_interrupt_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; /* clear pending irq */ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); kvm_make_request(KVM_REQ_EVENT, vcpu); ++vcpu->stat.irq_window_exits; /* * If the user space waits to inject interrupts, exit as soon as * possible */ if (!irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window && !kvm_cpu_has_interrupt(vcpu)) { vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; return 0; } return 1; } static int handle_halt(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); return kvm_emulate_halt(vcpu); } static int handle_vmcall(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); kvm_emulate_hypercall(vcpu); return 1; } static int handle_invd(struct kvm_vcpu *vcpu) { return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_invlpg(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); kvm_mmu_invlpg(vcpu, exit_qualification); skip_emulated_instruction(vcpu); return 1; } 
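/*
 * Illustrative sketch only, not upstream KVM code: handle_rdmsr() and
 * handle_wrmsr() above move 64-bit MSR values through the EDX:EAX
 * register pair, 32 bits per register. The hypothetical helpers below
 * show that packing in isolation; nothing in this file calls them.
 */
static inline u64 __maybe_unused demo_edx_eax_to_u64(u32 eax, u32 edx)
{
	/* EAX carries bits 31:0, EDX carries bits 63:32. */
	return (u64)eax | ((u64)edx << 32);
}

static inline void __maybe_unused demo_u64_to_edx_eax(u64 data, u32 *eax, u32 *edx)
{
	*eax = data & -1u;		/* low half, as in handle_rdmsr() */
	*edx = (data >> 32) & -1u;	/* high half */
}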
static int handle_rdpmc(struct kvm_vcpu *vcpu) { int err; err = kvm_rdpmc(vcpu); kvm_complete_insn_gp(vcpu, err); return 1; } static int handle_wbinvd(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); kvm_emulate_wbinvd(vcpu); return 1; } static int handle_xsetbv(struct kvm_vcpu *vcpu) { u64 new_bv = kvm_read_edx_eax(vcpu); u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); if (kvm_set_xcr(vcpu, index, new_bv) == 0) skip_emulated_instruction(vcpu); return 1; } static int handle_apic_access(struct kvm_vcpu *vcpu) { if (likely(fasteoi)) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int access_type, offset; access_type = exit_qualification & APIC_ACCESS_TYPE; offset = exit_qualification & APIC_ACCESS_OFFSET; /* * A sane guest uses MOV to write EOI, and the written value * is ignored. So short-circuit here to avoid * heavy instruction emulation. */ if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && (offset == APIC_EOI)) { kvm_lapic_set_eoi(vcpu); skip_emulated_instruction(vcpu); return 1; } } return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int vector = exit_qualification & 0xff; /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ kvm_apic_set_eoi_accelerated(vcpu, vector); return 1; } static int handle_apic_write(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 offset = exit_qualification & 0xfff; /* APIC-write VM exit is trap-like and thus no need to adjust IP */ kvm_apic_write_nodecode(vcpu, offset); return 1; } static int handle_task_switch(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification; bool has_error_code = false; u32 error_code = 0; u16 tss_selector; int reason, type, idt_v, idt_index; idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); reason = (u32)exit_qualification >> 30; if (reason == TASK_SWITCH_GATE && idt_v) { switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = false; vmx_set_nmi_mask(vcpu, true); break; case INTR_TYPE_EXT_INTR: case INTR_TYPE_SOFT_INTR: kvm_clear_interrupt_queue(vcpu); break; case INTR_TYPE_HARD_EXCEPTION: if (vmx->idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { has_error_code = true; error_code = vmcs_read32(IDT_VECTORING_ERROR_CODE); } /* fall through */ case INTR_TYPE_SOFT_EXCEPTION: kvm_clear_exception_queue(vcpu); break; default: break; } } tss_selector = exit_qualification; if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && type != INTR_TYPE_EXT_INTR && type != INTR_TYPE_NMI_INTR)) skip_emulated_instruction(vcpu); if (kvm_task_switch(vcpu, tss_selector, type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, has_error_code, error_code) == EMULATE_FAIL) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; return 0; } /* clear all local breakpoint enable flags */ vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55); /* * TODO: What about debug traps on tss switch? * Are we supposed to inject them and update dr6? 
*/ return 1; } static int handle_ept_violation(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; gpa_t gpa; u32 error_code; int gla_validity; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); gla_validity = (exit_qualification >> 7) & 0x3; if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) { printk(KERN_ERR "EPT: Handling EPT violation failed!\n"); printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), vmcs_readl(GUEST_LINEAR_ADDRESS)); printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", (long unsigned int)exit_qualification); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; return 0; } /* * If the EPT violation happened while executing iret from NMI, the * "blocked by NMI" bit has to be set before the next VM entry. * There are errata that may cause this bit to not be set: * AAK134, BY25. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); trace_kvm_page_fault(gpa, exit_qualification); /* Is it a write fault? */ error_code = exit_qualification & (1U << 1); /* Is it a fetch fault? */ error_code |= (exit_qualification & (1U << 2)) << 2; /* Is the EPT page-table entry present? */ error_code |= (exit_qualification >> 3) & 0x1; vcpu->arch.exit_qualification = exit_qualification; return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } static u64 ept_rsvd_mask(u64 spte, int level) { int i; u64 mask = 0; for (i = 51; i > boot_cpu_data.x86_phys_bits; i--) mask |= (1ULL << i); if (level == 4) /* bits 7:3 reserved */ mask |= 0xf8; else if (spte & (1ULL << 7)) /* * 1GB/2MB page, bits 29:12 or 20:12 reserved respectively, * level == 1 if the hypervisor is using the ignored bit 7.
*/ mask |= (PAGE_SIZE << ((level - 1) * 9)) - PAGE_SIZE; else if (level > 1) /* bits 6:3 reserved */ mask |= 0x78; return mask; } static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, int level) { printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level); /* 010b (write-only) */ WARN_ON((spte & 0x7) == 0x2); /* 110b (write/execute) */ WARN_ON((spte & 0x7) == 0x6); /* 100b (execute-only) and value not supported by logical processor */ if (!cpu_has_vmx_ept_execute_only()) WARN_ON((spte & 0x7) == 0x4); /* not 000b */ if ((spte & 0x7)) { u64 rsvd_bits = spte & ept_rsvd_mask(spte, level); if (rsvd_bits != 0) { printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n", __func__, rsvd_bits); WARN_ON(1); } /* bits 5:3 are _not_ reserved for large page or leaf page */ if ((rsvd_bits & 0x38) == 0) { u64 ept_mem_type = (spte & 0x38) >> 3; if (ept_mem_type == 2 || ept_mem_type == 3 || ept_mem_type == 7) { printk(KERN_ERR "%s: ept_mem_type=0x%llx\n", __func__, ept_mem_type); WARN_ON(1); } } } } static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { u64 sptes[4]; int nr_sptes, i, ret; gpa_t gpa; gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { skip_emulated_instruction(vcpu); return 1; } ret = handle_mmio_page_fault_common(vcpu, gpa, true); if (likely(ret == RET_MMIO_PF_EMULATE)) return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == EMULATE_DONE; if (unlikely(ret == RET_MMIO_PF_INVALID)) return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0); if (unlikely(ret == RET_MMIO_PF_RETRY)) return 1; /* It is the real ept misconfig */ printk(KERN_ERR "EPT: Misconfiguration.\n"); printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa); nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes); for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; return 0; } static int handle_nmi_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; /* clear pending NMI */ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); ++vcpu->stat.nmi_window_exits; kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); enum emulation_result err = EMULATE_DONE; int ret = 1; u32 cpu_exec_ctrl; bool intr_window_requested; unsigned count = 130; cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; while (vmx->emulation_required && count-- != 0) { if (intr_window_requested && vmx_interrupt_allowed(vcpu)) return handle_interrupt_window(&vmx->vcpu); if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) return 1; err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; ret = 0; goto out; } if (err != EMULATE_DONE) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; return 0; } if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; ret = kvm_emulate_halt(vcpu); goto out; } if (signal_pending(current)) goto out; if (need_resched()) schedule(); } out: return ret; } static int __grow_ple_window(int val) { if (ple_window_grow < 1) return ple_window; val = min(val, 
ple_window_actual_max); if (ple_window_grow < ple_window) val *= ple_window_grow; else val += ple_window_grow; return val; } static int __shrink_ple_window(int val, int modifier, int minimum) { if (modifier < 1) return ple_window; if (modifier < ple_window) val /= modifier; else val -= modifier; return max(val, minimum); } static void grow_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __grow_ple_window(old); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); } static void shrink_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __shrink_ple_window(old, ple_window_shrink, ple_window); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); } /* * ple_window_actual_max is computed to be one grow_ple_window() below * ple_window_max. (See __grow_ple_window for the reason.) * This prevents overflows, because ple_window_max is an int. * ple_window_max is effectively rounded down to a multiple of * ple_window_grow in this process. * ple_window_max is also prevented from setting vmx->ple_window < ple_window. */ static void update_ple_window_actual_max(void) { ple_window_actual_max = __shrink_ple_window(max(ple_window_max, ple_window), ple_window_grow, INT_MIN); } /* * Indicate a vcpu that is busy-waiting on a spinlock. We never enable plain * PAUSE exiting, so we only get here on CPUs with PAUSE-loop exiting (PLE). */ static int handle_pause(struct kvm_vcpu *vcpu) { if (ple_gap) grow_ple_window(vcpu); skip_emulated_instruction(vcpu); kvm_vcpu_on_spin(vcpu); return 1; } static int handle_nop(struct kvm_vcpu *vcpu) { skip_emulated_instruction(vcpu); return 1; } static int handle_mwait(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); return handle_nop(vcpu); } static int handle_monitor(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); return handle_nop(vcpu); } /* * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. * We could reuse a single VMCS for all the L2 guests, but we also want the * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this * allows keeping them loaded on the processor, and in the future will allow * optimizations where prepare_vmcs02 doesn't need to set all the fields on * every entry if they never change. * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first. * * The following functions allocate and free a vmcs02 in this pool. */ /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) { struct vmcs02_list *item; list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) if (item->vmptr == vmx->nested.current_vmptr) { list_move(&item->list, &vmx->nested.vmcs02_pool); return &item->vmcs02; } if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { /* Recycle the least recently used VMCS.
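 * The pool is kept in most-recently-used order: a hit in the lookup
 * above moves the entry to the head, so the tail (vmcs02_pool.prev,
 * used below) is always the least recently used entry.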
*/ item = list_entry(vmx->nested.vmcs02_pool.prev, struct vmcs02_list, list); item->vmptr = vmx->nested.current_vmptr; list_move(&item->list, &vmx->nested.vmcs02_pool); return &item->vmcs02; } /* Create a new VMCS */ item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL); if (!item) return NULL; item->vmcs02.vmcs = alloc_vmcs(); if (!item->vmcs02.vmcs) { kfree(item); return NULL; } loaded_vmcs_init(&item->vmcs02); item->vmptr = vmx->nested.current_vmptr; list_add(&(item->list), &(vmx->nested.vmcs02_pool)); vmx->nested.vmcs02_num++; return &item->vmcs02; } /* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */ static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) { struct vmcs02_list *item; list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) if (item->vmptr == vmptr) { free_loaded_vmcs(&item->vmcs02); list_del(&item->list); kfree(item); vmx->nested.vmcs02_num--; return; } } /* * Free all VMCSs saved for this vcpu, except the one pointed to by * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs * must be &vmx->vmcs01. */ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) { struct vmcs02_list *item, *n; WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { /* * Something will leak if the above WARN triggers. Better than * a use-after-free. */ if (vmx->loaded_vmcs == &item->vmcs02) continue; free_loaded_vmcs(&item->vmcs02); list_del(&item->list); kfree(item); vmx->nested.vmcs02_num--; } } /* * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), * set the success or error code of an emulated VMX instruction, as specified * by Vol 2B, VMX Instruction Reference, "Conventions". */ static void nested_vmx_succeed(struct kvm_vcpu *vcpu) { vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); } static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) { vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)) | X86_EFLAGS_CF); } static void nested_vmx_failValid(struct kvm_vcpu *vcpu, u32 vm_instruction_error) { if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { /* * failValid writes the error number to the current VMCS, which * can't be done when there isn't a current VMCS. */ nested_vmx_failInvalid(vcpu); return; } vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_SF | X86_EFLAGS_OF)) | X86_EFLAGS_ZF); get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; /* * We don't need to force a shadow sync because * VM_INSTRUCTION_ERROR is not shadowed */ } static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) { struct vcpu_vmx *vmx = container_of(timer, struct vcpu_vmx, nested.preemption_timer); vmx->nested.preemption_timer_expired = true; kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); kvm_vcpu_kick(&vmx->vcpu); return HRTIMER_NORESTART; } /* * Decode the memory-address operand of a vmx instruction, as recorded on an * exit caused by such an instruction (run by a guest hypervisor). * On success, returns 0. When the operand is invalid, returns 1 and throws * #UD or #GP. */ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, gva_t *ret) { /* * According to Vol.
3B, "Information for VM Exits Due to Instruction * Execution", on an exit, vmx_instruction_info holds most of the * addressing components of the operand. Only the displacement part * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). * For how an actual address is calculated from all these components, * refer to Vol. 1, "Operand Addressing". */ int scaling = vmx_instruction_info & 3; int addr_size = (vmx_instruction_info >> 7) & 7; bool is_reg = vmx_instruction_info & (1u << 10); int seg_reg = (vmx_instruction_info >> 15) & 7; int index_reg = (vmx_instruction_info >> 18) & 0xf; bool index_is_valid = !(vmx_instruction_info & (1u << 22)); int base_reg = (vmx_instruction_info >> 23) & 0xf; bool base_is_valid = !(vmx_instruction_info & (1u << 27)); if (is_reg) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* Addr = segment_base + offset */ /* offset = base + [index * scale] + displacement */ *ret = vmx_get_segment_base(vcpu, seg_reg); if (base_is_valid) *ret += kvm_register_read(vcpu, base_reg); if (index_is_valid) *ret += kvm_register_read(vcpu, index_reg)<<scaling; *ret += exit_qualification; /* holds the displacement */ if (addr_size == 1) /* 32 bit */ *ret &= 0xffffffff; /* * TODO: throw #GP (and return 1) in various cases that the VM* * instructions require it - e.g., offset beyond segment limit, * unusable or unreadable/unwritable segment, non-canonical 64-bit * address, and so on. Currently these are not checked. */ return 0; } /* * This function performs the various checks including * - if it's 4KB aligned * - No bits beyond the physical address width are set * - Returns 0 on success or else 1 * (Intel SDM Section 30.3) */ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, gpa_t *vmpointer) { gva_t gva; gpa_t vmptr; struct x86_exception e; struct page *page; struct vcpu_vmx *vmx = to_vmx(vcpu); int maxphyaddr = cpuid_maxphyaddr(vcpu); if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmcs_read32(VMX_INSTRUCTION_INFO), &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, sizeof(vmptr), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (exit_reason) { case EXIT_REASON_VMON: /* * SDM 3: 24.11.5 * The first 4 bytes of VMXON region contain the supported * VMCS revision identifier * * Note - IA32_VMX_BASIC[48] will never be 1 * for the nested case; * which replaces physical address width with 32 * */ if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 1; } page = nested_get_page(vcpu, vmptr); if (page == NULL || *(u32 *)kmap(page) != VMCS12_REVISION) { nested_vmx_failInvalid(vcpu); kunmap(page); skip_emulated_instruction(vcpu); return 1; } kunmap(page); vmx->nested.vmxon_ptr = vmptr; break; case EXIT_REASON_VMCLEAR: if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); skip_emulated_instruction(vcpu); return 1; } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); skip_emulated_instruction(vcpu); return 1; } break; case EXIT_REASON_VMPTRLD: if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); skip_emulated_instruction(vcpu); return 1; } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); skip_emulated_instruction(vcpu); return 1; } break; default: return 1; /* shouldn't happen */ } if (vmpointer) *vmpointer = vmptr; return 0; 
} /* * Emulate the VMXON instruction. * Currently, we just remember that VMX is active, and do not store anything * in the guest-allocated VMXON region itself. nested_vmx_check_vmptr() only * validates the region's alignment and revision identifier and records its * address, so that VMCLEAR and VMPTRLD can reject the VMXON pointer, as the * spec requires. */ static int handle_vmon(struct kvm_vcpu *vcpu) { struct kvm_segment cs; struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs *shadow_vmcs; const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; /* The Intel VMX Instruction Reference lists a bunch of bits that * are prerequisite to running VMXON, most notably cr4.VMXE must be * set to 1 (see vmx_set_cr4() for when we allow the guest to set this). * Otherwise, we should fail with #UD. We test these now: */ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) || !kvm_read_cr0_bits(vcpu, X86_CR0_PE) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); if (is_long_mode(vcpu) && !cs.l) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); return 1; } if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL)) return 1; if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); skip_emulated_instruction(vcpu); return 1; } if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); return 1; } if (enable_shadow_vmcs) { shadow_vmcs = alloc_vmcs(); if (!shadow_vmcs) return -ENOMEM; /* mark vmcs as shadow */ shadow_vmcs->revision_id |= (1u << 31); /* init shadow vmcs */ vmcs_clear(shadow_vmcs); vmx->nested.current_shadow_vmcs = shadow_vmcs; } INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); vmx->nested.vmcs02_num = 0; hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; vmx->nested.vmxon = true; skip_emulated_instruction(vcpu); nested_vmx_succeed(vcpu); return 1; } /* * Intel's VMX Instruction Reference specifies a common set of prerequisites * for running VMX instructions (except VMXON, whose prerequisites are * slightly different). It also specifies what exception to inject otherwise.
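 * The checks below follow that: #UD is queued if VMXON has not been
 * executed, if we are in virtual-8086 mode, or if we are in compatibility
 * mode; #GP(0) is injected if CPL is not 0.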
*/ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { struct kvm_segment cs; struct vcpu_vmx *vmx = to_vmx(vcpu); if (!vmx->nested.vmxon) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) || (is_long_mode(vcpu) && !cs.l)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); return 0; } return 1; } static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) { u32 exec_control; if (vmx->nested.current_vmptr == -1ull) return; /* current_vmptr and current_vmcs12 are always set/reset together */ if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) return; if (enable_shadow_vmcs) { /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); vmx->nested.sync_shadow_vmcs = false; exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); vmcs_write64(VMCS_LINK_POINTER, -1ull); } kunmap(vmx->nested.current_vmcs12_page); nested_release_page(vmx->nested.current_vmcs12_page); vmx->nested.current_vmptr = -1ull; vmx->nested.current_vmcs12 = NULL; } /* * Free whatever needs to be freed from vmx->nested when L1 goes down, or * just stops using VMX. */ static void free_nested(struct vcpu_vmx *vmx) { if (!vmx->nested.vmxon) return; vmx->nested.vmxon = false; nested_release_vmcs12(vmx); if (enable_shadow_vmcs) free_vmcs(vmx->nested.current_shadow_vmcs); /* Unpin physical memory we referred to in current vmcs02 */ if (vmx->nested.apic_access_page) { nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } nested_free_all_saved_vmcss(vmx); } /* Emulate the VMXOFF instruction */ static int handle_vmoff(struct kvm_vcpu *vcpu) { if (!nested_vmx_check_permission(vcpu)) return 1; free_nested(to_vmx(vcpu)); skip_emulated_instruction(vcpu); nested_vmx_succeed(vcpu); return 1; } /* Emulate the VMCLEAR instruction */ static int handle_vmclear(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t vmptr; struct vmcs12 *vmcs12; struct page *page; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) return 1; if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vmx); page = nested_get_page(vcpu, vmptr); if (page == NULL) { /* * For accurate processor emulation, VMCLEAR beyond available * physical memory should do nothing at all. 
However, it is * possible that a nested vmx bug, not a guest hypervisor bug, * resulted in this case, so let's shut down before doing any * more damage: */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 1; } vmcs12 = kmap(page); vmcs12->launch_state = 0; kunmap(page); nested_release_page(page); nested_free_vmcs02(vmx, vmptr); skip_emulated_instruction(vcpu); nested_vmx_succeed(vcpu); return 1; } static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); /* Emulate the VMLAUNCH instruction */ static int handle_vmlaunch(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, true); } /* Emulate the VMRESUME instruction */ static int handle_vmresume(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, false); } enum vmcs_field_type { VMCS_FIELD_TYPE_U16 = 0, VMCS_FIELD_TYPE_U64 = 1, VMCS_FIELD_TYPE_U32 = 2, VMCS_FIELD_TYPE_NATURAL_WIDTH = 3 }; static inline int vmcs_field_type(unsigned long field) { if (0x1 & field) /* the *_HIGH fields are all 32 bit */ return VMCS_FIELD_TYPE_U32; return (field >> 13) & 0x3 ; } static inline int vmcs_field_readonly(unsigned long field) { return (((field >> 10) & 0x3) == 1); } /* * Read a vmcs12 field. Since these can have varying lengths and we return * one type, we chose the biggest type (u64) and zero-extend the return value * to that size. Note that the caller, handle_vmread, might need to use only * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of * 64-bit fields are to be returned). */ static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu, unsigned long field, u64 *ret) { short offset = vmcs_field_to_offset(field); char *p; if (offset < 0) return 0; p = ((char *)(get_vmcs12(vcpu))) + offset; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_NATURAL_WIDTH: *ret = *((natural_width *)p); return 1; case VMCS_FIELD_TYPE_U16: *ret = *((u16 *)p); return 1; case VMCS_FIELD_TYPE_U32: *ret = *((u32 *)p); return 1; case VMCS_FIELD_TYPE_U64: *ret = *((u64 *)p); return 1; default: return 0; /* can never happen. */ } } static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu, unsigned long field, u64 field_value){ short offset = vmcs_field_to_offset(field); char *p = ((char *) get_vmcs12(vcpu)) + offset; if (offset < 0) return false; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: *(u16 *)p = field_value; return true; case VMCS_FIELD_TYPE_U32: *(u32 *)p = field_value; return true; case VMCS_FIELD_TYPE_U64: *(u64 *)p = field_value; return true; case VMCS_FIELD_TYPE_NATURAL_WIDTH: *(natural_width *)p = field_value; return true; default: return false; /* can never happen. 
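 * (vmcs_field_type() can only return one of the four values handled
 * above, so this default case is unreachable.)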
*/ } } static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) { int i; unsigned long field; u64 field_value; struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; const unsigned long *fields = shadow_read_write_fields; const int num_fields = max_shadow_read_write_fields; vmcs_load(shadow_vmcs); for (i = 0; i < num_fields; i++) { field = fields[i]; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: field_value = vmcs_read16(field); break; case VMCS_FIELD_TYPE_U32: field_value = vmcs_read32(field); break; case VMCS_FIELD_TYPE_U64: field_value = vmcs_read64(field); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: field_value = vmcs_readl(field); break; } vmcs12_write_any(&vmx->vcpu, field, field_value); } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); } static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) { const unsigned long *fields[] = { shadow_read_write_fields, shadow_read_only_fields }; const int max_fields[] = { max_shadow_read_write_fields, max_shadow_read_only_fields }; int i, q; unsigned long field; u64 field_value = 0; struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; vmcs_load(shadow_vmcs); for (q = 0; q < ARRAY_SIZE(fields); q++) { for (i = 0; i < max_fields[q]; i++) { field = fields[q][i]; vmcs12_read_any(&vmx->vcpu, field, &field_value); switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: vmcs_write16(field, (u16)field_value); break; case VMCS_FIELD_TYPE_U32: vmcs_write32(field, (u32)field_value); break; case VMCS_FIELD_TYPE_U64: vmcs_write64(field, (u64)field_value); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: vmcs_writel(field, (long)field_value); break; } } } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); } /* * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was * used before) all generate the same failure when it is missing. */ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->nested.current_vmptr == -1ull) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 0; } return 1; } static int handle_vmread(struct kvm_vcpu *vcpu) { unsigned long field; u64 field_value; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t gva = 0; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) return 1; /* Decode instruction info and find the field to read */ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); /* Read the field, zero-extended to a u64 field_value */ if (!vmcs12_read_any(vcpu, field, &field_value)) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); skip_emulated_instruction(vcpu); return 1; } /* * Now copy part of this value to register or memory, as requested. * Note that the number of bits actually copied is 32 or 64 depending * on the guest's mode (32 or 64 bit), not on the given field's length. */ if (vmx_instruction_info & (1u << 10)) { kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), field_value); } else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, &gva)) return 1; /* _system ok, as nested_vmx_check_permission verified cpl=0 */ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_long_mode(vcpu) ? 
8 : 4), NULL); } nested_vmx_succeed(vcpu); skip_emulated_instruction(vcpu); return 1; } static int handle_vmwrite(struct kvm_vcpu *vcpu) { unsigned long field; gva_t gva; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); /* The value to write might be 32 or 64 bits, depending on L1's long * mode, and eventually we need to write that into a field of several * possible lengths. The code below first zero-extends the value to 64 * bit (field_value), and then copies only the appropriate number of * bits into the vmcs12 field. */ u64 field_value = 0; struct x86_exception e; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) return 1; if (vmx_instruction_info & (1u << 10)) field_value = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 3) & 0xf)); else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } } field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); if (vmcs_field_readonly(field)) { nested_vmx_failValid(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); skip_emulated_instruction(vcpu); return 1; } if (!vmcs12_write_any(vcpu, field, field_value)) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); skip_emulated_instruction(vcpu); return 1; } nested_vmx_succeed(vcpu); skip_emulated_instruction(vcpu); return 1; } /* Emulate the VMPTRLD instruction */ static int handle_vmptrld(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t vmptr; u32 exec_control; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr)) return 1; if (vmx->nested.current_vmptr != vmptr) { struct vmcs12 *new_vmcs12; struct page *page; page = nested_get_page(vcpu, vmptr); if (page == NULL) { nested_vmx_failInvalid(vcpu); skip_emulated_instruction(vcpu); return 1; } new_vmcs12 = kmap(page); if (new_vmcs12->revision_id != VMCS12_REVISION) { kunmap(page); nested_release_page_clean(page); nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); skip_emulated_instruction(vcpu); return 1; } nested_release_vmcs12(vmx); vmx->nested.current_vmptr = vmptr; vmx->nested.current_vmcs12 = new_vmcs12; vmx->nested.current_vmcs12_page = page; if (enable_shadow_vmcs) { exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control |= SECONDARY_EXEC_SHADOW_VMCS; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); vmcs_write64(VMCS_LINK_POINTER, __pa(vmx->nested.current_shadow_vmcs)); vmx->nested.sync_shadow_vmcs = true; } } nested_vmx_succeed(vcpu); skip_emulated_instruction(vcpu); return 1; } /* Emulate the VMPTRST instruction */ static int handle_vmptrst(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t vmcs_gva; struct x86_exception e; if (!nested_vmx_check_permission(vcpu)) return 1; if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, &vmcs_gva)) return 1; /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */ if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, (void *)&to_vmx(vcpu)->nested.current_vmptr, sizeof(u64), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } nested_vmx_succeed(vcpu); skip_emulated_instruction(vcpu); return 1; } /* Emulate the INVEPT
instruction */ static int handle_invept(struct kvm_vcpu *vcpu) { u32 vmx_instruction_info, types; unsigned long type; gva_t gva; struct x86_exception e; struct { u64 eptp, gpa; } operand; if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) || !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; if (!(types & (1UL << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return 1; } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (type) { case VMX_EPT_EXTENT_GLOBAL: kvm_mmu_sync_roots(vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); nested_vmx_succeed(vcpu); break; default: /* Trap single context invalidation invept calls */ BUG_ON(1); break; } skip_emulated_instruction(vcpu); return 1; } /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs * to be done to userspace and return 0. */ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EXCEPTION_NMI] = handle_exception, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, [EXIT_REASON_IO_INSTRUCTION] = handle_io, [EXIT_REASON_CR_ACCESS] = handle_cr, [EXIT_REASON_DR_ACCESS] = handle_dr, [EXIT_REASON_CPUID] = handle_cpuid, [EXIT_REASON_MSR_READ] = handle_rdmsr, [EXIT_REASON_MSR_WRITE] = handle_wrmsr, [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, [EXIT_REASON_HLT] = handle_halt, [EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_RDPMC] = handle_rdpmc, [EXIT_REASON_VMCALL] = handle_vmcall, [EXIT_REASON_VMCLEAR] = handle_vmclear, [EXIT_REASON_VMLAUNCH] = handle_vmlaunch, [EXIT_REASON_VMPTRLD] = handle_vmptrld, [EXIT_REASON_VMPTRST] = handle_vmptrst, [EXIT_REASON_VMREAD] = handle_vmread, [EXIT_REASON_VMRESUME] = handle_vmresume, [EXIT_REASON_VMWRITE] = handle_vmwrite, [EXIT_REASON_VMOFF] = handle_vmoff, [EXIT_REASON_VMON] = handle_vmon, [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_APIC_WRITE] = handle_apic_write, [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, [EXIT_REASON_WBINVD] = handle_wbinvd, [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, }; static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); static bool nested_vmx_exit_handled_io(struct 
kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { unsigned long exit_qualification; gpa_t bitmap, last_bitmap; unsigned int port; int size; u8 b; if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; last_bitmap = (gpa_t)-1; b = -1; while (size > 0) { if (port < 0x8000) bitmap = vmcs12->io_bitmap_a; else if (port < 0x10000) bitmap = vmcs12->io_bitmap_b; else return 1; bitmap += (port & 0x7fff) / 8; if (last_bitmap != bitmap) if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1)) return 1; if (b & (1 << (port & 7))) return 1; port++; size--; last_bitmap = bitmap; } return 0; } /* * Return 1 if we should exit from L2 to L1 to handle an MSR access, * rather than handle it ourselves in L0. I.e., check whether L1 expressed * disinterest in the current event (read or write a specific MSR) by using an * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. */ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 exit_reason) { u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; gpa_t bitmap; if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) return 1; /* * The MSR_BITMAP page is divided into four 1024-byte bitmaps, * for the four combinations of read/write and low/high MSR numbers. * First we need to figure out which of the four to use: */ bitmap = vmcs12->msr_bitmap; if (exit_reason == EXIT_REASON_MSR_WRITE) bitmap += 2048; if (msr_index >= 0xc0000000) { msr_index -= 0xc0000000; bitmap += 1024; } /* Then read the msr_index'th bit from this bitmap: */ if (msr_index < 1024*8) { unsigned char b; if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1)) return 1; return 1 & (b >> (msr_index & 7)); } else return 1; /* let L1 handle the wrong parameter */ } /* * Return 1 if we should exit from L2 to L1 to handle a CR access exit, * rather than handle it ourselves in L0. I.e., check if L1 wanted to * intercept (via guest_host_mask etc.) the current event.
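 * For CR0/CR4, only writes that would change a bit L1 owns through
 * cr0/cr4_guest_host_mask need to exit to L1; for CR3, a value that
 * matches one of L1's configured cr3_target_values does not exit.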
*/ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int cr = exit_qualification & 15; int reg = (exit_qualification >> 8) & 15; unsigned long val = kvm_register_readl(vcpu, reg); switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ switch (cr) { case 0: if (vmcs12->cr0_guest_host_mask & (val ^ vmcs12->cr0_read_shadow)) return 1; break; case 3: if ((vmcs12->cr3_target_count >= 1 && vmcs12->cr3_target_value0 == val) || (vmcs12->cr3_target_count >= 2 && vmcs12->cr3_target_value1 == val) || (vmcs12->cr3_target_count >= 3 && vmcs12->cr3_target_value2 == val) || (vmcs12->cr3_target_count >= 4 && vmcs12->cr3_target_value3 == val)) return 0; if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) return 1; break; case 4: if (vmcs12->cr4_guest_host_mask & (vmcs12->cr4_read_shadow ^ val)) return 1; break; case 8: if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) return 1; break; } break; case 2: /* clts */ if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && (vmcs12->cr0_read_shadow & X86_CR0_TS)) return 1; break; case 1: /* mov from cr */ switch (cr) { case 3: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_CR3_STORE_EXITING) return 1; break; case 8: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_CR8_STORE_EXITING) return 1; break; } break; case 3: /* lmsw */ /* * lmsw can change bits 1..3 of cr0, and only set bit 0 of * cr0. Other attempted changes are ignored, with no exit. */ if (vmcs12->cr0_guest_host_mask & 0xe & (val ^ vmcs12->cr0_read_shadow)) return 1; if ((vmcs12->cr0_guest_host_mask & 0x1) && !(vmcs12->cr0_read_shadow & 0x1) && (val & 0x1)) return 1; break; } return 0; } /* * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we * should handle it ourselves in L0 (and then continue L2). Only call this * when in is_guest_mode (L2). 
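 * Roughly: the unconditional cases (e.g. triple fault and the VMX
 * instructions) always go to L1, and everything else is decided by the
 * controls L1 set in vmcs12, consulted case by case below.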
*/ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) { u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 exit_reason = vmx->exit_reason; trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, vmcs_readl(EXIT_QUALIFICATION), vmx->idt_vectoring_info, intr_info, vmcs_read32(VM_EXIT_INTR_ERROR_CODE), KVM_ISA_VMX); if (vmx->nested.nested_run_pending) return 0; if (unlikely(vmx->fail)) { pr_info_ratelimited("%s failed vm entry %x\n", __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); return 1; } switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: if (!is_exception(intr_info)) return 0; else if (is_page_fault(intr_info)) return enable_ept; else if (is_no_device(intr_info) && !(vmcs12->guest_cr0 & X86_CR0_TS)) return 0; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); case EXIT_REASON_EXTERNAL_INTERRUPT: return 0; case EXIT_REASON_TRIPLE_FAULT: return 1; case EXIT_REASON_PENDING_INTERRUPT: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); case EXIT_REASON_NMI_WINDOW: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); case EXIT_REASON_TASK_SWITCH: return 1; case EXIT_REASON_CPUID: if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) return 0; return 1; case EXIT_REASON_HLT: return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); case EXIT_REASON_INVD: return 1; case EXIT_REASON_INVLPG: return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); case EXIT_REASON_RDPMC: return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); case EXIT_REASON_RDTSC: return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: case EXIT_REASON_INVEPT: /* * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! */ return 1; case EXIT_REASON_CR_ACCESS: return nested_vmx_exit_handled_cr(vcpu, vmcs12); case EXIT_REASON_DR_ACCESS: return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); case EXIT_REASON_IO_INSTRUCTION: return nested_vmx_exit_handled_io(vcpu, vmcs12); case EXIT_REASON_MSR_READ: case EXIT_REASON_MSR_WRITE: return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); case EXIT_REASON_INVALID_STATE: return 1; case EXIT_REASON_MWAIT_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); case EXIT_REASON_MONITOR_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); case EXIT_REASON_PAUSE_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || nested_cpu_has2(vmcs12, SECONDARY_EXEC_PAUSE_LOOP_EXITING); case EXIT_REASON_MCE_DURING_VMENTRY: return 0; case EXIT_REASON_TPR_BELOW_THRESHOLD: return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); case EXIT_REASON_APIC_ACCESS: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); case EXIT_REASON_EPT_VIOLATION: /* * L0 always deals with the EPT violation. If nested EPT is * used, and the nested mmu code discovers that the address is * missing in the guest EPT table (EPT12), the EPT violation * will be injected with nested_ept_inject_page_fault() */ return 0; case EXIT_REASON_EPT_MISCONFIG: /* * L2 never uses directly L1's EPT, but rather L0's own EPT * table (shadow on EPT) or a merged EPT table that L0 built * (EPT on EPT). 
So any problems with the structure of the * table are L0's fault. */ return 0; case EXIT_REASON_WBINVD: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); case EXIT_REASON_XSETBV: return 1; default: return 1; } } static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { *info1 = vmcs_readl(EXIT_QUALIFICATION); *info2 = vmcs_read32(VM_EXIT_INTR_INFO); } /* * The guest has exited. See if we can fix it or if we need userspace * assistance. */ static int vmx_handle_exit(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exit_reason = vmx->exit_reason; u32 vectoring_info = vmx->idt_vectoring_info; /* If guest state is invalid, start emulating */ if (vmx->emulation_required) return handle_invalid_guest_state(vcpu); if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { nested_vmx_vmexit(vcpu, exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; } if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = exit_reason; return 0; } if (unlikely(vmx->fail)) { vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = vmcs_read32(VM_INSTRUCTION_ERROR); return 0; } /* * Note: * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a * delivery event, since that indicates the guest is accessing MMIO. * The vm-exit could be triggered again after returning to the guest, * causing an infinite loop. */ if ((vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = vectoring_info; vcpu->run->internal.data[1] = exit_reason; return 0; } if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( get_vmcs12(vcpu))))) { if (vmx_interrupt_allowed(vcpu)) { vmx->soft_vnmi_blocked = 0; } else if (vmx->vnmi_blocked_time > 1000000000LL && vcpu->arch.nmi_pending) { /* * This CPU doesn't help us find the end of an * NMI-blocked window if the guest runs with IRQs * disabled. So we pull the trigger after 1 s of * futile waiting, but inform the user about this.
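 * The 1 s threshold is a heuristic; it merely bounds how long we keep
 * emulating the blocked-by-NMI state in software.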
*/ printk(KERN_WARNING "%s: Breaking out of NMI-blocked " "state on VCPU %d after 1 s timeout\n", __func__, vcpu->vcpu_id); vmx->soft_vnmi_blocked = 0; } } if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); else { vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = exit_reason; } return 0; } static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (is_guest_mode(vcpu) && nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return; if (irr == -1 || tpr < irr) { vmcs_write32(TPR_THRESHOLD, 0); return; } vmcs_write32(TPR_THRESHOLD, irr); } static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) { u32 sec_exec_control; /* * There is no point in enabling virtualized x2apic mode without * enabling APICv. */ if (!cpu_has_vmx_virtualize_x2apic_mode() || !vmx_vm_has_apicv(vcpu->kvm)) return; if (!vm_need_tpr_shadow(vcpu->kvm)) return; sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); if (set) { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } else { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); vmx_set_msr_bitmap(vcpu); } static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) { struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Currently we do not handle the nested case where L2 has an * APIC access page of its own; that page is still pinned. * Hence, we skip the case where the VCPU is in guest mode _and_ * L1 prepared an APIC access page for L2. * * For the case where L1 and L2 share the same APIC access page * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear * in the vmcs12), this function will only update either the vmcs01 * or the vmcs02. If the former, the vmcs02 will be updated by * prepare_vmcs02. If the latter, the vmcs01 will be updated in * the next L2->L1 exit. */ if (!is_guest_mode(vcpu) || !nested_cpu_has2(vmx->nested.current_vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) vmcs_write64(APIC_ACCESS_ADDR, hpa); } static void vmx_hwapic_isr_update(struct kvm *kvm, int isr) { u16 status; u8 old; if (!vmx_vm_has_apicv(kvm)) return; if (isr == -1) isr = 0; status = vmcs_read16(GUEST_INTR_STATUS); old = status >> 8; if (isr != old) { status &= 0xff; status |= isr << 8; vmcs_write16(GUEST_INTR_STATUS, status); } } static void vmx_set_rvi(int vector) { u16 status; u8 old; status = vmcs_read16(GUEST_INTR_STATUS); old = (u8)status & 0xff; if ((u8)vector != old) { status &= ~0xff; status |= (u8)vector; vmcs_write16(GUEST_INTR_STATUS, status); } } static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) { if (max_irr == -1) return; /* * If a vmexit is needed, vmx_check_nested_events handles it. */ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) return; if (!is_guest_mode(vcpu)) { vmx_set_rvi(max_irr); return; } /* * Fall back to pre-APICv interrupt injection since L2 * is run without virtual interrupt delivery.
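 * The highest pending vector is then delivered through the ordinary
 * kvm_queue_interrupt()/vmx_inject_irq() path, but only if no event is
 * already awaiting reinjection and interrupts are allowed.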
*/ if (!kvm_event_needs_reinjection(vcpu) && vmx_interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, max_irr, false); vmx_inject_irq(vcpu); } } static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) { if (!vmx_vm_has_apicv(vcpu->kvm)) return; vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); } static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) { u32 exit_intr_info; if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)) return; vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); exit_intr_info = vmx->exit_intr_info; /* Handle machine checks before interrupts are enabled */ if (is_machine_check(exit_intr_info)) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && (exit_intr_info & INTR_INFO_VALID_MASK)) { kvm_before_handle_nmi(&vmx->vcpu); asm("int $2"); kvm_after_handle_nmi(&vmx->vcpu); } } static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) { u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); /* * If external interrupt exists, IF bit is set in rflags/eflags on the * interrupt stack frame, and interrupt will be enabled on a return * from interrupt handler. */ if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { unsigned int vector; unsigned long entry; gate_desc *desc; struct vcpu_vmx *vmx = to_vmx(vcpu); #ifdef CONFIG_X86_64 unsigned long tmp; #endif vector = exit_intr_info & INTR_INFO_VECTOR_MASK; desc = (gate_desc *)vmx->host_idt_base + vector; entry = gate_offset(*desc); asm volatile( #ifdef CONFIG_X86_64 "mov %%" _ASM_SP ", %[sp]\n\t" "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" "push $%c[ss]\n\t" "push %[sp]\n\t" #endif "pushf\n\t" "orl $0x200, (%%" _ASM_SP ")\n\t" __ASM_SIZE(push) " $%c[cs]\n\t" "call *%[entry]\n\t" : #ifdef CONFIG_X86_64 [sp]"=&r"(tmp) #endif : [entry]"r"(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); } else local_irq_enable(); } static bool vmx_mpx_supported(void) { return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) && (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS); } static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) { u32 exit_intr_info; bool unblock_nmi; u8 vector; bool idtv_info_valid; idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; if (cpu_has_virtual_nmis()) { if (vmx->nmi_known_unmasked) return; /* * Can't use vmx->exit_intr_info since we're not sure what * the exit reason is. */ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; vector = exit_intr_info & INTR_INFO_VECTOR_MASK; /* * SDM 3: 27.7.1.2 (September 2008) * Re-set bit "block by NMI" before VM entry if vmexit caused by * a guest IRET fault. * SDM 3: 23.2.2 (September 2008) * Bit 12 is undefined in any of the following cases: * If the VM exit sets the valid bit in the IDT-vectoring * information field. * If the VM exit is due to a double fault. 
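 * Hence the checks on the valid bit, DF_VECTOR and idtv_info_valid
 * below before the unblock-NMI bit is trusted.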
*/ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && vector != DF_VECTOR && !idtv_info_valid) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmx->nmi_known_unmasked = !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI); } else if (unlikely(vmx->soft_vnmi_blocked)) vmx->vnmi_blocked_time += ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); } static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, u32 idt_vectoring_info, int instr_len_field, int error_code_field) { u8 vector; int type; bool idtv_info_valid; idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); if (!idtv_info_valid) return; kvm_make_request(KVM_REQ_EVENT, vcpu); vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = true; /* * SDM 3: 27.7.1.2 (September 2008) * Clear bit "block by NMI" before VM entry if a NMI * delivery faulted. */ vmx_set_nmi_mask(vcpu, false); break; case INTR_TYPE_SOFT_EXCEPTION: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); kvm_requeue_exception_e(vcpu, vector, err); } else kvm_requeue_exception(vcpu, vector); break; case INTR_TYPE_SOFT_INTR: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_EXT_INTR: kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; default: break; } } static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_ERROR_CODE); } static void vmx_cancel_injection(struct kvm_vcpu *vcpu) { __vmx_complete_interrupts(vcpu, vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) { int i, nr_msrs; struct perf_guest_switch_msr *msrs; msrs = perf_guest_get_msrs(&nr_msrs); if (!msrs) return; for (i = 0; i < nr_msrs; i++) if (msrs[i].host == msrs[i].guest) clear_atomic_switch_msr(vmx, msrs[i].msr); else add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, msrs[i].host); } static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long debugctlmsr, cr4; /* Record the guest's net vcpu time for enforced NMI injections. 
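 * The timestamp is needed only when NMI blocking is emulated in
 * software, i.e. when the CPU lacks virtual-NMI support.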
*/ if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) vmx->entry_time = ktime_get(); /* Don't enter VMX if guest state is invalid; let the exit handler * start emulation until we arrive back at a valid state. */ if (vmx->emulation_required) return; if (vmx->ple_window_dirty) { vmx->ple_window_dirty = false; vmcs_write32(PLE_WINDOW, vmx->ple_window); } if (vmx->nested.sync_shadow_vmcs) { copy_vmcs12_to_shadow(vmx); vmx->nested.sync_shadow_vmcs = false; } if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); cr4 = read_cr4(); if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { vmcs_writel(HOST_CR4, cr4); vmx->host_state.vmcs_host_cr4 = cr4; } /* When single-stepping over STI and MOV SS, we must clear the * corresponding interruptibility bits in the guest state. Otherwise * vmentry fails as it then expects bit 14 (BS) in pending debug * exceptions being set, but that's not correct for the guest debugging * case. */ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); atomic_switch_perf_msrs(vmx); debugctlmsr = get_debugctlmsr(); vmx->__launched = vmx->loaded_vmcs->launched; asm( /* Store host registers */ "push %%" _ASM_DX "; push %%" _ASM_BP ";" "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */ "push %%" _ASM_CX " \n\t" "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t" "je 1f \n\t" "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t" __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" "1: \n\t" /* Reload cr2 if changed */ "mov %c[cr2](%0), %%" _ASM_AX " \n\t" "mov %%cr2, %%" _ASM_DX " \n\t" "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t" "je 2f \n\t" "mov %%" _ASM_AX", %%cr2 \n\t" "2: \n\t" /* Check if vmlaunch or vmresume is needed */ "cmpl $0, %c[launched](%0) \n\t" /* Load guest registers. Don't clobber flags.
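 * The cmpl above recorded vmx->__launched in ZF, and the jne/vmresume
 * below consume it, so nothing in between may modify the flags; that is
 * why only plain mov instructions are used to load the guest registers.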
*/ "mov %c[rax](%0), %%" _ASM_AX " \n\t" "mov %c[rbx](%0), %%" _ASM_BX " \n\t" "mov %c[rdx](%0), %%" _ASM_DX " \n\t" "mov %c[rsi](%0), %%" _ASM_SI " \n\t" "mov %c[rdi](%0), %%" _ASM_DI " \n\t" "mov %c[rbp](%0), %%" _ASM_BP " \n\t" #ifdef CONFIG_X86_64 "mov %c[r8](%0), %%r8 \n\t" "mov %c[r9](%0), %%r9 \n\t" "mov %c[r10](%0), %%r10 \n\t" "mov %c[r11](%0), %%r11 \n\t" "mov %c[r12](%0), %%r12 \n\t" "mov %c[r13](%0), %%r13 \n\t" "mov %c[r14](%0), %%r14 \n\t" "mov %c[r15](%0), %%r15 \n\t" #endif "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */ /* Enter guest mode */ "jne 1f \n\t" __ex(ASM_VMX_VMLAUNCH) "\n\t" "jmp 2f \n\t" "1: " __ex(ASM_VMX_VMRESUME) "\n\t" "2: " /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" "mov %%" _ASM_AX ", %c[rax](%0) \n\t" "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" __ASM_SIZE(pop) " %c[rcx](%0) \n\t" "mov %%" _ASM_DX ", %c[rdx](%0) \n\t" "mov %%" _ASM_SI ", %c[rsi](%0) \n\t" "mov %%" _ASM_DI ", %c[rdi](%0) \n\t" "mov %%" _ASM_BP ", %c[rbp](%0) \n\t" #ifdef CONFIG_X86_64 "mov %%r8, %c[r8](%0) \n\t" "mov %%r9, %c[r9](%0) \n\t" "mov %%r10, %c[r10](%0) \n\t" "mov %%r11, %c[r11](%0) \n\t" "mov %%r12, %c[r12](%0) \n\t" "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" "setbe %c[fail](%0) \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" ".popsection" : : "c"(vmx), "d"((unsigned long)HOST_RSP), [launched]"i"(offsetof(struct vcpu_vmx, __launched)), [fail]"i"(offsetof(struct vcpu_vmx, fail)), [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), #ifdef CONFIG_X86_64 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), [wordsize]"i"(sizeof(ulong)) : "cc", "memory" #ifdef CONFIG_X86_64 , "rax", "rbx", "rdi", "rsi" , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" #else , "eax", "ebx", "edi", "esi" #endif ); /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ if (debugctlmsr) update_debugctlmsr(debugctlmsr); #ifndef CONFIG_X86_64 /* * The sysexit path does not restore ds/es, so we must set them to * a reasonable value ourselves. * * We can't defer this to vmx_load_host_state() since that function * may be executed in interrupt context, which saves and restore segments * around it, nullifying its effect. 
*/ loadsegment(ds, __USER_DS); loadsegment(es, __USER_DS); #endif vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) | (1 << VCPU_EXREG_RFLAGS) | (1 << VCPU_EXREG_PDPTR) | (1 << VCPU_EXREG_SEGMENTS) | (1 << VCPU_EXREG_CR3)); vcpu->arch.regs_dirty = 0; vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); vmx->loaded_vmcs->launched = 1; vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); /* * the KVM_REQ_EVENT optimization bit is only on for one entry, and if * we did not inject a still-pending event to L1 now because of * nested_run_pending, we need to re-enable this bit. */ if (vmx->nested.nested_run_pending) kvm_make_request(KVM_REQ_EVENT, vcpu); vmx->nested.nested_run_pending = 0; vmx_complete_atomic_exit(vmx); vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); } static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int cpu; if (vmx->loaded_vmcs == &vmx->vmcs01) return; cpu = get_cpu(); vmx->loaded_vmcs = &vmx->vmcs01; vmx_vcpu_put(vcpu); vmx_vcpu_load(vcpu, cpu); vcpu->cpu = cpu; put_cpu(); } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); free_vpid(vmx); leave_guest_mode(vcpu); vmx_load_vmcs01(vcpu); free_nested(vmx); free_loaded_vmcs(vmx->loaded_vmcs); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); } static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); int cpu; if (!vmx) return ERR_PTR(-ENOMEM); allocate_vpid(vmx); err = kvm_vcpu_init(&vmx->vcpu, kvm, id); if (err) goto free_vcpu; vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) > PAGE_SIZE); err = -ENOMEM; if (!vmx->guest_msrs) { goto uninit_vcpu; } vmx->loaded_vmcs = &vmx->vmcs01; vmx->loaded_vmcs->vmcs = alloc_vmcs(); if (!vmx->loaded_vmcs->vmcs) goto free_msrs; if (!vmm_exclusive) kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id()))); loaded_vmcs_init(vmx->loaded_vmcs); if (!vmm_exclusive) kvm_cpu_vmxoff(); cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); vmx->vcpu.cpu = cpu; err = vmx_vcpu_setup(vmx); vmx_vcpu_put(&vmx->vcpu); put_cpu(); if (err) goto free_vmcs; if (vm_need_virtualize_apic_accesses(kvm)) { err = alloc_apic_access_page(kvm); if (err) goto free_vmcs; } if (enable_ept) { if (!kvm->arch.ept_identity_map_addr) kvm->arch.ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; err = init_rmode_identity_map(kvm); if (err) goto free_vmcs; } vmx->nested.current_vmptr = -1ull; vmx->nested.current_vmcs12 = NULL; return &vmx->vcpu; free_vmcs: free_loaded_vmcs(vmx->loaded_vmcs); free_msrs: kfree(vmx->guest_msrs); uninit_vcpu: kvm_vcpu_uninit(&vmx->vcpu); free_vcpu: free_vpid(vmx); kmem_cache_free(kvm_vcpu_cache, vmx); return ERR_PTR(err); } static void __init vmx_check_processor_compat(void *rtn) { struct vmcs_config vmcs_conf; *(int *)rtn = 0; if (setup_vmcs_config(&vmcs_conf) < 0) *(int *)rtn = -EIO; if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", smp_processor_id()); *(int *)rtn = -EIO; } } static int get_ept_level(void) { return VMX_EPT_DEFAULT_GAW + 1; } static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { u64 ret; /* For VT-d and EPT combination * 1. MMIO: always map as UC * 2. EPT with VT-d: * a. 
VT-d without snooping control feature: can't guarantee the * result, try to trust guest. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ if (is_mmio) ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; else if (kvm_arch_has_noncoherent_dma(vcpu->kvm)) ret = kvm_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT; else ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; return ret; } static int vmx_get_lpage_level(void) { if (enable_ept && !cpu_has_vmx_ept_1g_page()) return PT_DIRECTORY_LEVEL; else /* For shadow and EPT supported 1GB page */ return PT_PDPE_LEVEL; } static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control; vmx->rdtscp_enabled = false; if (vmx_rdtscp_supported()) { exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); if (exec_control & SECONDARY_EXEC_RDTSCP) { best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (best && (best->edx & bit(X86_FEATURE_RDTSCP))) vmx->rdtscp_enabled = true; else { exec_control &= ~SECONDARY_EXEC_RDTSCP; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } } } /* Exposing INVPCID only when PCID is exposed */ best = kvm_find_cpuid_entry(vcpu, 0x7, 0); if (vmx_invpcid_supported() && best && (best->ebx & bit(X86_FEATURE_INVPCID)) && guest_cpuid_has_pcid(vcpu)) { exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } else { if (cpu_has_secondary_exec_ctrls()) { exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } if (best) best->ebx &= ~bit(X86_FEATURE_INVPCID); } } static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { if (func == 1 && nested) entry->ecx |= bit(X86_FEATURE_VMX); } static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 exit_reason; if (fault->error_code & PFERR_RSVD_MASK) exit_reason = EXIT_REASON_EPT_MISCONFIG; else exit_reason = EXIT_REASON_EPT_VIOLATION; nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification); vmcs12->guest_physical_address = fault->address; } /* Callbacks for nested_ept_init_mmu_context: */ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) { /* return the page table to be shadowed - in our case, EPT12 */ return get_vmcs12(vcpu)->ept_pointer; } static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu, nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT); vcpu->arch.mmu.set_cr3 = vmx_set_cr3; vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) { vcpu->arch.walk_mmu = &vcpu->arch.mmu; } static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); WARN_ON(!is_guest_mode(vcpu)); /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. 
*/ if (vmcs12->exception_bitmap & (1u << PF_VECTOR)) nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); else kvm_inject_page_fault(vcpu, fault); } static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { /* TODO: Also verify bits beyond physical address width are 0 */ if (!PAGE_ALIGNED(vmcs12->apic_access_addr)) return false; /* * Translate L1 physical address to host physical * address for vmcs02. Keep the page pinned, so this * physical address remains valid. We keep a reference * to it so we can release it later. */ if (vmx->nested.apic_access_page) /* shouldn't happen */ nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = nested_get_page(vcpu, vmcs12->apic_access_addr); } if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { /* TODO: Also verify bits beyond physical address width are 0 */ if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr)) return false; if (vmx->nested.virtual_apic_page) /* shouldn't happen */ nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = nested_get_page(vcpu, vmcs12->virtual_apic_page_addr); /* * Failing the vm entry is _not_ what the processor does * but it's basically the only possibility we have. * We could still enter the guest if CR8 load exits are * enabled, CR8 store exits are enabled, and virtualize APIC * access is disabled; in this case the processor would never * use the TPR shadow and we could simply clear the bit from * the execution control. But such a configuration is useless, * so let's keep the code simple. */ if (!vmx->nested.virtual_apic_page) return false; } return true; } static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) { u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; struct vcpu_vmx *vmx = to_vmx(vcpu); if (vcpu->arch.virtual_tsc_khz == 0) return; /* Make sure short timeouts reliably trigger an immediate vmexit. * hrtimer_start does not guarantee this. */ if (preemption_timeout <= 1) { vmx_preemption_timer_fn(&vmx->nested.preemption_timer); return; } preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; preemption_timeout *= 1000000; do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); hrtimer_start(&vmx->nested.preemption_timer, ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); } /* * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 * guest in a way that will both be appropriate to L1's requests, and our * needs. In addition to modifying the active vmcs (which is vmcs02), this * function also has additional necessary side-effects, like setting various * vcpu->arch fields. 
*/ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); } else { kvm_set_dr(vcpu, 7, vcpu->arch.dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, vmcs12->vm_entry_intr_info_field); vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, vmcs12->vm_entry_exception_error_code); vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmcs12->vm_entry_instruction_len); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, vmcs12->guest_interruptibility_info); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); vmx_set_rflags(vcpu, vmcs12->guest_rflags); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, vmcs12->guest_pending_dbg_exceptions); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); vmcs_write64(VMCS_LINK_POINTER, -1ull); exec_control = vmcs12->pin_based_vm_exec_control; exec_control |= vmcs_config.pin_based_exec_ctrl; exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER | PIN_BASED_POSTED_INTR); vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); vmx->nested.preemption_timer_expired = false; if (nested_cpu_has_preemption_timer(vmcs12)) vmx_start_preemption_timer(vcpu); /* * Whether page-faults are trapped is determined by a combination of * 3 settings: 
PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. * If enable_ept, L0 doesn't care about page faults and we should * set all of these to L1's desires. However, if !enable_ept, L0 does * care about (at least some) page faults, and because it is not easy * (if at all possible?) to merge L0 and L1's desires, we simply ask * to exit on each and every L2 page fault. This is done by setting * MASK=MATCH=0 and (see below) EB.PF=1. * Note that below we don't need special code to set EB.PF beyond the * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when * !enable_ept, EB.PF is 1, so the "or" will always be 1. * * A problem with this approach (when !enable_ept) is that L1 may be * injected with more page faults than it asked for. This could have * caused problems, but in practice existing hypervisors don't care. * To fix this, we will need to emulate the PFEC checking (on the L1 * page tables), using walk_addr(), when injecting PFs to L1. */ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, enable_ept ? vmcs12->page_fault_error_code_mask : 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, enable_ept ? vmcs12->page_fault_error_code_match : 0); if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx_secondary_exec_control(vmx); if (!vmx->rdtscp_enabled) exec_control &= ~SECONDARY_EXEC_RDTSCP; /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) exec_control |= vmcs12->secondary_vm_exec_control; if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) { /* * If translation failed, no matter: This feature asks * to exit when accessing the given address, and if it * can never be accessed, this feature won't do * anything anyway. */ if (!vmx->nested.apic_access_page) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; else vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(vmx->nested.apic_access_page)); } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) { exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; kvm_vcpu_reload_apic_access_page(vcpu); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } /* * Set host-state according to L0's settings (vmcs12 is irrelevant here) * Some constant fields are set here by vmx_set_constant_host_state(). * Other fields are different per CPU, and will be set later when * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. */ vmx_set_constant_host_state(vmx); /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before * entry, but only if the current (host) sp changed from the value * we wrote last (vmx->host_rsp). This cache is no longer relevant * if we switch vmcs, and rather than hold a separate cache per vmcs, * here we just force the write to happen on entry. */ vmx->host_rsp = 0; exec_control = vmx_exec_control(vmx); /* L0's desires */ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; exec_control &= ~CPU_BASED_TPR_SHADOW; exec_control |= vmcs12->cpu_based_vm_exec_control; if (exec_control & CPU_BASED_TPR_SHADOW) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, page_to_phys(vmx->nested.virtual_apic_page)); vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); } /* * Merging of IO and MSR bitmaps not currently supported. * Rather, exit every time. 
*/ exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; exec_control &= ~CPU_BASED_USE_IO_BITMAPS; exec_control |= CPU_BASED_UNCOND_IO_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* L2->L1 exit controls are emulated - the hardware exit is to L0 so * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits are further modified by vmx_set_efer() below. */ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. */ vm_entry_controls_init(vmx, (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); vcpu->arch.pat = vmcs12->guest_ia32_pat; } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); set_cr4_guest_host_mask(vmx); if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); else vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); if (enable_vpid) { /* * Trivially support vpid by letting L2s share their parent * L1's vpid. TODO: move to a more elaborate solution, giving * each L2 its own vpid and exposing the vpid feature to L1. */ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmx_flush_tlb(vcpu); } if (nested_cpu_has_ept(vmcs12)) { kvm_mmu_unload(vcpu); nested_ept_init_mmu_context(vcpu); } if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->guest_ia32_efer; else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); /* * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified * TS bit (for lazy fpu) and bits which we consider mandatory enabled. * The CR0_READ_SHADOW is what L2 should have expected to read given * the specifications by L1; It's not enough to take * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we * have more bits than L1 expected. 
*/ vmx_set_cr0(vcpu, vmcs12->guest_cr0); vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); vmx_set_cr4(vcpu, vmcs12->guest_cr4); vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); /* shadow page tables on either EPT or shadow page tables */ kvm_set_cr3(vcpu, vmcs12->guest_cr3); kvm_mmu_reset_context(vcpu); if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; /* * L1 may access the L2's PDPTR, so save them to construct vmcs12 */ if (enable_ept) { vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); } kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); } /* * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 * for running an L2 nested guest. */ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); int cpu; struct loaded_vmcs *vmcs02; bool ia32e; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) return 1; skip_emulated_instruction(vcpu); vmcs12 = get_vmcs12(vcpu); if (enable_shadow_vmcs) copy_shadow_to_vmcs12(vmx); /* * The nested entry process starts with enforcing various prerequisites * on vmcs12 as required by the Intel SDM, and act appropriately when * they fail: As the SDM explains, some conditions should cause the * instruction to fail, while others will cause the instruction to seem * to succeed, but return an EXIT_REASON_INVALID_STATE. * To speed up the normal (success) code path, we should avoid checking * for misconfigurations which will anyway be caught by the processor * when using the merged vmcs02. */ if (vmcs12->launch_state == launch) { nested_vmx_failValid(vcpu, launch ? 
VMXERR_VMLAUNCH_NONCLEAR_VMCS : VMXERR_VMRESUME_NONLAUNCHED_VMCS); return 1; } if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) && !PAGE_ALIGNED(vmcs12->msr_bitmap)) { /*TODO: Also verify bits beyond physical address width are 0*/ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (!nested_get_vmcs12_pages(vcpu, vmcs12)) { /*TODO: Also verify bits beyond physical address width are 0*/ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (vmcs12->vm_entry_msr_load_count > 0 || vmcs12->vm_exit_msr_load_count > 0 || vmcs12->vm_exit_msr_store_count > 0) { pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__); nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, nested_vmx_true_procbased_ctls_low, nested_vmx_procbased_ctls_high) || !vmx_control_verify(vmcs12->secondary_vm_exec_control, nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) || !vmx_control_verify(vmcs12->pin_based_vm_exec_control, nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) || !vmx_control_verify(vmcs12->vm_exit_controls, nested_vmx_true_exit_ctls_low, nested_vmx_exit_ctls_high) || !vmx_control_verify(vmcs12->vm_entry_controls, nested_vmx_true_entry_ctls_low, nested_vmx_entry_ctls_high)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); return 1; } if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) || ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); return 1; } if (!nested_cr0_valid(vmcs12, vmcs12->guest_cr0) || ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } if (vmcs12->vmcs_link_pointer != -1ull) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR); return 1; } /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: * - Bits reserved in the IA32_EFER MSR must be 0. * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of * the IA-32e mode guest VM-exit control. It must also be identical * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to * CR0.PG) is 1. */ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) { ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || ((vmcs12->guest_cr0 & X86_CR0_PG) && ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } } /* * If the load IA32_EFER VM-exit control is 1, bits reserved in the * IA32_EFER MSR must be 0 in the field for that register. In addition, * the values of the LMA and LME bits in the field must each be that of * the host address-space size VM-exit control. 
*/ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { ia32e = (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); return 1; } } /* * We're finally done with prerequisite checking, and can start with * the nested entry. */ vmcs02 = nested_get_current_vmcs02(vmx); if (!vmcs02) return -ENOMEM; enter_guest_mode(vcpu); vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); cpu = get_cpu(); vmx->loaded_vmcs = vmcs02; vmx_vcpu_put(vcpu); vmx_vcpu_load(vcpu, cpu); vcpu->cpu = cpu; put_cpu(); vmx_segment_cache_clear(vmx); vmcs12->launch_state = 1; prepare_vmcs02(vcpu, vmcs12); if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) return kvm_emulate_halt(vcpu); vmx->nested.nested_run_pending = 1; /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet * returned as far as L1 is concerned. It will only return (and set * the success flag) when L2 exits (see nested_vmx_vmexit()). */ return 1; } /* * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date * because L2 may have changed some cr0 bits directly (CRO_GUEST_HOST_MASK). * This function returns the new value we should put in vmcs12.guest_cr0. * It's not enough to just return the vmcs02 GUEST_CR0. Rather, * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 * didn't trap the bit, because if L1 did, so would L0). * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have * been modified by L2, and L1 knows it. So just leave the old value of * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 * isn't relevant, because if L0 traps this bit it can set it to anything. * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have * changed these bits, and therefore they need to be updated, but L0 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 
*/ static inline unsigned long vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | vcpu->arch.cr0_guest_owned_bits)); } static inline unsigned long vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | vcpu->arch.cr4_guest_owned_bits)); } static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u32 idt_vectoring; unsigned int nr; if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { nr = vcpu->arch.exception.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (kvm_exception_is_soft(nr)) { vmcs12->vm_exit_instruction_len = vcpu->arch.event_exit_inst_len; idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; } else idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; if (vcpu->arch.exception.has_error_code) { idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; vmcs12->idt_vectoring_error_code = vcpu->arch.exception.error_code; } vmcs12->idt_vectoring_info_field = idt_vectoring; } else if (vcpu->arch.nmi_injected) { vmcs12->idt_vectoring_info_field = INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; } else if (vcpu->arch.interrupt.pending) { nr = vcpu->arch.interrupt.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { idt_vectoring |= INTR_TYPE_SOFT_INTR; vmcs12->vm_entry_instruction_len = vcpu->arch.event_exit_inst_len; } else idt_vectoring |= INTR_TYPE_EXT_INTR; vmcs12->idt_vectoring_info_field = idt_vectoring; } } static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); return 0; } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { if (vmx->nested.nested_run_pending || vcpu->arch.interrupt.pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK, 0); /* * The NMI-triggered VM exit counts as injection: * clear this one and block further NMIs. */ vcpu->arch.nmi_pending = 0; vmx_set_nmi_mask(vcpu, true); return 0; } if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && nested_exit_on_intr(vcpu)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); } return 0; } static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); u64 value; if (ktime_to_ns(remaining) <= 0) return 0; value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; do_div(value, 1000000); return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; } /* * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), * and this function updates it to reflect the changes to the guest state while * L2 was running (and perhaps made some exits which were handled directly by L0 * without going back to L1), and to reflect the exit reason. 
* Note that we do not have to copy here all VMCS fields, just those that * could have changed by the L2 guest or the exit - i.e., the guest-state and * exit-information fields only. Other fields are modified by L1 with VMWRITE, * which already writes to vmcs12 directly. */ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { /* update guest state fields: */ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); vmcs12->guest_interruptibility_info = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; else vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; if (nested_cpu_has_preemption_timer(vmcs12)) { if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) vmcs12->vmx_preemption_timer_value = vmx_get_preemption_timer_value(vcpu); hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); } /* * In some cases (usually, nested EPT), L2 is allowed to change its * own CR3 without exiting. If it has changed it, we must keep it. 
* Of course, if L0 is using shadow page tables, GUEST_CR3 was defined * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. * * Additionally, restore L2's PDPTR to vmcs12. */ if (enable_ept) { vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3); vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); } vmcs12->vm_entry_controls = (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); } /* TODO: These cannot have changed unless we have MSR bitmaps and * the relevant bit asks not to trap the change */ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) vmcs12->guest_ia32_efer = vcpu->arch.efer; vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); if (vmx_mpx_supported()) vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); /* update exit information fields: */ vmcs12->vm_exit_reason = exit_reason; vmcs12->exit_qualification = exit_qualification; vmcs12->vm_exit_intr_info = exit_intr_info; if ((vmcs12->vm_exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); vmcs12->idt_vectoring_info_field = 0; vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { /* vm_entry_intr_info_field is cleared on exit. Emulate this * instead of reading the real value. */ vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; /* * Transfer the event that L0 or L1 may wanted to inject into * L2 to IDT_VECTORING_INFO_FIELD. */ vmcs12_save_pending_event(vcpu, vmcs12); } /* * Drop what we picked up for L2 via vmx_complete_interrupts. It is * preserved above and would only end up incorrectly in L1. */ vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); } /* * A part of what we need to when the nested L2 guest exits and we want to * run its L1 parent, is to reset L1's guest state to the host state specified * in vmcs12. * This function is to be called not only on normal nested exit, but also on * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry * Failures During or After Loading Guest State"). * This function should be called when the active VMCS is L1's (vmcs01). 
*/ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); vmx_set_efer(vcpu, vcpu->arch.efer); kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); /* * Note that calling vmx_set_cr0 is important, even if cr0 hasn't * actually changed, because it depends on the current state of * fpu_active (which may have changed). * Note that vmx_set_cr0 refers to efer set above. */ vmx_set_cr0(vcpu, vmcs12->host_cr0); /* * If we did fpu_activate()/fpu_deactivate() during L2's run, we need * to apply the same changes to L1's vmcs. We just set cr0 correctly, * but we also need to update cr0_guest_host_mask and exception_bitmap. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0); vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01 * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask(); */ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); kvm_set_cr4(vcpu, vmcs12->host_cr4); nested_ept_uninit_mmu_context(vcpu); kvm_set_cr3(vcpu, vmcs12->host_cr3); kvm_mmu_reset_context(vcpu); if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; if (enable_vpid) { /* * Trivially support vpid by letting L2s share their parent * L1's vpid. TODO: move to a more elaborate solution, giving * each L2 its own vpid and exposing the vpid feature to L1. */ vmx_flush_tlb(vcpu); } vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
*/ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, 0); if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); vcpu->arch.pat = vmcs12->host_ia32_pat; } if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, vmcs12->host_ia32_perf_global_ctrl); /* Set L1 segment info according to Intel SDM 27.5.2 Loading Host Segment and Descriptor-Table Registers */ seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .selector = vmcs12->host_cs_selector, .type = 11, .present = 1, .s = 1, .g = 1 }; if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) seg.l = 1; else seg.db = 1; vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .type = 3, .present = 1, .s = 1, .db = 1, .g = 1 }; seg.selector = vmcs12->host_ds_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); seg.selector = vmcs12->host_es_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); seg.selector = vmcs12->host_ss_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); seg.selector = vmcs12->host_fs_selector; seg.base = vmcs12->host_fs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); seg.selector = vmcs12->host_gs_selector; seg.base = vmcs12->host_gs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); seg = (struct kvm_segment) { .base = vmcs12->host_tr_base, .limit = 0x67, .selector = vmcs12->host_tr_selector, .type = 11, .present = 1 }; vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); kvm_set_dr(vcpu, 7, 0x400); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } /* * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 * and modify vmcs12 to make it see what it would expect to see there if * L2 was its real guest. 
Must only be called when in L2 (is_guest_mode()) */ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); leave_guest_mode(vcpu); prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, exit_qualification); vmx_load_vmcs01(vcpu); if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) && nested_exit_intr_ack_set(vcpu)) { int irq = kvm_cpu_get_interrupt(vcpu); WARN_ON(irq < 0); vmcs12->vm_exit_intr_info = irq | INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; } trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, vmcs12->exit_qualification, vmcs12->idt_vectoring_info_field, vmcs12->vm_exit_intr_info, vmcs12->vm_exit_intr_error_code, KVM_ISA_VMX); vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); vmx_segment_cache_clear(vmx); /* if no vmcs02 cache requested, remove the one we used */ if (VMCS02_POOL_SIZE == 0) nested_free_vmcs02(vmx, vmx->nested.current_vmptr); load_vmcs12_host_state(vcpu, vmcs12); /* Update TSC_OFFSET if TSC was changed while L2 ran */ vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); /* This is needed for same reason as it was needed in prepare_vmcs02 */ vmx->host_rsp = 0; /* Unpin physical memory we referred to in vmcs02 */ if (vmx->nested.apic_access_page) { nested_release_page(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { nested_release_page(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } /* * We are now running in L2, mmu_notifier will force to reload the * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1. */ kvm_vcpu_reload_apic_access_page(vcpu); /* * Exiting from L2 to L1, we're now back to L1 which thinks it just * finished a VMLAUNCH or VMRESUME instruction, so we need to set the * success or failure flag accordingly. */ if (unlikely(vmx->fail)) { vmx->fail = 0; nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR)); } else nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } /* * Forcibly leave nested mode in order to be able to reset the VCPU later on. */ static void vmx_leave_nested(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) nested_vmx_vmexit(vcpu, -1, 0, 0); free_nested(to_vmx(vcpu)); } /* * L1's failure to enter L2 is a subset of a normal exit, as explained in * 23.7 "VM-entry failures during or after loading guest state" (this also * lists the acceptable exit-reason and exit-qualification parameters). * It should only be called before L2 actually succeeded to run, and when * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss). 
*/ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification) { load_vmcs12_host_state(vcpu, vmcs12); vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY; vmcs12->exit_qualification = qualification; nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) to_vmx(vcpu)->nested.sync_shadow_vmcs = true; } static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) { return X86EMUL_CONTINUE; } static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) { if (ple_gap) shrink_ple_window(vcpu); } static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, .hardware_setup = hardware_setup, .hardware_unsetup = hardware_unsetup, .check_processor_compatibility = vmx_check_processor_compat, .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, .cpu_has_accelerated_tpr = report_flexpriority, .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, .vcpu_reset = vmx_vcpu_reset, .prepare_guest_switch = vmx_save_host_state, .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, .update_db_bp_intercept = update_exception_bitmap, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, .get_segment = vmx_get_segment, .set_segment = vmx_set_segment, .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, .decache_cr3 = vmx_decache_cr3, .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, .set_cr4 = vmx_set_cr4, .set_efer = vmx_set_efer, .get_idt = vmx_get_idt, .set_idt = vmx_set_idt, .get_gdt = vmx_get_gdt, .set_gdt = vmx_set_gdt, .get_dr6 = vmx_get_dr6, .set_dr6 = vmx_set_dr6, .set_dr7 = vmx_set_dr7, .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, .cache_reg = vmx_cache_reg, .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, .fpu_deactivate = vmx_fpu_deactivate, .tlb_flush = vmx_flush_tlb, .run = vmx_vcpu_run, .handle_exit = vmx_handle_exit, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = vmx_set_interrupt_shadow, .get_interrupt_shadow = vmx_get_interrupt_shadow, .patch_hypercall = vmx_patch_hypercall, .set_irq = vmx_inject_irq, .set_nmi = vmx_inject_nmi, .queue_exception = vmx_queue_exception, .cancel_injection = vmx_cancel_injection, .interrupt_allowed = vmx_interrupt_allowed, .nmi_allowed = vmx_nmi_allowed, .get_nmi_mask = vmx_get_nmi_mask, .set_nmi_mask = vmx_set_nmi_mask, .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .vm_has_apicv = vmx_vm_has_apicv, .load_eoi_exitmap = vmx_load_eoi_exitmap, .hwapic_irr_update = vmx_hwapic_irr_update, .hwapic_isr_update = vmx_hwapic_isr_update, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, .get_mt_mask = vmx_get_mt_mask, .get_exit_info = vmx_get_exit_info, .get_lpage_level = vmx_get_lpage_level, .cpuid_update = vmx_cpuid_update, .rdtscp_supported = vmx_rdtscp_supported, .invpcid_supported = vmx_invpcid_supported, .set_supported_cpuid = vmx_set_supported_cpuid, .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, .set_tsc_khz = vmx_set_tsc_khz, .read_tsc_offset = 
vmx_read_tsc_offset, .write_tsc_offset = vmx_write_tsc_offset, .adjust_tsc_offset = vmx_adjust_tsc_offset, .compute_tsc_offset = vmx_compute_tsc_offset, .read_l1_tsc = vmx_read_l1_tsc, .set_tdp_cr3 = vmx_set_cr3, .check_intercept = vmx_check_intercept, .handle_external_intr = vmx_handle_external_intr, .mpx_supported = vmx_mpx_supported, .check_nested_events = vmx_check_nested_events, .sched_in = vmx_sched_in, }; static int __init vmx_init(void) { int r, i, msr; rdmsrl_safe(MSR_EFER, &host_efer); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) kvm_define_shared_msr(i, vmx_msr_index[i]); vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_io_bitmap_a) return -ENOMEM; r = -ENOMEM; vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_io_bitmap_b) goto out; vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_legacy) goto out1; vmx_msr_bitmap_legacy_x2apic = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_legacy_x2apic) goto out2; vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_longmode) goto out3; vmx_msr_bitmap_longmode_x2apic = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_longmode_x2apic) goto out4; vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_vmread_bitmap) goto out5; vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_vmwrite_bitmap) goto out6; memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); /* * Allow direct access to the PC debug port (it is often used for I/O * delays, but the vmexits simply slow things down). */ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); clear_bit(0x80, vmx_io_bitmap_a); memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx), THIS_MODULE); if (r) goto out7; #ifdef CONFIG_KEXEC rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); #endif vmx_disable_intercept_for_msr(MSR_FS_BASE, false); vmx_disable_intercept_for_msr(MSR_GS_BASE, false); vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true); memcpy(vmx_msr_bitmap_legacy_x2apic, vmx_msr_bitmap_legacy, PAGE_SIZE); memcpy(vmx_msr_bitmap_longmode_x2apic, vmx_msr_bitmap_longmode, PAGE_SIZE); if (enable_apicv) { for (msr = 0x800; msr <= 0x8ff; msr++) vmx_disable_intercept_msr_read_x2apic(msr); /* According SDM, in x2apic mode, the whole id reg is used. * But in KVM, it only use the highest eight bits. Need to * intercept it */ vmx_enable_intercept_msr_read_x2apic(0x802); /* TMCCT */ vmx_enable_intercept_msr_read_x2apic(0x839); /* TPR */ vmx_disable_intercept_msr_write_x2apic(0x808); /* EOI */ vmx_disable_intercept_msr_write_x2apic(0x80b); /* SELF-IPI */ vmx_disable_intercept_msr_write_x2apic(0x83f); } if (enable_ept) { kvm_mmu_set_mask_ptes(0ull, (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull, (enable_ept_ad_bits) ? 
VMX_EPT_DIRTY_BIT : 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK); ept_set_mmio_spte_mask(); kvm_enable_tdp(); } else kvm_disable_tdp(); update_ple_window_actual_max(); return 0; out7: free_page((unsigned long)vmx_vmwrite_bitmap); out6: free_page((unsigned long)vmx_vmread_bitmap); out5: free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); out4: free_page((unsigned long)vmx_msr_bitmap_longmode); out3: free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); out2: free_page((unsigned long)vmx_msr_bitmap_legacy); out1: free_page((unsigned long)vmx_io_bitmap_b); out: free_page((unsigned long)vmx_io_bitmap_a); return r; } static void __exit vmx_exit(void) { free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); free_page((unsigned long)vmx_msr_bitmap_legacy); free_page((unsigned long)vmx_msr_bitmap_longmode); free_page((unsigned long)vmx_io_bitmap_b); free_page((unsigned long)vmx_io_bitmap_a); free_page((unsigned long)vmx_vmwrite_bitmap); free_page((unsigned long)vmx_vmread_bitmap); #ifdef CONFIG_KEXEC RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); synchronize_rcu(); #endif kvm_exit(); } module_init(vmx_init) module_exit(vmx_exit)
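/*
 * Illustrative sketch (an assumption, not part of vmx.c): the three-way
 * CR0 merge described in the comment above vmcs12_guest_cr0() can be
 * modelled in isolation. merge_guest_cr0() below is a hypothetical
 * userspace stand-in: "l0_owned" plays the role of
 * vcpu->arch.cr0_guest_owned_bits and "l1_mask" the role of
 * vmcs12->cr0_guest_host_mask; the three OR terms correspond to cases
 * 1-3 of that comment.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t merge_guest_cr0(uint64_t vmcs02_guest_cr0,
				uint64_t vmcs02_cr0_read_shadow,
				uint64_t vmcs12_guest_cr0,
				uint64_t l1_mask, uint64_t l0_owned)
{
	return (vmcs02_guest_cr0 & l0_owned) |	/* 1: untrapped, take live value */
	       (vmcs12_guest_cr0 & l1_mask) |	/* 2: L1 trapped, value unchanged */
	       (vmcs02_cr0_read_shadow &
		~(l1_mask | l0_owned));		/* 3: only L0 trapped, use shadow */
}

int main(void)
{
	/* Example: L0 owns TS (bit 3), L1 traps PG (bit 31). */
	uint64_t cr0 = merge_guest_cr0(0x80000008ULL, 0x00000000ULL,
				       0x80000000ULL, 1ULL << 31, 1ULL << 3);

	/* TS comes from the live vmcs02 value, PG from vmcs12: 0x80000008. */
	printf("merged guest_cr0 = %#llx\n", (unsigned long long)cr0);
	return 0;
}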
/* * linux/fs/hfsplus/dir.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handling of directories */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/random.h> #include "hfsplus_fs.h" #include "hfsplus_raw.h" static inline void hfsplus_instantiate(struct dentry *dentry, struct inode *inode, u32 cnid) { dentry->d_fsdata = (void *)(unsigned long)cnid; d_instantiate(dentry, inode); } /* Find the entry inside dir named dentry->d_name */ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; struct hfs_find_data fd; struct super_block *sb; hfsplus_cat_entry entry; int err; u32 cnid, linkid = 0; u16 type; sb = dir->i_sb; dentry->d_fsdata = NULL; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return ERR_PTR(err); hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); again: err = hfs_brec_read(&fd, &entry, sizeof(entry)); if (err) { if (err == -ENOENT) { hfs_find_exit(&fd); /* No such entry */ inode = NULL; goto out; } goto fail; } type = be16_to_cpu(entry.type); if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { err = -EIO; goto fail; } cnid = be32_to_cpu(entry.folder.id); dentry->d_fsdata = (void *)(unsigned long)cnid; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { err = -EIO; goto fail; } cnid = be32_to_cpu(entry.file.id); if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) && entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) && (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)-> create_date || entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode)-> create_date) && HFSPLUS_SB(sb)->hidden_dir) { struct qstr str; char name[32]; if (dentry->d_fsdata) { /* * We found a link pointing to another link, * so ignore it and treat it as regular file. */ cnid = (unsigned long)dentry->d_fsdata; linkid = 0; } else { dentry->d_fsdata = (void *)(unsigned long)cnid; linkid = be32_to_cpu(entry.file.permissions.dev); str.len = sprintf(name, "iNode%d", linkid); str.name = name; hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb)->hidden_dir->i_ino, &str); goto again; } } else if (!dentry->d_fsdata) dentry->d_fsdata = (void *)(unsigned long)cnid; } else { printk(KERN_ERR "hfs: invalid catalog entry type in lookup\n"); err = -EIO; goto fail; } hfs_find_exit(&fd); inode = hfsplus_iget(dir->i_sb, cnid); if (IS_ERR(inode)) return ERR_CAST(inode); if (S_ISREG(inode->i_mode)) HFSPLUS_I(inode)->linkid = linkid; out: d_add(dentry, inode); return NULL; fail: hfs_find_exit(&fd); return ERR_PTR(err); } static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; hfsplus_cat_entry entry; struct hfs_find_data fd; struct hfsplus_readdir_data *rd; u16 type; if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return err; hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd); if (err) goto out; switch ((u32)filp->f_pos) { case 0: /* This is completely artificial... 
*/ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; filp->f_pos++; /* fall through */ case 1: if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { printk(KERN_ERR "hfs: bad catalog folder thread\n"); err = -EIO; goto out; } if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { printk(KERN_ERR "hfs: truncated catalog thread\n"); err = -EIO; goto out; } if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; filp->f_pos++; /* fall through */ default: if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, filp->f_pos - 1); if (err) goto out; } for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { printk(KERN_ERR "hfs: walked past end of dir\n"); err = -EIO; goto out; } if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = be16_to_cpu(entry.type); len = HFSPLUS_MAX_STRLEN; err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len); if (err) goto out; if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { printk(KERN_ERR "hfs: small dir entry\n"); err = -EIO; goto out; } if (HFSPLUS_SB(sb)->hidden_dir && HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { printk(KERN_ERR "hfs: small file entry\n"); err = -EIO; goto out; } if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.id), DT_REG)) break; } else { printk(KERN_ERR "hfs: bad catalog entry type\n"); err = -EIO; goto out; } next: filp->f_pos++; if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } filp->private_data = rd; rd->file = filp; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); out: hfs_find_exit(&fd); return err; } static int hfsplus_dir_release(struct inode *inode, struct file *file) { struct hfsplus_readdir_data *rd = file->private_data; if (rd) { mutex_lock(&inode->i_mutex); list_del(&rd->list); mutex_unlock(&inode->i_mutex); kfree(rd); } return 0; } static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, struct dentry *dst_dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb); struct inode *inode = src_dentry->d_inode; struct inode *src_dir = src_dentry->d_parent->d_inode; struct qstr str; char name[32]; u32 cnid, id; int res; if (HFSPLUS_IS_RSRC(inode)) return -EPERM; if (!S_ISREG(inode->i_mode)) return -EPERM; mutex_lock(&sbi->vh_mutex); if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) { for (;;) { get_random_bytes(&id, sizeof(cnid)); id &= 0x3fffffff; str.name = name; str.len = sprintf(name, "iNode%d", id); res = hfsplus_rename_cat(inode->i_ino, src_dir, &src_dentry->d_name, sbi->hidden_dir, &str); if (!res) break; if (res != -EEXIST) goto out; } HFSPLUS_I(inode)->linkid = id; cnid = sbi->next_cnid++; src_dentry->d_fsdata = (void *)(unsigned long)cnid; res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode); if (res) /* panic? 
			 */
			goto out;
		sbi->file_count++;
	}
	cnid = sbi->next_cnid++;
	res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode);
	if (res)
		goto out;

	inc_nlink(inode);
	hfsplus_instantiate(dst_dentry, inode, cnid);
	ihold(inode);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	sbi->file_count++;
	dst_dir->i_sb->s_dirt = 1;
out:
	mutex_unlock(&sbi->vh_mutex);
	return res;
}

static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
	struct inode *inode = dentry->d_inode;
	struct qstr str;
	char name[32];
	u32 cnid;
	int res;

	if (HFSPLUS_IS_RSRC(inode))
		return -EPERM;

	mutex_lock(&sbi->vh_mutex);
	cnid = (u32)(unsigned long)dentry->d_fsdata;
	if (inode->i_ino == cnid &&
	    atomic_read(&HFSPLUS_I(inode)->opencnt)) {
		str.name = name;
		str.len = sprintf(name, "temp%lu", inode->i_ino);
		res = hfsplus_rename_cat(inode->i_ino,
					 dir, &dentry->d_name,
					 sbi->hidden_dir, &str);
		if (!res) {
			inode->i_flags |= S_DEAD;
			drop_nlink(inode);
		}
		goto out;
	}

	res = hfsplus_delete_cat(cnid, dir, &dentry->d_name);
	if (res)
		goto out;

	if (inode->i_nlink > 0)
		drop_nlink(inode);
	if (inode->i_ino == cnid)
		clear_nlink(inode);
	if (!inode->i_nlink) {
		if (inode->i_ino != cnid) {
			sbi->file_count--;
			if (!atomic_read(&HFSPLUS_I(inode)->opencnt)) {
				res = hfsplus_delete_cat(inode->i_ino,
							 sbi->hidden_dir,
							 NULL);
				if (!res)
					hfsplus_delete_inode(inode);
			} else
				inode->i_flags |= S_DEAD;
		} else
			hfsplus_delete_inode(inode);
	} else
		sbi->file_count--;
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	mutex_unlock(&sbi->vh_mutex);
	return res;
}

static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
	struct inode *inode = dentry->d_inode;
	int res;

	if (inode->i_size != 2)
		return -ENOTEMPTY;

	mutex_lock(&sbi->vh_mutex);
	res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
	if (res)
		goto out;
	clear_nlink(inode);
	inode->i_ctime = CURRENT_TIME_SEC;
	hfsplus_delete_inode(inode);
	mark_inode_dirty(inode);
out:
	mutex_unlock(&sbi->vh_mutex);
	return res;
}

static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
			   const char *symname)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
	struct inode *inode;
	int res = -ENOSPC;

	mutex_lock(&sbi->vh_mutex);
	inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO);
	if (!inode)
		goto out;

	res = page_symlink(inode, symname, strlen(symname) + 1);
	if (res)
		goto out_err;

	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
	if (res)
		goto out_err;

	hfsplus_instantiate(dentry, inode, inode->i_ino);
	mark_inode_dirty(inode);
	goto out;

out_err:
	clear_nlink(inode);
	hfsplus_delete_inode(inode);
	iput(inode);
out:
	mutex_unlock(&sbi->vh_mutex);
	return res;
}

static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
			 umode_t mode, dev_t rdev)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
	struct inode *inode;
	int res = -ENOSPC;

	mutex_lock(&sbi->vh_mutex);
	inode = hfsplus_new_inode(dir->i_sb, mode);
	if (!inode)
		goto out;

	if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode))
		init_special_inode(inode, mode, rdev);

	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
	if (res) {
		clear_nlink(inode);
		hfsplus_delete_inode(inode);
		iput(inode);
		goto out;
	}

	hfsplus_instantiate(dentry, inode, inode->i_ino);
	mark_inode_dirty(inode);
out:
	mutex_unlock(&sbi->vh_mutex);
	return res;
}

static int hfsplus_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode, struct nameidata *nd)
{
	return hfsplus_mknod(dir, dentry,
			     mode, 0);
}

static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry,
			 umode_t mode)
{
	return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0);
}

static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int res;

	/* Unlink destination if it already exists */
	if (new_dentry->d_inode) {
		if (S_ISDIR(new_dentry->d_inode->i_mode))
			res = hfsplus_rmdir(new_dir, new_dentry);
		else
			res = hfsplus_unlink(new_dir, new_dentry);
		if (res)
			return res;
	}

	res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata,
				 old_dir, &old_dentry->d_name,
				 new_dir, &new_dentry->d_name);
	if (!res)
		new_dentry->d_fsdata = old_dentry->d_fsdata;
	return res;
}

const struct inode_operations hfsplus_dir_inode_operations = {
	.lookup		= hfsplus_lookup,
	.create		= hfsplus_create,
	.link		= hfsplus_link,
	.unlink		= hfsplus_unlink,
	.mkdir		= hfsplus_mkdir,
	.rmdir		= hfsplus_rmdir,
	.symlink	= hfsplus_symlink,
	.mknod		= hfsplus_mknod,
	.rename		= hfsplus_rename,
};

const struct file_operations hfsplus_dir_operations = {
	.fsync		= hfsplus_file_fsync,
	.read		= generic_read_dir,
	.readdir	= hfsplus_readdir,
	.unlocked_ioctl = hfsplus_ioctl,
	.llseek		= generic_file_llseek,
	.release	= hfsplus_dir_release,
};
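
/*
 * Illustrative sketch, not part of the original file: the hardlink scheme
 * used by hfsplus_lookup() and hfsplus_link() above keeps the real file
 * under the hidden metadata directory as "iNode<linkid>", with the link id
 * taken from the catalog entry's permissions.dev field.  The hypothetical
 * helper below only shows how that hidden name is formed; it mirrors the
 * sprintf(name, "iNode%d", linkid) calls above.
 */
static inline int hfsplus_example_hidden_name(char *buf, size_t buflen,
					      u32 linkid)
{
	/* returns the length of the generated hidden-directory name */
	return snprintf(buf, buflen, "iNode%u", linkid);
}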
/*
 * Copyright (C) 2008 IBM Corporation
 * Author: Mimi Zohar <zohar@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * ima_policy.c
 *	- initialize default measure policy rules
 *
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/parser.h>
#include <linux/slab.h>

#include "ima.h"

/* flags definitions */
#define IMA_FUNC	0x0001
#define IMA_MASK	0x0002
#define IMA_FSMAGIC	0x0004
#define IMA_UID		0x0008

enum ima_action { UNKNOWN = -1, DONT_MEASURE = 0, MEASURE };

#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
	LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
};

struct ima_measure_rule_entry {
	struct list_head list;
	enum ima_action action;
	unsigned int flags;
	enum ima_hooks func;
	int mask;
	unsigned long fsmagic;
	uid_t uid;
	struct {
		void *rule;	/* LSM file metadata specific */
		int type;	/* audit type */
	} lsm[MAX_LSM_RULES];
};

/*
 * Without LSM specific knowledge, the default policy can only be
 * written in terms of .action, .func, .mask, .fsmagic, and .uid
 */

/*
 * The minimum rule set to allow for full TCB coverage.  Measures all files
 * opened or mmap for exec and everything read by root.  Dangerous because
 * normal users can easily run the machine out of memory simply building
 * and running executables.
 */
static struct ima_measure_rule_entry default_rules[] = {
	{.action = DONT_MEASURE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC},
	{.action = MEASURE,.func = FILE_MMAP,.mask = MAY_EXEC,
	 .flags = IMA_FUNC | IMA_MASK},
	{.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
	 .flags = IMA_FUNC | IMA_MASK},
	{.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
	 .flags = IMA_FUNC | IMA_MASK | IMA_UID},
};

static LIST_HEAD(measure_default_rules);
static LIST_HEAD(measure_policy_rules);
static struct list_head *ima_measure;

static DEFINE_MUTEX(ima_measure_mutex);

static bool ima_use_tcb __initdata;
static int __init default_policy_setup(char *str)
{
	ima_use_tcb = 1;
	return 1;
}
__setup("ima_tcb", default_policy_setup);

/**
 * ima_match_rules - determine whether an inode matches the measure rule.
 * @rule: a pointer to a rule
 * @inode: a pointer to an inode
 * @func: LIM hook identifier
 * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
 *
 * Returns true on rule match, false on failure.
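 *
 * For example, a rule with flags IMA_FUNC | IMA_MASK matches only when
 * both the hook (@func) and the requested access (@mask) equal the rule's
 * stored values; any LSM conditions attached to the rule must match as
 * well.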
 */
static bool ima_match_rules(struct ima_measure_rule_entry *rule,
			    struct inode *inode, enum ima_hooks func, int mask)
{
	struct task_struct *tsk = current;
	int i;

	if ((rule->flags & IMA_FUNC) && rule->func != func)
		return false;
	if ((rule->flags & IMA_MASK) && rule->mask != mask)
		return false;
	if ((rule->flags & IMA_FSMAGIC)
	    && rule->fsmagic != inode->i_sb->s_magic)
		return false;
	if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid)
		return false;
	for (i = 0; i < MAX_LSM_RULES; i++) {
		int rc = 0;
		u32 osid, sid;

		if (!rule->lsm[i].rule)
			continue;

		switch (i) {
		case LSM_OBJ_USER:
		case LSM_OBJ_ROLE:
		case LSM_OBJ_TYPE:
			security_inode_getsecid(inode, &osid);
			rc = security_filter_rule_match(osid,
							rule->lsm[i].type,
							Audit_equal,
							rule->lsm[i].rule,
							NULL);
			break;
		case LSM_SUBJ_USER:
		case LSM_SUBJ_ROLE:
		case LSM_SUBJ_TYPE:
			security_task_getsecid(tsk, &sid);
			rc = security_filter_rule_match(sid,
							rule->lsm[i].type,
							Audit_equal,
							rule->lsm[i].rule,
							NULL);
		default:
			break;
		}
		if (!rc)
			return false;
	}
	return true;
}

/**
 * ima_match_policy - decision based on LSM and other conditions
 * @inode: pointer to an inode for which the policy decision is being made
 * @func: IMA hook identifier
 * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
 *
 * Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
 * conditions.
 *
 * (There is no need for locking when walking the policy list,
 * as elements in the list are never deleted, nor does the list
 * change.)
 */
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask)
{
	struct ima_measure_rule_entry *entry;

	list_for_each_entry(entry, ima_measure, list) {
		bool rc;

		rc = ima_match_rules(entry, inode, func, mask);
		if (rc)
			return entry->action;
	}
	return 0;
}

/**
 * ima_init_policy - initialize the default measure rules.
 *
 * ima_measure points to either the measure_default_rules or the new
 * measure_policy_rules.
 */
void __init ima_init_policy(void)
{
	int i, entries;

	/* if !ima_use_tcb set entries = 0 so we load NO default rules */
	if (ima_use_tcb)
		entries = ARRAY_SIZE(default_rules);
	else
		entries = 0;

	for (i = 0; i < entries; i++)
		list_add_tail(&default_rules[i].list, &measure_default_rules);
	ima_measure = &measure_default_rules;
}

/**
 * ima_update_policy - update default_rules with new measure rules
 *
 * Called on file .release to update the default rules with a complete new
 * policy.  Once updated, the policy is locked, no additional rules can be
 * added to the policy.
 */
void ima_update_policy(void)
{
	const char *op = "policy_update";
	const char *cause = "already exists";
	int result = 1;
	int audit_info = 0;

	if (ima_measure == &measure_default_rules) {
		ima_measure = &measure_policy_rules;
		cause = "complete";
		result = 0;
	}
	integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
			    NULL, op, cause, result, audit_info);
}

enum {
	Opt_err = -1, Opt_measure = 1, Opt_dont_measure,
	Opt_obj_user, Opt_obj_role, Opt_obj_type,
	Opt_subj_user, Opt_subj_role, Opt_subj_type,
	Opt_func, Opt_mask, Opt_fsmagic, Opt_uid
};

static match_table_t policy_tokens = {
	{Opt_measure, "measure"},
	{Opt_dont_measure, "dont_measure"},
	{Opt_obj_user, "obj_user=%s"},
	{Opt_obj_role, "obj_role=%s"},
	{Opt_obj_type, "obj_type=%s"},
	{Opt_subj_user, "subj_user=%s"},
	{Opt_subj_role, "subj_role=%s"},
	{Opt_subj_type, "subj_type=%s"},
	{Opt_func, "func=%s"},
	{Opt_mask, "mask=%s"},
	{Opt_fsmagic, "fsmagic=%s"},
	{Opt_uid, "uid=%s"},
	{Opt_err, NULL}
};

static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
			     char *args, int lsm_rule, int audit_type)
{
	int result;

	if (entry->lsm[lsm_rule].rule)
		return -EINVAL;

	entry->lsm[lsm_rule].type = audit_type;
	result = security_filter_rule_init(entry->lsm[lsm_rule].type,
					   Audit_equal, args,
					   &entry->lsm[lsm_rule].rule);
	if (!entry->lsm[lsm_rule].rule)
		return -EINVAL;
	return result;
}

static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
{
	audit_log_format(ab, "%s=", key);
	audit_log_untrustedstring(ab, value);
	audit_log_format(ab, " ");
}

static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
{
	struct audit_buffer *ab;
	char *p;
	int result = 0;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);

	entry->uid = -1;
	entry->action = UNKNOWN;
	while ((p = strsep(&rule, " \t")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		unsigned long lnum;

		if (result < 0)
			break;
		if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
			continue;
		token = match_token(p, policy_tokens, args);
		switch (token) {
		case Opt_measure:
			ima_log_string(ab, "action", "measure");

			if (entry->action != UNKNOWN)
				result = -EINVAL;

			entry->action = MEASURE;
			break;
		case Opt_dont_measure:
			ima_log_string(ab, "action", "dont_measure");

			if (entry->action != UNKNOWN)
				result = -EINVAL;

			entry->action = DONT_MEASURE;
			break;
		case Opt_func:
			ima_log_string(ab, "func", args[0].from);

			if (entry->func)
				result = -EINVAL;

			if (strcmp(args[0].from, "FILE_CHECK") == 0)
				entry->func = FILE_CHECK;
			/* PATH_CHECK is for backwards compat */
			else if (strcmp(args[0].from, "PATH_CHECK") == 0)
				entry->func = FILE_CHECK;
			else if (strcmp(args[0].from, "FILE_MMAP") == 0)
				entry->func = FILE_MMAP;
			else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
				entry->func = BPRM_CHECK;
			else
				result = -EINVAL;
			if (!result)
				entry->flags |= IMA_FUNC;
			break;
		case Opt_mask:
			ima_log_string(ab, "mask", args[0].from);

			if (entry->mask)
				result = -EINVAL;

			if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
				entry->mask = MAY_EXEC;
			else if (strcmp(args[0].from, "MAY_WRITE") == 0)
				entry->mask = MAY_WRITE;
			else if (strcmp(args[0].from, "MAY_READ") == 0)
				entry->mask = MAY_READ;
			else if (strcmp(args[0].from, "MAY_APPEND") == 0)
				entry->mask = MAY_APPEND;
			else
				result = -EINVAL;
			if (!result)
				entry->flags |= IMA_MASK;
			break;
		case Opt_fsmagic:
			ima_log_string(ab, "fsmagic", args[0].from);

			if (entry->fsmagic) {
				result = -EINVAL;
				break;
			}

			result = strict_strtoul(args[0].from, 16,
						&entry->fsmagic);
			if (!result)
				entry->flags |= IMA_FSMAGIC;
			break;
		case Opt_uid:
			ima_log_string(ab, "uid", args[0].from);

			if (entry->uid != -1) {
				result = -EINVAL;
				break;
			}

			result = strict_strtoul(args[0].from, 10, &lnum);
			if (!result) {
				entry->uid = (uid_t) lnum;
				if (entry->uid != lnum)
					result = -EINVAL;
				else
					entry->flags |= IMA_UID;
			}
			break;
		case Opt_obj_user:
			ima_log_string(ab, "obj_user", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_OBJ_USER,
						   AUDIT_OBJ_USER);
			break;
		case Opt_obj_role:
			ima_log_string(ab, "obj_role", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_OBJ_ROLE,
						   AUDIT_OBJ_ROLE);
			break;
		case Opt_obj_type:
			ima_log_string(ab, "obj_type", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_OBJ_TYPE,
						   AUDIT_OBJ_TYPE);
			break;
		case Opt_subj_user:
			ima_log_string(ab, "subj_user", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_SUBJ_USER,
						   AUDIT_SUBJ_USER);
			break;
		case Opt_subj_role:
			ima_log_string(ab, "subj_role", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_SUBJ_ROLE,
						   AUDIT_SUBJ_ROLE);
			break;
		case Opt_subj_type:
			ima_log_string(ab, "subj_type", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_SUBJ_TYPE,
						   AUDIT_SUBJ_TYPE);
			break;
		case Opt_err:
			ima_log_string(ab, "UNKNOWN", p);
			result = -EINVAL;
			break;
		}
	}
	if (!result && (entry->action == UNKNOWN))
		result = -EINVAL;

	audit_log_format(ab, "res=%d", !!result);
	audit_log_end(ab);
	return result;
}

/**
 * ima_parse_add_rule - add a rule to measure_policy_rules
 * @rule - ima measurement policy rule
 *
 * Uses a mutex to protect the policy list from multiple concurrent writers.
 * Returns the length of the rule parsed, an error code on failure
 */
ssize_t ima_parse_add_rule(char *rule)
{
	const char *op = "update_policy";
	char *p;
	struct ima_measure_rule_entry *entry;
	ssize_t result, len;
	int audit_info = 0;

	/* Prevent installed policy from changing */
	if (ima_measure != &measure_default_rules) {
		integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
				    NULL, op, "already exists",
				    -EACCES, audit_info);
		return -EACCES;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
				    NULL, op, "-ENOMEM", -ENOMEM, audit_info);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&entry->list);
	p = strsep(&rule, "\n");
	len = strlen(p) + 1;

	if (*p == '#') {
		kfree(entry);
		return len;
	}

	result = ima_parse_rule(p, entry);
	if (result) {
		kfree(entry);
		integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
				    NULL, op, "invalid policy", result,
				    audit_info);
		return result;
	}

	mutex_lock(&ima_measure_mutex);
	list_add_tail(&entry->list, &measure_policy_rules);
	mutex_unlock(&ima_measure_mutex);

	return len;
}

/* ima_delete_rules called to cleanup invalid policy */
void ima_delete_rules(void)
{
	struct ima_measure_rule_entry *entry, *tmp;

	mutex_lock(&ima_measure_mutex);
	list_for_each_entry_safe(entry, tmp, &measure_policy_rules, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&ima_measure_mutex);
}
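
/*
 * Illustrative example, not part of the original file: ima_parse_rule()
 * above accepts one rule per line, built from whitespace-separated
 * "key=value" tokens, e.g.:
 *
 *	measure func=BPRM_CHECK mask=MAY_EXEC
 *	measure func=FILE_MMAP mask=MAY_EXEC
 *	measure func=FILE_CHECK mask=MAY_READ uid=0
 *	dont_measure fsmagic=0x9fa0
 *
 * (0x9fa0 is PROC_SUPER_MAGIC.)  Each line written to the securityfs
 * policy interface reaches ima_parse_add_rule() and, if it parses
 * cleanly, is appended to measure_policy_rules.
 */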
/* * Glue Code for assembler optimized version of Camellia * * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Camellia parts based on code by: * Copyright (C) 2006 NTT (Nippon Telegraph and Telephone Corporation) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <asm/processor.h> #include <asm/unaligned.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <crypto/algapi.h> #include <crypto/lrw.h> #include <crypto/xts.h> #include <asm/crypto/camellia.h> #include <asm/crypto/glue_helper.h> /* regular block cipher functions */ asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, const u8 *src, bool xor); EXPORT_SYMBOL_GPL(__camellia_enc_blk); asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_dec_blk); /* 2-way parallel cipher functions */ asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, const u8 *src, bool xor); EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way); asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_dec_blk_2way); static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { camellia_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { camellia_dec_blk(crypto_tfm_ctx(tfm), dst, src); } /* camellia sboxes */ __visible const u64 camellia_sp10011110[256] = { 0x7000007070707000ULL, 0x8200008282828200ULL, 0x2c00002c2c2c2c00ULL, 0xec0000ecececec00ULL, 0xb30000b3b3b3b300ULL, 0x2700002727272700ULL, 0xc00000c0c0c0c000ULL, 0xe50000e5e5e5e500ULL, 0xe40000e4e4e4e400ULL, 0x8500008585858500ULL, 0x5700005757575700ULL, 0x3500003535353500ULL, 0xea0000eaeaeaea00ULL, 0x0c00000c0c0c0c00ULL, 0xae0000aeaeaeae00ULL, 0x4100004141414100ULL, 0x2300002323232300ULL, 0xef0000efefefef00ULL, 0x6b00006b6b6b6b00ULL, 0x9300009393939300ULL, 0x4500004545454500ULL, 0x1900001919191900ULL, 0xa50000a5a5a5a500ULL, 0x2100002121212100ULL, 0xed0000edededed00ULL, 0x0e00000e0e0e0e00ULL, 0x4f00004f4f4f4f00ULL, 0x4e00004e4e4e4e00ULL, 0x1d00001d1d1d1d00ULL, 0x6500006565656500ULL, 0x9200009292929200ULL, 0xbd0000bdbdbdbd00ULL, 0x8600008686868600ULL, 0xb80000b8b8b8b800ULL, 0xaf0000afafafaf00ULL, 0x8f00008f8f8f8f00ULL, 0x7c00007c7c7c7c00ULL, 0xeb0000ebebebeb00ULL, 0x1f00001f1f1f1f00ULL, 0xce0000cececece00ULL, 0x3e00003e3e3e3e00ULL, 0x3000003030303000ULL, 0xdc0000dcdcdcdc00ULL, 0x5f00005f5f5f5f00ULL, 0x5e00005e5e5e5e00ULL, 0xc50000c5c5c5c500ULL, 0x0b00000b0b0b0b00ULL, 0x1a00001a1a1a1a00ULL, 0xa60000a6a6a6a600ULL, 0xe10000e1e1e1e100ULL, 0x3900003939393900ULL, 0xca0000cacacaca00ULL, 0xd50000d5d5d5d500ULL, 0x4700004747474700ULL, 0x5d00005d5d5d5d00ULL, 0x3d00003d3d3d3d00ULL, 0xd90000d9d9d9d900ULL, 0x0100000101010100ULL, 0x5a00005a5a5a5a00ULL, 0xd60000d6d6d6d600ULL, 
0x5100005151515100ULL, 0x5600005656565600ULL, 0x6c00006c6c6c6c00ULL, 0x4d00004d4d4d4d00ULL, 0x8b00008b8b8b8b00ULL, 0x0d00000d0d0d0d00ULL, 0x9a00009a9a9a9a00ULL, 0x6600006666666600ULL, 0xfb0000fbfbfbfb00ULL, 0xcc0000cccccccc00ULL, 0xb00000b0b0b0b000ULL, 0x2d00002d2d2d2d00ULL, 0x7400007474747400ULL, 0x1200001212121200ULL, 0x2b00002b2b2b2b00ULL, 0x2000002020202000ULL, 0xf00000f0f0f0f000ULL, 0xb10000b1b1b1b100ULL, 0x8400008484848400ULL, 0x9900009999999900ULL, 0xdf0000dfdfdfdf00ULL, 0x4c00004c4c4c4c00ULL, 0xcb0000cbcbcbcb00ULL, 0xc20000c2c2c2c200ULL, 0x3400003434343400ULL, 0x7e00007e7e7e7e00ULL, 0x7600007676767600ULL, 0x0500000505050500ULL, 0x6d00006d6d6d6d00ULL, 0xb70000b7b7b7b700ULL, 0xa90000a9a9a9a900ULL, 0x3100003131313100ULL, 0xd10000d1d1d1d100ULL, 0x1700001717171700ULL, 0x0400000404040400ULL, 0xd70000d7d7d7d700ULL, 0x1400001414141400ULL, 0x5800005858585800ULL, 0x3a00003a3a3a3a00ULL, 0x6100006161616100ULL, 0xde0000dededede00ULL, 0x1b00001b1b1b1b00ULL, 0x1100001111111100ULL, 0x1c00001c1c1c1c00ULL, 0x3200003232323200ULL, 0x0f00000f0f0f0f00ULL, 0x9c00009c9c9c9c00ULL, 0x1600001616161600ULL, 0x5300005353535300ULL, 0x1800001818181800ULL, 0xf20000f2f2f2f200ULL, 0x2200002222222200ULL, 0xfe0000fefefefe00ULL, 0x4400004444444400ULL, 0xcf0000cfcfcfcf00ULL, 0xb20000b2b2b2b200ULL, 0xc30000c3c3c3c300ULL, 0xb50000b5b5b5b500ULL, 0x7a00007a7a7a7a00ULL, 0x9100009191919100ULL, 0x2400002424242400ULL, 0x0800000808080800ULL, 0xe80000e8e8e8e800ULL, 0xa80000a8a8a8a800ULL, 0x6000006060606000ULL, 0xfc0000fcfcfcfc00ULL, 0x6900006969696900ULL, 0x5000005050505000ULL, 0xaa0000aaaaaaaa00ULL, 0xd00000d0d0d0d000ULL, 0xa00000a0a0a0a000ULL, 0x7d00007d7d7d7d00ULL, 0xa10000a1a1a1a100ULL, 0x8900008989898900ULL, 0x6200006262626200ULL, 0x9700009797979700ULL, 0x5400005454545400ULL, 0x5b00005b5b5b5b00ULL, 0x1e00001e1e1e1e00ULL, 0x9500009595959500ULL, 0xe00000e0e0e0e000ULL, 0xff0000ffffffff00ULL, 0x6400006464646400ULL, 0xd20000d2d2d2d200ULL, 0x1000001010101000ULL, 0xc40000c4c4c4c400ULL, 0x0000000000000000ULL, 0x4800004848484800ULL, 0xa30000a3a3a3a300ULL, 0xf70000f7f7f7f700ULL, 0x7500007575757500ULL, 0xdb0000dbdbdbdb00ULL, 0x8a00008a8a8a8a00ULL, 0x0300000303030300ULL, 0xe60000e6e6e6e600ULL, 0xda0000dadadada00ULL, 0x0900000909090900ULL, 0x3f00003f3f3f3f00ULL, 0xdd0000dddddddd00ULL, 0x9400009494949400ULL, 0x8700008787878700ULL, 0x5c00005c5c5c5c00ULL, 0x8300008383838300ULL, 0x0200000202020200ULL, 0xcd0000cdcdcdcd00ULL, 0x4a00004a4a4a4a00ULL, 0x9000009090909000ULL, 0x3300003333333300ULL, 0x7300007373737300ULL, 0x6700006767676700ULL, 0xf60000f6f6f6f600ULL, 0xf30000f3f3f3f300ULL, 0x9d00009d9d9d9d00ULL, 0x7f00007f7f7f7f00ULL, 0xbf0000bfbfbfbf00ULL, 0xe20000e2e2e2e200ULL, 0x5200005252525200ULL, 0x9b00009b9b9b9b00ULL, 0xd80000d8d8d8d800ULL, 0x2600002626262600ULL, 0xc80000c8c8c8c800ULL, 0x3700003737373700ULL, 0xc60000c6c6c6c600ULL, 0x3b00003b3b3b3b00ULL, 0x8100008181818100ULL, 0x9600009696969600ULL, 0x6f00006f6f6f6f00ULL, 0x4b00004b4b4b4b00ULL, 0x1300001313131300ULL, 0xbe0000bebebebe00ULL, 0x6300006363636300ULL, 0x2e00002e2e2e2e00ULL, 0xe90000e9e9e9e900ULL, 0x7900007979797900ULL, 0xa70000a7a7a7a700ULL, 0x8c00008c8c8c8c00ULL, 0x9f00009f9f9f9f00ULL, 0x6e00006e6e6e6e00ULL, 0xbc0000bcbcbcbc00ULL, 0x8e00008e8e8e8e00ULL, 0x2900002929292900ULL, 0xf50000f5f5f5f500ULL, 0xf90000f9f9f9f900ULL, 0xb60000b6b6b6b600ULL, 0x2f00002f2f2f2f00ULL, 0xfd0000fdfdfdfd00ULL, 0xb40000b4b4b4b400ULL, 0x5900005959595900ULL, 0x7800007878787800ULL, 0x9800009898989800ULL, 0x0600000606060600ULL, 0x6a00006a6a6a6a00ULL, 0xe70000e7e7e7e700ULL, 0x4600004646464600ULL, 
0x7100007171717100ULL, 0xba0000babababa00ULL, 0xd40000d4d4d4d400ULL, 0x2500002525252500ULL, 0xab0000abababab00ULL, 0x4200004242424200ULL, 0x8800008888888800ULL, 0xa20000a2a2a2a200ULL, 0x8d00008d8d8d8d00ULL, 0xfa0000fafafafa00ULL, 0x7200007272727200ULL, 0x0700000707070700ULL, 0xb90000b9b9b9b900ULL, 0x5500005555555500ULL, 0xf80000f8f8f8f800ULL, 0xee0000eeeeeeee00ULL, 0xac0000acacacac00ULL, 0x0a00000a0a0a0a00ULL, 0x3600003636363600ULL, 0x4900004949494900ULL, 0x2a00002a2a2a2a00ULL, 0x6800006868686800ULL, 0x3c00003c3c3c3c00ULL, 0x3800003838383800ULL, 0xf10000f1f1f1f100ULL, 0xa40000a4a4a4a400ULL, 0x4000004040404000ULL, 0x2800002828282800ULL, 0xd30000d3d3d3d300ULL, 0x7b00007b7b7b7b00ULL, 0xbb0000bbbbbbbb00ULL, 0xc90000c9c9c9c900ULL, 0x4300004343434300ULL, 0xc10000c1c1c1c100ULL, 0x1500001515151500ULL, 0xe30000e3e3e3e300ULL, 0xad0000adadadad00ULL, 0xf40000f4f4f4f400ULL, 0x7700007777777700ULL, 0xc70000c7c7c7c700ULL, 0x8000008080808000ULL, 0x9e00009e9e9e9e00ULL, }; __visible const u64 camellia_sp22000222[256] = { 0xe0e0000000e0e0e0ULL, 0x0505000000050505ULL, 0x5858000000585858ULL, 0xd9d9000000d9d9d9ULL, 0x6767000000676767ULL, 0x4e4e0000004e4e4eULL, 0x8181000000818181ULL, 0xcbcb000000cbcbcbULL, 0xc9c9000000c9c9c9ULL, 0x0b0b0000000b0b0bULL, 0xaeae000000aeaeaeULL, 0x6a6a0000006a6a6aULL, 0xd5d5000000d5d5d5ULL, 0x1818000000181818ULL, 0x5d5d0000005d5d5dULL, 0x8282000000828282ULL, 0x4646000000464646ULL, 0xdfdf000000dfdfdfULL, 0xd6d6000000d6d6d6ULL, 0x2727000000272727ULL, 0x8a8a0000008a8a8aULL, 0x3232000000323232ULL, 0x4b4b0000004b4b4bULL, 0x4242000000424242ULL, 0xdbdb000000dbdbdbULL, 0x1c1c0000001c1c1cULL, 0x9e9e0000009e9e9eULL, 0x9c9c0000009c9c9cULL, 0x3a3a0000003a3a3aULL, 0xcaca000000cacacaULL, 0x2525000000252525ULL, 0x7b7b0000007b7b7bULL, 0x0d0d0000000d0d0dULL, 0x7171000000717171ULL, 0x5f5f0000005f5f5fULL, 0x1f1f0000001f1f1fULL, 0xf8f8000000f8f8f8ULL, 0xd7d7000000d7d7d7ULL, 0x3e3e0000003e3e3eULL, 0x9d9d0000009d9d9dULL, 0x7c7c0000007c7c7cULL, 0x6060000000606060ULL, 0xb9b9000000b9b9b9ULL, 0xbebe000000bebebeULL, 0xbcbc000000bcbcbcULL, 0x8b8b0000008b8b8bULL, 0x1616000000161616ULL, 0x3434000000343434ULL, 0x4d4d0000004d4d4dULL, 0xc3c3000000c3c3c3ULL, 0x7272000000727272ULL, 0x9595000000959595ULL, 0xabab000000abababULL, 0x8e8e0000008e8e8eULL, 0xbaba000000bababaULL, 0x7a7a0000007a7a7aULL, 0xb3b3000000b3b3b3ULL, 0x0202000000020202ULL, 0xb4b4000000b4b4b4ULL, 0xadad000000adadadULL, 0xa2a2000000a2a2a2ULL, 0xacac000000acacacULL, 0xd8d8000000d8d8d8ULL, 0x9a9a0000009a9a9aULL, 0x1717000000171717ULL, 0x1a1a0000001a1a1aULL, 0x3535000000353535ULL, 0xcccc000000ccccccULL, 0xf7f7000000f7f7f7ULL, 0x9999000000999999ULL, 0x6161000000616161ULL, 0x5a5a0000005a5a5aULL, 0xe8e8000000e8e8e8ULL, 0x2424000000242424ULL, 0x5656000000565656ULL, 0x4040000000404040ULL, 0xe1e1000000e1e1e1ULL, 0x6363000000636363ULL, 0x0909000000090909ULL, 0x3333000000333333ULL, 0xbfbf000000bfbfbfULL, 0x9898000000989898ULL, 0x9797000000979797ULL, 0x8585000000858585ULL, 0x6868000000686868ULL, 0xfcfc000000fcfcfcULL, 0xecec000000ecececULL, 0x0a0a0000000a0a0aULL, 0xdada000000dadadaULL, 0x6f6f0000006f6f6fULL, 0x5353000000535353ULL, 0x6262000000626262ULL, 0xa3a3000000a3a3a3ULL, 0x2e2e0000002e2e2eULL, 0x0808000000080808ULL, 0xafaf000000afafafULL, 0x2828000000282828ULL, 0xb0b0000000b0b0b0ULL, 0x7474000000747474ULL, 0xc2c2000000c2c2c2ULL, 0xbdbd000000bdbdbdULL, 0x3636000000363636ULL, 0x2222000000222222ULL, 0x3838000000383838ULL, 0x6464000000646464ULL, 0x1e1e0000001e1e1eULL, 0x3939000000393939ULL, 0x2c2c0000002c2c2cULL, 0xa6a6000000a6a6a6ULL, 0x3030000000303030ULL, 
0xe5e5000000e5e5e5ULL, 0x4444000000444444ULL, 0xfdfd000000fdfdfdULL, 0x8888000000888888ULL, 0x9f9f0000009f9f9fULL, 0x6565000000656565ULL, 0x8787000000878787ULL, 0x6b6b0000006b6b6bULL, 0xf4f4000000f4f4f4ULL, 0x2323000000232323ULL, 0x4848000000484848ULL, 0x1010000000101010ULL, 0xd1d1000000d1d1d1ULL, 0x5151000000515151ULL, 0xc0c0000000c0c0c0ULL, 0xf9f9000000f9f9f9ULL, 0xd2d2000000d2d2d2ULL, 0xa0a0000000a0a0a0ULL, 0x5555000000555555ULL, 0xa1a1000000a1a1a1ULL, 0x4141000000414141ULL, 0xfafa000000fafafaULL, 0x4343000000434343ULL, 0x1313000000131313ULL, 0xc4c4000000c4c4c4ULL, 0x2f2f0000002f2f2fULL, 0xa8a8000000a8a8a8ULL, 0xb6b6000000b6b6b6ULL, 0x3c3c0000003c3c3cULL, 0x2b2b0000002b2b2bULL, 0xc1c1000000c1c1c1ULL, 0xffff000000ffffffULL, 0xc8c8000000c8c8c8ULL, 0xa5a5000000a5a5a5ULL, 0x2020000000202020ULL, 0x8989000000898989ULL, 0x0000000000000000ULL, 0x9090000000909090ULL, 0x4747000000474747ULL, 0xefef000000efefefULL, 0xeaea000000eaeaeaULL, 0xb7b7000000b7b7b7ULL, 0x1515000000151515ULL, 0x0606000000060606ULL, 0xcdcd000000cdcdcdULL, 0xb5b5000000b5b5b5ULL, 0x1212000000121212ULL, 0x7e7e0000007e7e7eULL, 0xbbbb000000bbbbbbULL, 0x2929000000292929ULL, 0x0f0f0000000f0f0fULL, 0xb8b8000000b8b8b8ULL, 0x0707000000070707ULL, 0x0404000000040404ULL, 0x9b9b0000009b9b9bULL, 0x9494000000949494ULL, 0x2121000000212121ULL, 0x6666000000666666ULL, 0xe6e6000000e6e6e6ULL, 0xcece000000cececeULL, 0xeded000000edededULL, 0xe7e7000000e7e7e7ULL, 0x3b3b0000003b3b3bULL, 0xfefe000000fefefeULL, 0x7f7f0000007f7f7fULL, 0xc5c5000000c5c5c5ULL, 0xa4a4000000a4a4a4ULL, 0x3737000000373737ULL, 0xb1b1000000b1b1b1ULL, 0x4c4c0000004c4c4cULL, 0x9191000000919191ULL, 0x6e6e0000006e6e6eULL, 0x8d8d0000008d8d8dULL, 0x7676000000767676ULL, 0x0303000000030303ULL, 0x2d2d0000002d2d2dULL, 0xdede000000dededeULL, 0x9696000000969696ULL, 0x2626000000262626ULL, 0x7d7d0000007d7d7dULL, 0xc6c6000000c6c6c6ULL, 0x5c5c0000005c5c5cULL, 0xd3d3000000d3d3d3ULL, 0xf2f2000000f2f2f2ULL, 0x4f4f0000004f4f4fULL, 0x1919000000191919ULL, 0x3f3f0000003f3f3fULL, 0xdcdc000000dcdcdcULL, 0x7979000000797979ULL, 0x1d1d0000001d1d1dULL, 0x5252000000525252ULL, 0xebeb000000ebebebULL, 0xf3f3000000f3f3f3ULL, 0x6d6d0000006d6d6dULL, 0x5e5e0000005e5e5eULL, 0xfbfb000000fbfbfbULL, 0x6969000000696969ULL, 0xb2b2000000b2b2b2ULL, 0xf0f0000000f0f0f0ULL, 0x3131000000313131ULL, 0x0c0c0000000c0c0cULL, 0xd4d4000000d4d4d4ULL, 0xcfcf000000cfcfcfULL, 0x8c8c0000008c8c8cULL, 0xe2e2000000e2e2e2ULL, 0x7575000000757575ULL, 0xa9a9000000a9a9a9ULL, 0x4a4a0000004a4a4aULL, 0x5757000000575757ULL, 0x8484000000848484ULL, 0x1111000000111111ULL, 0x4545000000454545ULL, 0x1b1b0000001b1b1bULL, 0xf5f5000000f5f5f5ULL, 0xe4e4000000e4e4e4ULL, 0x0e0e0000000e0e0eULL, 0x7373000000737373ULL, 0xaaaa000000aaaaaaULL, 0xf1f1000000f1f1f1ULL, 0xdddd000000ddddddULL, 0x5959000000595959ULL, 0x1414000000141414ULL, 0x6c6c0000006c6c6cULL, 0x9292000000929292ULL, 0x5454000000545454ULL, 0xd0d0000000d0d0d0ULL, 0x7878000000787878ULL, 0x7070000000707070ULL, 0xe3e3000000e3e3e3ULL, 0x4949000000494949ULL, 0x8080000000808080ULL, 0x5050000000505050ULL, 0xa7a7000000a7a7a7ULL, 0xf6f6000000f6f6f6ULL, 0x7777000000777777ULL, 0x9393000000939393ULL, 0x8686000000868686ULL, 0x8383000000838383ULL, 0x2a2a0000002a2a2aULL, 0xc7c7000000c7c7c7ULL, 0x5b5b0000005b5b5bULL, 0xe9e9000000e9e9e9ULL, 0xeeee000000eeeeeeULL, 0x8f8f0000008f8f8fULL, 0x0101000000010101ULL, 0x3d3d0000003d3d3dULL, }; __visible const u64 camellia_sp03303033[256] = { 0x0038380038003838ULL, 0x0041410041004141ULL, 0x0016160016001616ULL, 0x0076760076007676ULL, 0x00d9d900d900d9d9ULL, 0x0093930093009393ULL, 
0x0060600060006060ULL, 0x00f2f200f200f2f2ULL, 0x0072720072007272ULL, 0x00c2c200c200c2c2ULL, 0x00abab00ab00ababULL, 0x009a9a009a009a9aULL, 0x0075750075007575ULL, 0x0006060006000606ULL, 0x0057570057005757ULL, 0x00a0a000a000a0a0ULL, 0x0091910091009191ULL, 0x00f7f700f700f7f7ULL, 0x00b5b500b500b5b5ULL, 0x00c9c900c900c9c9ULL, 0x00a2a200a200a2a2ULL, 0x008c8c008c008c8cULL, 0x00d2d200d200d2d2ULL, 0x0090900090009090ULL, 0x00f6f600f600f6f6ULL, 0x0007070007000707ULL, 0x00a7a700a700a7a7ULL, 0x0027270027002727ULL, 0x008e8e008e008e8eULL, 0x00b2b200b200b2b2ULL, 0x0049490049004949ULL, 0x00dede00de00dedeULL, 0x0043430043004343ULL, 0x005c5c005c005c5cULL, 0x00d7d700d700d7d7ULL, 0x00c7c700c700c7c7ULL, 0x003e3e003e003e3eULL, 0x00f5f500f500f5f5ULL, 0x008f8f008f008f8fULL, 0x0067670067006767ULL, 0x001f1f001f001f1fULL, 0x0018180018001818ULL, 0x006e6e006e006e6eULL, 0x00afaf00af00afafULL, 0x002f2f002f002f2fULL, 0x00e2e200e200e2e2ULL, 0x0085850085008585ULL, 0x000d0d000d000d0dULL, 0x0053530053005353ULL, 0x00f0f000f000f0f0ULL, 0x009c9c009c009c9cULL, 0x0065650065006565ULL, 0x00eaea00ea00eaeaULL, 0x00a3a300a300a3a3ULL, 0x00aeae00ae00aeaeULL, 0x009e9e009e009e9eULL, 0x00ecec00ec00ececULL, 0x0080800080008080ULL, 0x002d2d002d002d2dULL, 0x006b6b006b006b6bULL, 0x00a8a800a800a8a8ULL, 0x002b2b002b002b2bULL, 0x0036360036003636ULL, 0x00a6a600a600a6a6ULL, 0x00c5c500c500c5c5ULL, 0x0086860086008686ULL, 0x004d4d004d004d4dULL, 0x0033330033003333ULL, 0x00fdfd00fd00fdfdULL, 0x0066660066006666ULL, 0x0058580058005858ULL, 0x0096960096009696ULL, 0x003a3a003a003a3aULL, 0x0009090009000909ULL, 0x0095950095009595ULL, 0x0010100010001010ULL, 0x0078780078007878ULL, 0x00d8d800d800d8d8ULL, 0x0042420042004242ULL, 0x00cccc00cc00ccccULL, 0x00efef00ef00efefULL, 0x0026260026002626ULL, 0x00e5e500e500e5e5ULL, 0x0061610061006161ULL, 0x001a1a001a001a1aULL, 0x003f3f003f003f3fULL, 0x003b3b003b003b3bULL, 0x0082820082008282ULL, 0x00b6b600b600b6b6ULL, 0x00dbdb00db00dbdbULL, 0x00d4d400d400d4d4ULL, 0x0098980098009898ULL, 0x00e8e800e800e8e8ULL, 0x008b8b008b008b8bULL, 0x0002020002000202ULL, 0x00ebeb00eb00ebebULL, 0x000a0a000a000a0aULL, 0x002c2c002c002c2cULL, 0x001d1d001d001d1dULL, 0x00b0b000b000b0b0ULL, 0x006f6f006f006f6fULL, 0x008d8d008d008d8dULL, 0x0088880088008888ULL, 0x000e0e000e000e0eULL, 0x0019190019001919ULL, 0x0087870087008787ULL, 0x004e4e004e004e4eULL, 0x000b0b000b000b0bULL, 0x00a9a900a900a9a9ULL, 0x000c0c000c000c0cULL, 0x0079790079007979ULL, 0x0011110011001111ULL, 0x007f7f007f007f7fULL, 0x0022220022002222ULL, 0x00e7e700e700e7e7ULL, 0x0059590059005959ULL, 0x00e1e100e100e1e1ULL, 0x00dada00da00dadaULL, 0x003d3d003d003d3dULL, 0x00c8c800c800c8c8ULL, 0x0012120012001212ULL, 0x0004040004000404ULL, 0x0074740074007474ULL, 0x0054540054005454ULL, 0x0030300030003030ULL, 0x007e7e007e007e7eULL, 0x00b4b400b400b4b4ULL, 0x0028280028002828ULL, 0x0055550055005555ULL, 0x0068680068006868ULL, 0x0050500050005050ULL, 0x00bebe00be00bebeULL, 0x00d0d000d000d0d0ULL, 0x00c4c400c400c4c4ULL, 0x0031310031003131ULL, 0x00cbcb00cb00cbcbULL, 0x002a2a002a002a2aULL, 0x00adad00ad00adadULL, 0x000f0f000f000f0fULL, 0x00caca00ca00cacaULL, 0x0070700070007070ULL, 0x00ffff00ff00ffffULL, 0x0032320032003232ULL, 0x0069690069006969ULL, 0x0008080008000808ULL, 0x0062620062006262ULL, 0x0000000000000000ULL, 0x0024240024002424ULL, 0x00d1d100d100d1d1ULL, 0x00fbfb00fb00fbfbULL, 0x00baba00ba00babaULL, 0x00eded00ed00ededULL, 0x0045450045004545ULL, 0x0081810081008181ULL, 0x0073730073007373ULL, 0x006d6d006d006d6dULL, 0x0084840084008484ULL, 0x009f9f009f009f9fULL, 0x00eeee00ee00eeeeULL, 0x004a4a004a004a4aULL, 
0x00c3c300c300c3c3ULL, 0x002e2e002e002e2eULL, 0x00c1c100c100c1c1ULL, 0x0001010001000101ULL, 0x00e6e600e600e6e6ULL, 0x0025250025002525ULL, 0x0048480048004848ULL, 0x0099990099009999ULL, 0x00b9b900b900b9b9ULL, 0x00b3b300b300b3b3ULL, 0x007b7b007b007b7bULL, 0x00f9f900f900f9f9ULL, 0x00cece00ce00ceceULL, 0x00bfbf00bf00bfbfULL, 0x00dfdf00df00dfdfULL, 0x0071710071007171ULL, 0x0029290029002929ULL, 0x00cdcd00cd00cdcdULL, 0x006c6c006c006c6cULL, 0x0013130013001313ULL, 0x0064640064006464ULL, 0x009b9b009b009b9bULL, 0x0063630063006363ULL, 0x009d9d009d009d9dULL, 0x00c0c000c000c0c0ULL, 0x004b4b004b004b4bULL, 0x00b7b700b700b7b7ULL, 0x00a5a500a500a5a5ULL, 0x0089890089008989ULL, 0x005f5f005f005f5fULL, 0x00b1b100b100b1b1ULL, 0x0017170017001717ULL, 0x00f4f400f400f4f4ULL, 0x00bcbc00bc00bcbcULL, 0x00d3d300d300d3d3ULL, 0x0046460046004646ULL, 0x00cfcf00cf00cfcfULL, 0x0037370037003737ULL, 0x005e5e005e005e5eULL, 0x0047470047004747ULL, 0x0094940094009494ULL, 0x00fafa00fa00fafaULL, 0x00fcfc00fc00fcfcULL, 0x005b5b005b005b5bULL, 0x0097970097009797ULL, 0x00fefe00fe00fefeULL, 0x005a5a005a005a5aULL, 0x00acac00ac00acacULL, 0x003c3c003c003c3cULL, 0x004c4c004c004c4cULL, 0x0003030003000303ULL, 0x0035350035003535ULL, 0x00f3f300f300f3f3ULL, 0x0023230023002323ULL, 0x00b8b800b800b8b8ULL, 0x005d5d005d005d5dULL, 0x006a6a006a006a6aULL, 0x0092920092009292ULL, 0x00d5d500d500d5d5ULL, 0x0021210021002121ULL, 0x0044440044004444ULL, 0x0051510051005151ULL, 0x00c6c600c600c6c6ULL, 0x007d7d007d007d7dULL, 0x0039390039003939ULL, 0x0083830083008383ULL, 0x00dcdc00dc00dcdcULL, 0x00aaaa00aa00aaaaULL, 0x007c7c007c007c7cULL, 0x0077770077007777ULL, 0x0056560056005656ULL, 0x0005050005000505ULL, 0x001b1b001b001b1bULL, 0x00a4a400a400a4a4ULL, 0x0015150015001515ULL, 0x0034340034003434ULL, 0x001e1e001e001e1eULL, 0x001c1c001c001c1cULL, 0x00f8f800f800f8f8ULL, 0x0052520052005252ULL, 0x0020200020002020ULL, 0x0014140014001414ULL, 0x00e9e900e900e9e9ULL, 0x00bdbd00bd00bdbdULL, 0x00dddd00dd00ddddULL, 0x00e4e400e400e4e4ULL, 0x00a1a100a100a1a1ULL, 0x00e0e000e000e0e0ULL, 0x008a8a008a008a8aULL, 0x00f1f100f100f1f1ULL, 0x00d6d600d600d6d6ULL, 0x007a7a007a007a7aULL, 0x00bbbb00bb00bbbbULL, 0x00e3e300e300e3e3ULL, 0x0040400040004040ULL, 0x004f4f004f004f4fULL, }; __visible const u64 camellia_sp00444404[256] = { 0x0000707070700070ULL, 0x00002c2c2c2c002cULL, 0x0000b3b3b3b300b3ULL, 0x0000c0c0c0c000c0ULL, 0x0000e4e4e4e400e4ULL, 0x0000575757570057ULL, 0x0000eaeaeaea00eaULL, 0x0000aeaeaeae00aeULL, 0x0000232323230023ULL, 0x00006b6b6b6b006bULL, 0x0000454545450045ULL, 0x0000a5a5a5a500a5ULL, 0x0000edededed00edULL, 0x00004f4f4f4f004fULL, 0x00001d1d1d1d001dULL, 0x0000929292920092ULL, 0x0000868686860086ULL, 0x0000afafafaf00afULL, 0x00007c7c7c7c007cULL, 0x00001f1f1f1f001fULL, 0x00003e3e3e3e003eULL, 0x0000dcdcdcdc00dcULL, 0x00005e5e5e5e005eULL, 0x00000b0b0b0b000bULL, 0x0000a6a6a6a600a6ULL, 0x0000393939390039ULL, 0x0000d5d5d5d500d5ULL, 0x00005d5d5d5d005dULL, 0x0000d9d9d9d900d9ULL, 0x00005a5a5a5a005aULL, 0x0000515151510051ULL, 0x00006c6c6c6c006cULL, 0x00008b8b8b8b008bULL, 0x00009a9a9a9a009aULL, 0x0000fbfbfbfb00fbULL, 0x0000b0b0b0b000b0ULL, 0x0000747474740074ULL, 0x00002b2b2b2b002bULL, 0x0000f0f0f0f000f0ULL, 0x0000848484840084ULL, 0x0000dfdfdfdf00dfULL, 0x0000cbcbcbcb00cbULL, 0x0000343434340034ULL, 0x0000767676760076ULL, 0x00006d6d6d6d006dULL, 0x0000a9a9a9a900a9ULL, 0x0000d1d1d1d100d1ULL, 0x0000040404040004ULL, 0x0000141414140014ULL, 0x00003a3a3a3a003aULL, 0x0000dededede00deULL, 0x0000111111110011ULL, 0x0000323232320032ULL, 0x00009c9c9c9c009cULL, 0x0000535353530053ULL, 0x0000f2f2f2f200f2ULL, 
0x0000fefefefe00feULL, 0x0000cfcfcfcf00cfULL, 0x0000c3c3c3c300c3ULL, 0x00007a7a7a7a007aULL, 0x0000242424240024ULL, 0x0000e8e8e8e800e8ULL, 0x0000606060600060ULL, 0x0000696969690069ULL, 0x0000aaaaaaaa00aaULL, 0x0000a0a0a0a000a0ULL, 0x0000a1a1a1a100a1ULL, 0x0000626262620062ULL, 0x0000545454540054ULL, 0x00001e1e1e1e001eULL, 0x0000e0e0e0e000e0ULL, 0x0000646464640064ULL, 0x0000101010100010ULL, 0x0000000000000000ULL, 0x0000a3a3a3a300a3ULL, 0x0000757575750075ULL, 0x00008a8a8a8a008aULL, 0x0000e6e6e6e600e6ULL, 0x0000090909090009ULL, 0x0000dddddddd00ddULL, 0x0000878787870087ULL, 0x0000838383830083ULL, 0x0000cdcdcdcd00cdULL, 0x0000909090900090ULL, 0x0000737373730073ULL, 0x0000f6f6f6f600f6ULL, 0x00009d9d9d9d009dULL, 0x0000bfbfbfbf00bfULL, 0x0000525252520052ULL, 0x0000d8d8d8d800d8ULL, 0x0000c8c8c8c800c8ULL, 0x0000c6c6c6c600c6ULL, 0x0000818181810081ULL, 0x00006f6f6f6f006fULL, 0x0000131313130013ULL, 0x0000636363630063ULL, 0x0000e9e9e9e900e9ULL, 0x0000a7a7a7a700a7ULL, 0x00009f9f9f9f009fULL, 0x0000bcbcbcbc00bcULL, 0x0000292929290029ULL, 0x0000f9f9f9f900f9ULL, 0x00002f2f2f2f002fULL, 0x0000b4b4b4b400b4ULL, 0x0000787878780078ULL, 0x0000060606060006ULL, 0x0000e7e7e7e700e7ULL, 0x0000717171710071ULL, 0x0000d4d4d4d400d4ULL, 0x0000abababab00abULL, 0x0000888888880088ULL, 0x00008d8d8d8d008dULL, 0x0000727272720072ULL, 0x0000b9b9b9b900b9ULL, 0x0000f8f8f8f800f8ULL, 0x0000acacacac00acULL, 0x0000363636360036ULL, 0x00002a2a2a2a002aULL, 0x00003c3c3c3c003cULL, 0x0000f1f1f1f100f1ULL, 0x0000404040400040ULL, 0x0000d3d3d3d300d3ULL, 0x0000bbbbbbbb00bbULL, 0x0000434343430043ULL, 0x0000151515150015ULL, 0x0000adadadad00adULL, 0x0000777777770077ULL, 0x0000808080800080ULL, 0x0000828282820082ULL, 0x0000ecececec00ecULL, 0x0000272727270027ULL, 0x0000e5e5e5e500e5ULL, 0x0000858585850085ULL, 0x0000353535350035ULL, 0x00000c0c0c0c000cULL, 0x0000414141410041ULL, 0x0000efefefef00efULL, 0x0000939393930093ULL, 0x0000191919190019ULL, 0x0000212121210021ULL, 0x00000e0e0e0e000eULL, 0x00004e4e4e4e004eULL, 0x0000656565650065ULL, 0x0000bdbdbdbd00bdULL, 0x0000b8b8b8b800b8ULL, 0x00008f8f8f8f008fULL, 0x0000ebebebeb00ebULL, 0x0000cececece00ceULL, 0x0000303030300030ULL, 0x00005f5f5f5f005fULL, 0x0000c5c5c5c500c5ULL, 0x00001a1a1a1a001aULL, 0x0000e1e1e1e100e1ULL, 0x0000cacacaca00caULL, 0x0000474747470047ULL, 0x00003d3d3d3d003dULL, 0x0000010101010001ULL, 0x0000d6d6d6d600d6ULL, 0x0000565656560056ULL, 0x00004d4d4d4d004dULL, 0x00000d0d0d0d000dULL, 0x0000666666660066ULL, 0x0000cccccccc00ccULL, 0x00002d2d2d2d002dULL, 0x0000121212120012ULL, 0x0000202020200020ULL, 0x0000b1b1b1b100b1ULL, 0x0000999999990099ULL, 0x00004c4c4c4c004cULL, 0x0000c2c2c2c200c2ULL, 0x00007e7e7e7e007eULL, 0x0000050505050005ULL, 0x0000b7b7b7b700b7ULL, 0x0000313131310031ULL, 0x0000171717170017ULL, 0x0000d7d7d7d700d7ULL, 0x0000585858580058ULL, 0x0000616161610061ULL, 0x00001b1b1b1b001bULL, 0x00001c1c1c1c001cULL, 0x00000f0f0f0f000fULL, 0x0000161616160016ULL, 0x0000181818180018ULL, 0x0000222222220022ULL, 0x0000444444440044ULL, 0x0000b2b2b2b200b2ULL, 0x0000b5b5b5b500b5ULL, 0x0000919191910091ULL, 0x0000080808080008ULL, 0x0000a8a8a8a800a8ULL, 0x0000fcfcfcfc00fcULL, 0x0000505050500050ULL, 0x0000d0d0d0d000d0ULL, 0x00007d7d7d7d007dULL, 0x0000898989890089ULL, 0x0000979797970097ULL, 0x00005b5b5b5b005bULL, 0x0000959595950095ULL, 0x0000ffffffff00ffULL, 0x0000d2d2d2d200d2ULL, 0x0000c4c4c4c400c4ULL, 0x0000484848480048ULL, 0x0000f7f7f7f700f7ULL, 0x0000dbdbdbdb00dbULL, 0x0000030303030003ULL, 0x0000dadadada00daULL, 0x00003f3f3f3f003fULL, 0x0000949494940094ULL, 0x00005c5c5c5c005cULL, 0x0000020202020002ULL, 
0x00004a4a4a4a004aULL, 0x0000333333330033ULL, 0x0000676767670067ULL, 0x0000f3f3f3f300f3ULL, 0x00007f7f7f7f007fULL, 0x0000e2e2e2e200e2ULL, 0x00009b9b9b9b009bULL, 0x0000262626260026ULL, 0x0000373737370037ULL, 0x00003b3b3b3b003bULL, 0x0000969696960096ULL, 0x00004b4b4b4b004bULL, 0x0000bebebebe00beULL, 0x00002e2e2e2e002eULL, 0x0000797979790079ULL, 0x00008c8c8c8c008cULL, 0x00006e6e6e6e006eULL, 0x00008e8e8e8e008eULL, 0x0000f5f5f5f500f5ULL, 0x0000b6b6b6b600b6ULL, 0x0000fdfdfdfd00fdULL, 0x0000595959590059ULL, 0x0000989898980098ULL, 0x00006a6a6a6a006aULL, 0x0000464646460046ULL, 0x0000babababa00baULL, 0x0000252525250025ULL, 0x0000424242420042ULL, 0x0000a2a2a2a200a2ULL, 0x0000fafafafa00faULL, 0x0000070707070007ULL, 0x0000555555550055ULL, 0x0000eeeeeeee00eeULL, 0x00000a0a0a0a000aULL, 0x0000494949490049ULL, 0x0000686868680068ULL, 0x0000383838380038ULL, 0x0000a4a4a4a400a4ULL, 0x0000282828280028ULL, 0x00007b7b7b7b007bULL, 0x0000c9c9c9c900c9ULL, 0x0000c1c1c1c100c1ULL, 0x0000e3e3e3e300e3ULL, 0x0000f4f4f4f400f4ULL, 0x0000c7c7c7c700c7ULL, 0x00009e9e9e9e009eULL, }; __visible const u64 camellia_sp02220222[256] = { 0x00e0e0e000e0e0e0ULL, 0x0005050500050505ULL, 0x0058585800585858ULL, 0x00d9d9d900d9d9d9ULL, 0x0067676700676767ULL, 0x004e4e4e004e4e4eULL, 0x0081818100818181ULL, 0x00cbcbcb00cbcbcbULL, 0x00c9c9c900c9c9c9ULL, 0x000b0b0b000b0b0bULL, 0x00aeaeae00aeaeaeULL, 0x006a6a6a006a6a6aULL, 0x00d5d5d500d5d5d5ULL, 0x0018181800181818ULL, 0x005d5d5d005d5d5dULL, 0x0082828200828282ULL, 0x0046464600464646ULL, 0x00dfdfdf00dfdfdfULL, 0x00d6d6d600d6d6d6ULL, 0x0027272700272727ULL, 0x008a8a8a008a8a8aULL, 0x0032323200323232ULL, 0x004b4b4b004b4b4bULL, 0x0042424200424242ULL, 0x00dbdbdb00dbdbdbULL, 0x001c1c1c001c1c1cULL, 0x009e9e9e009e9e9eULL, 0x009c9c9c009c9c9cULL, 0x003a3a3a003a3a3aULL, 0x00cacaca00cacacaULL, 0x0025252500252525ULL, 0x007b7b7b007b7b7bULL, 0x000d0d0d000d0d0dULL, 0x0071717100717171ULL, 0x005f5f5f005f5f5fULL, 0x001f1f1f001f1f1fULL, 0x00f8f8f800f8f8f8ULL, 0x00d7d7d700d7d7d7ULL, 0x003e3e3e003e3e3eULL, 0x009d9d9d009d9d9dULL, 0x007c7c7c007c7c7cULL, 0x0060606000606060ULL, 0x00b9b9b900b9b9b9ULL, 0x00bebebe00bebebeULL, 0x00bcbcbc00bcbcbcULL, 0x008b8b8b008b8b8bULL, 0x0016161600161616ULL, 0x0034343400343434ULL, 0x004d4d4d004d4d4dULL, 0x00c3c3c300c3c3c3ULL, 0x0072727200727272ULL, 0x0095959500959595ULL, 0x00ababab00abababULL, 0x008e8e8e008e8e8eULL, 0x00bababa00bababaULL, 0x007a7a7a007a7a7aULL, 0x00b3b3b300b3b3b3ULL, 0x0002020200020202ULL, 0x00b4b4b400b4b4b4ULL, 0x00adadad00adadadULL, 0x00a2a2a200a2a2a2ULL, 0x00acacac00acacacULL, 0x00d8d8d800d8d8d8ULL, 0x009a9a9a009a9a9aULL, 0x0017171700171717ULL, 0x001a1a1a001a1a1aULL, 0x0035353500353535ULL, 0x00cccccc00ccccccULL, 0x00f7f7f700f7f7f7ULL, 0x0099999900999999ULL, 0x0061616100616161ULL, 0x005a5a5a005a5a5aULL, 0x00e8e8e800e8e8e8ULL, 0x0024242400242424ULL, 0x0056565600565656ULL, 0x0040404000404040ULL, 0x00e1e1e100e1e1e1ULL, 0x0063636300636363ULL, 0x0009090900090909ULL, 0x0033333300333333ULL, 0x00bfbfbf00bfbfbfULL, 0x0098989800989898ULL, 0x0097979700979797ULL, 0x0085858500858585ULL, 0x0068686800686868ULL, 0x00fcfcfc00fcfcfcULL, 0x00ececec00ecececULL, 0x000a0a0a000a0a0aULL, 0x00dadada00dadadaULL, 0x006f6f6f006f6f6fULL, 0x0053535300535353ULL, 0x0062626200626262ULL, 0x00a3a3a300a3a3a3ULL, 0x002e2e2e002e2e2eULL, 0x0008080800080808ULL, 0x00afafaf00afafafULL, 0x0028282800282828ULL, 0x00b0b0b000b0b0b0ULL, 0x0074747400747474ULL, 0x00c2c2c200c2c2c2ULL, 0x00bdbdbd00bdbdbdULL, 0x0036363600363636ULL, 0x0022222200222222ULL, 0x0038383800383838ULL, 0x0064646400646464ULL, 0x001e1e1e001e1e1eULL, 
0x0039393900393939ULL, 0x002c2c2c002c2c2cULL, 0x00a6a6a600a6a6a6ULL, 0x0030303000303030ULL, 0x00e5e5e500e5e5e5ULL, 0x0044444400444444ULL, 0x00fdfdfd00fdfdfdULL, 0x0088888800888888ULL, 0x009f9f9f009f9f9fULL, 0x0065656500656565ULL, 0x0087878700878787ULL, 0x006b6b6b006b6b6bULL, 0x00f4f4f400f4f4f4ULL, 0x0023232300232323ULL, 0x0048484800484848ULL, 0x0010101000101010ULL, 0x00d1d1d100d1d1d1ULL, 0x0051515100515151ULL, 0x00c0c0c000c0c0c0ULL, 0x00f9f9f900f9f9f9ULL, 0x00d2d2d200d2d2d2ULL, 0x00a0a0a000a0a0a0ULL, 0x0055555500555555ULL, 0x00a1a1a100a1a1a1ULL, 0x0041414100414141ULL, 0x00fafafa00fafafaULL, 0x0043434300434343ULL, 0x0013131300131313ULL, 0x00c4c4c400c4c4c4ULL, 0x002f2f2f002f2f2fULL, 0x00a8a8a800a8a8a8ULL, 0x00b6b6b600b6b6b6ULL, 0x003c3c3c003c3c3cULL, 0x002b2b2b002b2b2bULL, 0x00c1c1c100c1c1c1ULL, 0x00ffffff00ffffffULL, 0x00c8c8c800c8c8c8ULL, 0x00a5a5a500a5a5a5ULL, 0x0020202000202020ULL, 0x0089898900898989ULL, 0x0000000000000000ULL, 0x0090909000909090ULL, 0x0047474700474747ULL, 0x00efefef00efefefULL, 0x00eaeaea00eaeaeaULL, 0x00b7b7b700b7b7b7ULL, 0x0015151500151515ULL, 0x0006060600060606ULL, 0x00cdcdcd00cdcdcdULL, 0x00b5b5b500b5b5b5ULL, 0x0012121200121212ULL, 0x007e7e7e007e7e7eULL, 0x00bbbbbb00bbbbbbULL, 0x0029292900292929ULL, 0x000f0f0f000f0f0fULL, 0x00b8b8b800b8b8b8ULL, 0x0007070700070707ULL, 0x0004040400040404ULL, 0x009b9b9b009b9b9bULL, 0x0094949400949494ULL, 0x0021212100212121ULL, 0x0066666600666666ULL, 0x00e6e6e600e6e6e6ULL, 0x00cecece00cececeULL, 0x00ededed00edededULL, 0x00e7e7e700e7e7e7ULL, 0x003b3b3b003b3b3bULL, 0x00fefefe00fefefeULL, 0x007f7f7f007f7f7fULL, 0x00c5c5c500c5c5c5ULL, 0x00a4a4a400a4a4a4ULL, 0x0037373700373737ULL, 0x00b1b1b100b1b1b1ULL, 0x004c4c4c004c4c4cULL, 0x0091919100919191ULL, 0x006e6e6e006e6e6eULL, 0x008d8d8d008d8d8dULL, 0x0076767600767676ULL, 0x0003030300030303ULL, 0x002d2d2d002d2d2dULL, 0x00dedede00dededeULL, 0x0096969600969696ULL, 0x0026262600262626ULL, 0x007d7d7d007d7d7dULL, 0x00c6c6c600c6c6c6ULL, 0x005c5c5c005c5c5cULL, 0x00d3d3d300d3d3d3ULL, 0x00f2f2f200f2f2f2ULL, 0x004f4f4f004f4f4fULL, 0x0019191900191919ULL, 0x003f3f3f003f3f3fULL, 0x00dcdcdc00dcdcdcULL, 0x0079797900797979ULL, 0x001d1d1d001d1d1dULL, 0x0052525200525252ULL, 0x00ebebeb00ebebebULL, 0x00f3f3f300f3f3f3ULL, 0x006d6d6d006d6d6dULL, 0x005e5e5e005e5e5eULL, 0x00fbfbfb00fbfbfbULL, 0x0069696900696969ULL, 0x00b2b2b200b2b2b2ULL, 0x00f0f0f000f0f0f0ULL, 0x0031313100313131ULL, 0x000c0c0c000c0c0cULL, 0x00d4d4d400d4d4d4ULL, 0x00cfcfcf00cfcfcfULL, 0x008c8c8c008c8c8cULL, 0x00e2e2e200e2e2e2ULL, 0x0075757500757575ULL, 0x00a9a9a900a9a9a9ULL, 0x004a4a4a004a4a4aULL, 0x0057575700575757ULL, 0x0084848400848484ULL, 0x0011111100111111ULL, 0x0045454500454545ULL, 0x001b1b1b001b1b1bULL, 0x00f5f5f500f5f5f5ULL, 0x00e4e4e400e4e4e4ULL, 0x000e0e0e000e0e0eULL, 0x0073737300737373ULL, 0x00aaaaaa00aaaaaaULL, 0x00f1f1f100f1f1f1ULL, 0x00dddddd00ddddddULL, 0x0059595900595959ULL, 0x0014141400141414ULL, 0x006c6c6c006c6c6cULL, 0x0092929200929292ULL, 0x0054545400545454ULL, 0x00d0d0d000d0d0d0ULL, 0x0078787800787878ULL, 0x0070707000707070ULL, 0x00e3e3e300e3e3e3ULL, 0x0049494900494949ULL, 0x0080808000808080ULL, 0x0050505000505050ULL, 0x00a7a7a700a7a7a7ULL, 0x00f6f6f600f6f6f6ULL, 0x0077777700777777ULL, 0x0093939300939393ULL, 0x0086868600868686ULL, 0x0083838300838383ULL, 0x002a2a2a002a2a2aULL, 0x00c7c7c700c7c7c7ULL, 0x005b5b5b005b5b5bULL, 0x00e9e9e900e9e9e9ULL, 0x00eeeeee00eeeeeeULL, 0x008f8f8f008f8f8fULL, 0x0001010100010101ULL, 0x003d3d3d003d3d3dULL, }; __visible const u64 camellia_sp30333033[256] = { 0x3800383838003838ULL, 0x4100414141004141ULL, 
0x1600161616001616ULL, 0x7600767676007676ULL, 0xd900d9d9d900d9d9ULL, 0x9300939393009393ULL, 0x6000606060006060ULL, 0xf200f2f2f200f2f2ULL, 0x7200727272007272ULL, 0xc200c2c2c200c2c2ULL, 0xab00ababab00ababULL, 0x9a009a9a9a009a9aULL, 0x7500757575007575ULL, 0x0600060606000606ULL, 0x5700575757005757ULL, 0xa000a0a0a000a0a0ULL, 0x9100919191009191ULL, 0xf700f7f7f700f7f7ULL, 0xb500b5b5b500b5b5ULL, 0xc900c9c9c900c9c9ULL, 0xa200a2a2a200a2a2ULL, 0x8c008c8c8c008c8cULL, 0xd200d2d2d200d2d2ULL, 0x9000909090009090ULL, 0xf600f6f6f600f6f6ULL, 0x0700070707000707ULL, 0xa700a7a7a700a7a7ULL, 0x2700272727002727ULL, 0x8e008e8e8e008e8eULL, 0xb200b2b2b200b2b2ULL, 0x4900494949004949ULL, 0xde00dedede00dedeULL, 0x4300434343004343ULL, 0x5c005c5c5c005c5cULL, 0xd700d7d7d700d7d7ULL, 0xc700c7c7c700c7c7ULL, 0x3e003e3e3e003e3eULL, 0xf500f5f5f500f5f5ULL, 0x8f008f8f8f008f8fULL, 0x6700676767006767ULL, 0x1f001f1f1f001f1fULL, 0x1800181818001818ULL, 0x6e006e6e6e006e6eULL, 0xaf00afafaf00afafULL, 0x2f002f2f2f002f2fULL, 0xe200e2e2e200e2e2ULL, 0x8500858585008585ULL, 0x0d000d0d0d000d0dULL, 0x5300535353005353ULL, 0xf000f0f0f000f0f0ULL, 0x9c009c9c9c009c9cULL, 0x6500656565006565ULL, 0xea00eaeaea00eaeaULL, 0xa300a3a3a300a3a3ULL, 0xae00aeaeae00aeaeULL, 0x9e009e9e9e009e9eULL, 0xec00ececec00ececULL, 0x8000808080008080ULL, 0x2d002d2d2d002d2dULL, 0x6b006b6b6b006b6bULL, 0xa800a8a8a800a8a8ULL, 0x2b002b2b2b002b2bULL, 0x3600363636003636ULL, 0xa600a6a6a600a6a6ULL, 0xc500c5c5c500c5c5ULL, 0x8600868686008686ULL, 0x4d004d4d4d004d4dULL, 0x3300333333003333ULL, 0xfd00fdfdfd00fdfdULL, 0x6600666666006666ULL, 0x5800585858005858ULL, 0x9600969696009696ULL, 0x3a003a3a3a003a3aULL, 0x0900090909000909ULL, 0x9500959595009595ULL, 0x1000101010001010ULL, 0x7800787878007878ULL, 0xd800d8d8d800d8d8ULL, 0x4200424242004242ULL, 0xcc00cccccc00ccccULL, 0xef00efefef00efefULL, 0x2600262626002626ULL, 0xe500e5e5e500e5e5ULL, 0x6100616161006161ULL, 0x1a001a1a1a001a1aULL, 0x3f003f3f3f003f3fULL, 0x3b003b3b3b003b3bULL, 0x8200828282008282ULL, 0xb600b6b6b600b6b6ULL, 0xdb00dbdbdb00dbdbULL, 0xd400d4d4d400d4d4ULL, 0x9800989898009898ULL, 0xe800e8e8e800e8e8ULL, 0x8b008b8b8b008b8bULL, 0x0200020202000202ULL, 0xeb00ebebeb00ebebULL, 0x0a000a0a0a000a0aULL, 0x2c002c2c2c002c2cULL, 0x1d001d1d1d001d1dULL, 0xb000b0b0b000b0b0ULL, 0x6f006f6f6f006f6fULL, 0x8d008d8d8d008d8dULL, 0x8800888888008888ULL, 0x0e000e0e0e000e0eULL, 0x1900191919001919ULL, 0x8700878787008787ULL, 0x4e004e4e4e004e4eULL, 0x0b000b0b0b000b0bULL, 0xa900a9a9a900a9a9ULL, 0x0c000c0c0c000c0cULL, 0x7900797979007979ULL, 0x1100111111001111ULL, 0x7f007f7f7f007f7fULL, 0x2200222222002222ULL, 0xe700e7e7e700e7e7ULL, 0x5900595959005959ULL, 0xe100e1e1e100e1e1ULL, 0xda00dadada00dadaULL, 0x3d003d3d3d003d3dULL, 0xc800c8c8c800c8c8ULL, 0x1200121212001212ULL, 0x0400040404000404ULL, 0x7400747474007474ULL, 0x5400545454005454ULL, 0x3000303030003030ULL, 0x7e007e7e7e007e7eULL, 0xb400b4b4b400b4b4ULL, 0x2800282828002828ULL, 0x5500555555005555ULL, 0x6800686868006868ULL, 0x5000505050005050ULL, 0xbe00bebebe00bebeULL, 0xd000d0d0d000d0d0ULL, 0xc400c4c4c400c4c4ULL, 0x3100313131003131ULL, 0xcb00cbcbcb00cbcbULL, 0x2a002a2a2a002a2aULL, 0xad00adadad00adadULL, 0x0f000f0f0f000f0fULL, 0xca00cacaca00cacaULL, 0x7000707070007070ULL, 0xff00ffffff00ffffULL, 0x3200323232003232ULL, 0x6900696969006969ULL, 0x0800080808000808ULL, 0x6200626262006262ULL, 0x0000000000000000ULL, 0x2400242424002424ULL, 0xd100d1d1d100d1d1ULL, 0xfb00fbfbfb00fbfbULL, 0xba00bababa00babaULL, 0xed00ededed00ededULL, 0x4500454545004545ULL, 0x8100818181008181ULL, 0x7300737373007373ULL, 0x6d006d6d6d006d6dULL, 
0x8400848484008484ULL, 0x9f009f9f9f009f9fULL, 0xee00eeeeee00eeeeULL, 0x4a004a4a4a004a4aULL, 0xc300c3c3c300c3c3ULL, 0x2e002e2e2e002e2eULL, 0xc100c1c1c100c1c1ULL, 0x0100010101000101ULL, 0xe600e6e6e600e6e6ULL, 0x2500252525002525ULL, 0x4800484848004848ULL, 0x9900999999009999ULL, 0xb900b9b9b900b9b9ULL, 0xb300b3b3b300b3b3ULL, 0x7b007b7b7b007b7bULL, 0xf900f9f9f900f9f9ULL, 0xce00cecece00ceceULL, 0xbf00bfbfbf00bfbfULL, 0xdf00dfdfdf00dfdfULL, 0x7100717171007171ULL, 0x2900292929002929ULL, 0xcd00cdcdcd00cdcdULL, 0x6c006c6c6c006c6cULL, 0x1300131313001313ULL, 0x6400646464006464ULL, 0x9b009b9b9b009b9bULL, 0x6300636363006363ULL, 0x9d009d9d9d009d9dULL, 0xc000c0c0c000c0c0ULL, 0x4b004b4b4b004b4bULL, 0xb700b7b7b700b7b7ULL, 0xa500a5a5a500a5a5ULL, 0x8900898989008989ULL, 0x5f005f5f5f005f5fULL, 0xb100b1b1b100b1b1ULL, 0x1700171717001717ULL, 0xf400f4f4f400f4f4ULL, 0xbc00bcbcbc00bcbcULL, 0xd300d3d3d300d3d3ULL, 0x4600464646004646ULL, 0xcf00cfcfcf00cfcfULL, 0x3700373737003737ULL, 0x5e005e5e5e005e5eULL, 0x4700474747004747ULL, 0x9400949494009494ULL, 0xfa00fafafa00fafaULL, 0xfc00fcfcfc00fcfcULL, 0x5b005b5b5b005b5bULL, 0x9700979797009797ULL, 0xfe00fefefe00fefeULL, 0x5a005a5a5a005a5aULL, 0xac00acacac00acacULL, 0x3c003c3c3c003c3cULL, 0x4c004c4c4c004c4cULL, 0x0300030303000303ULL, 0x3500353535003535ULL, 0xf300f3f3f300f3f3ULL, 0x2300232323002323ULL, 0xb800b8b8b800b8b8ULL, 0x5d005d5d5d005d5dULL, 0x6a006a6a6a006a6aULL, 0x9200929292009292ULL, 0xd500d5d5d500d5d5ULL, 0x2100212121002121ULL, 0x4400444444004444ULL, 0x5100515151005151ULL, 0xc600c6c6c600c6c6ULL, 0x7d007d7d7d007d7dULL, 0x3900393939003939ULL, 0x8300838383008383ULL, 0xdc00dcdcdc00dcdcULL, 0xaa00aaaaaa00aaaaULL, 0x7c007c7c7c007c7cULL, 0x7700777777007777ULL, 0x5600565656005656ULL, 0x0500050505000505ULL, 0x1b001b1b1b001b1bULL, 0xa400a4a4a400a4a4ULL, 0x1500151515001515ULL, 0x3400343434003434ULL, 0x1e001e1e1e001e1eULL, 0x1c001c1c1c001c1cULL, 0xf800f8f8f800f8f8ULL, 0x5200525252005252ULL, 0x2000202020002020ULL, 0x1400141414001414ULL, 0xe900e9e9e900e9e9ULL, 0xbd00bdbdbd00bdbdULL, 0xdd00dddddd00ddddULL, 0xe400e4e4e400e4e4ULL, 0xa100a1a1a100a1a1ULL, 0xe000e0e0e000e0e0ULL, 0x8a008a8a8a008a8aULL, 0xf100f1f1f100f1f1ULL, 0xd600d6d6d600d6d6ULL, 0x7a007a7a7a007a7aULL, 0xbb00bbbbbb00bbbbULL, 0xe300e3e3e300e3e3ULL, 0x4000404040004040ULL, 0x4f004f4f4f004f4fULL, }; __visible const u64 camellia_sp44044404[256] = { 0x7070007070700070ULL, 0x2c2c002c2c2c002cULL, 0xb3b300b3b3b300b3ULL, 0xc0c000c0c0c000c0ULL, 0xe4e400e4e4e400e4ULL, 0x5757005757570057ULL, 0xeaea00eaeaea00eaULL, 0xaeae00aeaeae00aeULL, 0x2323002323230023ULL, 0x6b6b006b6b6b006bULL, 0x4545004545450045ULL, 0xa5a500a5a5a500a5ULL, 0xeded00ededed00edULL, 0x4f4f004f4f4f004fULL, 0x1d1d001d1d1d001dULL, 0x9292009292920092ULL, 0x8686008686860086ULL, 0xafaf00afafaf00afULL, 0x7c7c007c7c7c007cULL, 0x1f1f001f1f1f001fULL, 0x3e3e003e3e3e003eULL, 0xdcdc00dcdcdc00dcULL, 0x5e5e005e5e5e005eULL, 0x0b0b000b0b0b000bULL, 0xa6a600a6a6a600a6ULL, 0x3939003939390039ULL, 0xd5d500d5d5d500d5ULL, 0x5d5d005d5d5d005dULL, 0xd9d900d9d9d900d9ULL, 0x5a5a005a5a5a005aULL, 0x5151005151510051ULL, 0x6c6c006c6c6c006cULL, 0x8b8b008b8b8b008bULL, 0x9a9a009a9a9a009aULL, 0xfbfb00fbfbfb00fbULL, 0xb0b000b0b0b000b0ULL, 0x7474007474740074ULL, 0x2b2b002b2b2b002bULL, 0xf0f000f0f0f000f0ULL, 0x8484008484840084ULL, 0xdfdf00dfdfdf00dfULL, 0xcbcb00cbcbcb00cbULL, 0x3434003434340034ULL, 0x7676007676760076ULL, 0x6d6d006d6d6d006dULL, 0xa9a900a9a9a900a9ULL, 0xd1d100d1d1d100d1ULL, 0x0404000404040004ULL, 0x1414001414140014ULL, 0x3a3a003a3a3a003aULL, 0xdede00dedede00deULL, 0x1111001111110011ULL, 
0x3232003232320032ULL, 0x9c9c009c9c9c009cULL, 0x5353005353530053ULL, 0xf2f200f2f2f200f2ULL, 0xfefe00fefefe00feULL, 0xcfcf00cfcfcf00cfULL, 0xc3c300c3c3c300c3ULL, 0x7a7a007a7a7a007aULL, 0x2424002424240024ULL, 0xe8e800e8e8e800e8ULL, 0x6060006060600060ULL, 0x6969006969690069ULL, 0xaaaa00aaaaaa00aaULL, 0xa0a000a0a0a000a0ULL, 0xa1a100a1a1a100a1ULL, 0x6262006262620062ULL, 0x5454005454540054ULL, 0x1e1e001e1e1e001eULL, 0xe0e000e0e0e000e0ULL, 0x6464006464640064ULL, 0x1010001010100010ULL, 0x0000000000000000ULL, 0xa3a300a3a3a300a3ULL, 0x7575007575750075ULL, 0x8a8a008a8a8a008aULL, 0xe6e600e6e6e600e6ULL, 0x0909000909090009ULL, 0xdddd00dddddd00ddULL, 0x8787008787870087ULL, 0x8383008383830083ULL, 0xcdcd00cdcdcd00cdULL, 0x9090009090900090ULL, 0x7373007373730073ULL, 0xf6f600f6f6f600f6ULL, 0x9d9d009d9d9d009dULL, 0xbfbf00bfbfbf00bfULL, 0x5252005252520052ULL, 0xd8d800d8d8d800d8ULL, 0xc8c800c8c8c800c8ULL, 0xc6c600c6c6c600c6ULL, 0x8181008181810081ULL, 0x6f6f006f6f6f006fULL, 0x1313001313130013ULL, 0x6363006363630063ULL, 0xe9e900e9e9e900e9ULL, 0xa7a700a7a7a700a7ULL, 0x9f9f009f9f9f009fULL, 0xbcbc00bcbcbc00bcULL, 0x2929002929290029ULL, 0xf9f900f9f9f900f9ULL, 0x2f2f002f2f2f002fULL, 0xb4b400b4b4b400b4ULL, 0x7878007878780078ULL, 0x0606000606060006ULL, 0xe7e700e7e7e700e7ULL, 0x7171007171710071ULL, 0xd4d400d4d4d400d4ULL, 0xabab00ababab00abULL, 0x8888008888880088ULL, 0x8d8d008d8d8d008dULL, 0x7272007272720072ULL, 0xb9b900b9b9b900b9ULL, 0xf8f800f8f8f800f8ULL, 0xacac00acacac00acULL, 0x3636003636360036ULL, 0x2a2a002a2a2a002aULL, 0x3c3c003c3c3c003cULL, 0xf1f100f1f1f100f1ULL, 0x4040004040400040ULL, 0xd3d300d3d3d300d3ULL, 0xbbbb00bbbbbb00bbULL, 0x4343004343430043ULL, 0x1515001515150015ULL, 0xadad00adadad00adULL, 0x7777007777770077ULL, 0x8080008080800080ULL, 0x8282008282820082ULL, 0xecec00ececec00ecULL, 0x2727002727270027ULL, 0xe5e500e5e5e500e5ULL, 0x8585008585850085ULL, 0x3535003535350035ULL, 0x0c0c000c0c0c000cULL, 0x4141004141410041ULL, 0xefef00efefef00efULL, 0x9393009393930093ULL, 0x1919001919190019ULL, 0x2121002121210021ULL, 0x0e0e000e0e0e000eULL, 0x4e4e004e4e4e004eULL, 0x6565006565650065ULL, 0xbdbd00bdbdbd00bdULL, 0xb8b800b8b8b800b8ULL, 0x8f8f008f8f8f008fULL, 0xebeb00ebebeb00ebULL, 0xcece00cecece00ceULL, 0x3030003030300030ULL, 0x5f5f005f5f5f005fULL, 0xc5c500c5c5c500c5ULL, 0x1a1a001a1a1a001aULL, 0xe1e100e1e1e100e1ULL, 0xcaca00cacaca00caULL, 0x4747004747470047ULL, 0x3d3d003d3d3d003dULL, 0x0101000101010001ULL, 0xd6d600d6d6d600d6ULL, 0x5656005656560056ULL, 0x4d4d004d4d4d004dULL, 0x0d0d000d0d0d000dULL, 0x6666006666660066ULL, 0xcccc00cccccc00ccULL, 0x2d2d002d2d2d002dULL, 0x1212001212120012ULL, 0x2020002020200020ULL, 0xb1b100b1b1b100b1ULL, 0x9999009999990099ULL, 0x4c4c004c4c4c004cULL, 0xc2c200c2c2c200c2ULL, 0x7e7e007e7e7e007eULL, 0x0505000505050005ULL, 0xb7b700b7b7b700b7ULL, 0x3131003131310031ULL, 0x1717001717170017ULL, 0xd7d700d7d7d700d7ULL, 0x5858005858580058ULL, 0x6161006161610061ULL, 0x1b1b001b1b1b001bULL, 0x1c1c001c1c1c001cULL, 0x0f0f000f0f0f000fULL, 0x1616001616160016ULL, 0x1818001818180018ULL, 0x2222002222220022ULL, 0x4444004444440044ULL, 0xb2b200b2b2b200b2ULL, 0xb5b500b5b5b500b5ULL, 0x9191009191910091ULL, 0x0808000808080008ULL, 0xa8a800a8a8a800a8ULL, 0xfcfc00fcfcfc00fcULL, 0x5050005050500050ULL, 0xd0d000d0d0d000d0ULL, 0x7d7d007d7d7d007dULL, 0x8989008989890089ULL, 0x9797009797970097ULL, 0x5b5b005b5b5b005bULL, 0x9595009595950095ULL, 0xffff00ffffff00ffULL, 0xd2d200d2d2d200d2ULL, 0xc4c400c4c4c400c4ULL, 0x4848004848480048ULL, 0xf7f700f7f7f700f7ULL, 0xdbdb00dbdbdb00dbULL, 0x0303000303030003ULL, 0xdada00dadada00daULL, 
0x3f3f003f3f3f003fULL, 0x9494009494940094ULL, 0x5c5c005c5c5c005cULL, 0x0202000202020002ULL, 0x4a4a004a4a4a004aULL, 0x3333003333330033ULL, 0x6767006767670067ULL, 0xf3f300f3f3f300f3ULL, 0x7f7f007f7f7f007fULL, 0xe2e200e2e2e200e2ULL, 0x9b9b009b9b9b009bULL, 0x2626002626260026ULL, 0x3737003737370037ULL, 0x3b3b003b3b3b003bULL, 0x9696009696960096ULL, 0x4b4b004b4b4b004bULL, 0xbebe00bebebe00beULL, 0x2e2e002e2e2e002eULL, 0x7979007979790079ULL, 0x8c8c008c8c8c008cULL, 0x6e6e006e6e6e006eULL, 0x8e8e008e8e8e008eULL, 0xf5f500f5f5f500f5ULL, 0xb6b600b6b6b600b6ULL, 0xfdfd00fdfdfd00fdULL, 0x5959005959590059ULL, 0x9898009898980098ULL, 0x6a6a006a6a6a006aULL, 0x4646004646460046ULL, 0xbaba00bababa00baULL, 0x2525002525250025ULL, 0x4242004242420042ULL, 0xa2a200a2a2a200a2ULL, 0xfafa00fafafa00faULL, 0x0707000707070007ULL, 0x5555005555550055ULL, 0xeeee00eeeeee00eeULL, 0x0a0a000a0a0a000aULL, 0x4949004949490049ULL, 0x6868006868680068ULL, 0x3838003838380038ULL, 0xa4a400a4a4a400a4ULL, 0x2828002828280028ULL, 0x7b7b007b7b7b007bULL, 0xc9c900c9c9c900c9ULL, 0xc1c100c1c1c100c1ULL, 0xe3e300e3e3e300e3ULL, 0xf4f400f4f4f400f4ULL, 0xc7c700c7c7c700c7ULL, 0x9e9e009e9e9e009eULL, }; __visible const u64 camellia_sp11101110[256] = { 0x7070700070707000ULL, 0x8282820082828200ULL, 0x2c2c2c002c2c2c00ULL, 0xececec00ececec00ULL, 0xb3b3b300b3b3b300ULL, 0x2727270027272700ULL, 0xc0c0c000c0c0c000ULL, 0xe5e5e500e5e5e500ULL, 0xe4e4e400e4e4e400ULL, 0x8585850085858500ULL, 0x5757570057575700ULL, 0x3535350035353500ULL, 0xeaeaea00eaeaea00ULL, 0x0c0c0c000c0c0c00ULL, 0xaeaeae00aeaeae00ULL, 0x4141410041414100ULL, 0x2323230023232300ULL, 0xefefef00efefef00ULL, 0x6b6b6b006b6b6b00ULL, 0x9393930093939300ULL, 0x4545450045454500ULL, 0x1919190019191900ULL, 0xa5a5a500a5a5a500ULL, 0x2121210021212100ULL, 0xededed00ededed00ULL, 0x0e0e0e000e0e0e00ULL, 0x4f4f4f004f4f4f00ULL, 0x4e4e4e004e4e4e00ULL, 0x1d1d1d001d1d1d00ULL, 0x6565650065656500ULL, 0x9292920092929200ULL, 0xbdbdbd00bdbdbd00ULL, 0x8686860086868600ULL, 0xb8b8b800b8b8b800ULL, 0xafafaf00afafaf00ULL, 0x8f8f8f008f8f8f00ULL, 0x7c7c7c007c7c7c00ULL, 0xebebeb00ebebeb00ULL, 0x1f1f1f001f1f1f00ULL, 0xcecece00cecece00ULL, 0x3e3e3e003e3e3e00ULL, 0x3030300030303000ULL, 0xdcdcdc00dcdcdc00ULL, 0x5f5f5f005f5f5f00ULL, 0x5e5e5e005e5e5e00ULL, 0xc5c5c500c5c5c500ULL, 0x0b0b0b000b0b0b00ULL, 0x1a1a1a001a1a1a00ULL, 0xa6a6a600a6a6a600ULL, 0xe1e1e100e1e1e100ULL, 0x3939390039393900ULL, 0xcacaca00cacaca00ULL, 0xd5d5d500d5d5d500ULL, 0x4747470047474700ULL, 0x5d5d5d005d5d5d00ULL, 0x3d3d3d003d3d3d00ULL, 0xd9d9d900d9d9d900ULL, 0x0101010001010100ULL, 0x5a5a5a005a5a5a00ULL, 0xd6d6d600d6d6d600ULL, 0x5151510051515100ULL, 0x5656560056565600ULL, 0x6c6c6c006c6c6c00ULL, 0x4d4d4d004d4d4d00ULL, 0x8b8b8b008b8b8b00ULL, 0x0d0d0d000d0d0d00ULL, 0x9a9a9a009a9a9a00ULL, 0x6666660066666600ULL, 0xfbfbfb00fbfbfb00ULL, 0xcccccc00cccccc00ULL, 0xb0b0b000b0b0b000ULL, 0x2d2d2d002d2d2d00ULL, 0x7474740074747400ULL, 0x1212120012121200ULL, 0x2b2b2b002b2b2b00ULL, 0x2020200020202000ULL, 0xf0f0f000f0f0f000ULL, 0xb1b1b100b1b1b100ULL, 0x8484840084848400ULL, 0x9999990099999900ULL, 0xdfdfdf00dfdfdf00ULL, 0x4c4c4c004c4c4c00ULL, 0xcbcbcb00cbcbcb00ULL, 0xc2c2c200c2c2c200ULL, 0x3434340034343400ULL, 0x7e7e7e007e7e7e00ULL, 0x7676760076767600ULL, 0x0505050005050500ULL, 0x6d6d6d006d6d6d00ULL, 0xb7b7b700b7b7b700ULL, 0xa9a9a900a9a9a900ULL, 0x3131310031313100ULL, 0xd1d1d100d1d1d100ULL, 0x1717170017171700ULL, 0x0404040004040400ULL, 0xd7d7d700d7d7d700ULL, 0x1414140014141400ULL, 0x5858580058585800ULL, 0x3a3a3a003a3a3a00ULL, 0x6161610061616100ULL, 0xdedede00dedede00ULL, 0x1b1b1b001b1b1b00ULL, 
0x1111110011111100ULL, 0x1c1c1c001c1c1c00ULL, 0x3232320032323200ULL, 0x0f0f0f000f0f0f00ULL, 0x9c9c9c009c9c9c00ULL, 0x1616160016161600ULL, 0x5353530053535300ULL, 0x1818180018181800ULL, 0xf2f2f200f2f2f200ULL, 0x2222220022222200ULL, 0xfefefe00fefefe00ULL, 0x4444440044444400ULL, 0xcfcfcf00cfcfcf00ULL, 0xb2b2b200b2b2b200ULL, 0xc3c3c300c3c3c300ULL, 0xb5b5b500b5b5b500ULL, 0x7a7a7a007a7a7a00ULL, 0x9191910091919100ULL, 0x2424240024242400ULL, 0x0808080008080800ULL, 0xe8e8e800e8e8e800ULL, 0xa8a8a800a8a8a800ULL, 0x6060600060606000ULL, 0xfcfcfc00fcfcfc00ULL, 0x6969690069696900ULL, 0x5050500050505000ULL, 0xaaaaaa00aaaaaa00ULL, 0xd0d0d000d0d0d000ULL, 0xa0a0a000a0a0a000ULL, 0x7d7d7d007d7d7d00ULL, 0xa1a1a100a1a1a100ULL, 0x8989890089898900ULL, 0x6262620062626200ULL, 0x9797970097979700ULL, 0x5454540054545400ULL, 0x5b5b5b005b5b5b00ULL, 0x1e1e1e001e1e1e00ULL, 0x9595950095959500ULL, 0xe0e0e000e0e0e000ULL, 0xffffff00ffffff00ULL, 0x6464640064646400ULL, 0xd2d2d200d2d2d200ULL, 0x1010100010101000ULL, 0xc4c4c400c4c4c400ULL, 0x0000000000000000ULL, 0x4848480048484800ULL, 0xa3a3a300a3a3a300ULL, 0xf7f7f700f7f7f700ULL, 0x7575750075757500ULL, 0xdbdbdb00dbdbdb00ULL, 0x8a8a8a008a8a8a00ULL, 0x0303030003030300ULL, 0xe6e6e600e6e6e600ULL, 0xdadada00dadada00ULL, 0x0909090009090900ULL, 0x3f3f3f003f3f3f00ULL, 0xdddddd00dddddd00ULL, 0x9494940094949400ULL, 0x8787870087878700ULL, 0x5c5c5c005c5c5c00ULL, 0x8383830083838300ULL, 0x0202020002020200ULL, 0xcdcdcd00cdcdcd00ULL, 0x4a4a4a004a4a4a00ULL, 0x9090900090909000ULL, 0x3333330033333300ULL, 0x7373730073737300ULL, 0x6767670067676700ULL, 0xf6f6f600f6f6f600ULL, 0xf3f3f300f3f3f300ULL, 0x9d9d9d009d9d9d00ULL, 0x7f7f7f007f7f7f00ULL, 0xbfbfbf00bfbfbf00ULL, 0xe2e2e200e2e2e200ULL, 0x5252520052525200ULL, 0x9b9b9b009b9b9b00ULL, 0xd8d8d800d8d8d800ULL, 0x2626260026262600ULL, 0xc8c8c800c8c8c800ULL, 0x3737370037373700ULL, 0xc6c6c600c6c6c600ULL, 0x3b3b3b003b3b3b00ULL, 0x8181810081818100ULL, 0x9696960096969600ULL, 0x6f6f6f006f6f6f00ULL, 0x4b4b4b004b4b4b00ULL, 0x1313130013131300ULL, 0xbebebe00bebebe00ULL, 0x6363630063636300ULL, 0x2e2e2e002e2e2e00ULL, 0xe9e9e900e9e9e900ULL, 0x7979790079797900ULL, 0xa7a7a700a7a7a700ULL, 0x8c8c8c008c8c8c00ULL, 0x9f9f9f009f9f9f00ULL, 0x6e6e6e006e6e6e00ULL, 0xbcbcbc00bcbcbc00ULL, 0x8e8e8e008e8e8e00ULL, 0x2929290029292900ULL, 0xf5f5f500f5f5f500ULL, 0xf9f9f900f9f9f900ULL, 0xb6b6b600b6b6b600ULL, 0x2f2f2f002f2f2f00ULL, 0xfdfdfd00fdfdfd00ULL, 0xb4b4b400b4b4b400ULL, 0x5959590059595900ULL, 0x7878780078787800ULL, 0x9898980098989800ULL, 0x0606060006060600ULL, 0x6a6a6a006a6a6a00ULL, 0xe7e7e700e7e7e700ULL, 0x4646460046464600ULL, 0x7171710071717100ULL, 0xbababa00bababa00ULL, 0xd4d4d400d4d4d400ULL, 0x2525250025252500ULL, 0xababab00ababab00ULL, 0x4242420042424200ULL, 0x8888880088888800ULL, 0xa2a2a200a2a2a200ULL, 0x8d8d8d008d8d8d00ULL, 0xfafafa00fafafa00ULL, 0x7272720072727200ULL, 0x0707070007070700ULL, 0xb9b9b900b9b9b900ULL, 0x5555550055555500ULL, 0xf8f8f800f8f8f800ULL, 0xeeeeee00eeeeee00ULL, 0xacacac00acacac00ULL, 0x0a0a0a000a0a0a00ULL, 0x3636360036363600ULL, 0x4949490049494900ULL, 0x2a2a2a002a2a2a00ULL, 0x6868680068686800ULL, 0x3c3c3c003c3c3c00ULL, 0x3838380038383800ULL, 0xf1f1f100f1f1f100ULL, 0xa4a4a400a4a4a400ULL, 0x4040400040404000ULL, 0x2828280028282800ULL, 0xd3d3d300d3d3d300ULL, 0x7b7b7b007b7b7b00ULL, 0xbbbbbb00bbbbbb00ULL, 0xc9c9c900c9c9c900ULL, 0x4343430043434300ULL, 0xc1c1c100c1c1c100ULL, 0x1515150015151500ULL, 0xe3e3e300e3e3e300ULL, 0xadadad00adadad00ULL, 0xf4f4f400f4f4f400ULL, 0x7777770077777700ULL, 0xc7c7c700c7c7c700ULL, 0x8080800080808000ULL, 0x9e9e9e009e9e9e00ULL, }; /* key 
constants */ #define CAMELLIA_SIGMA1L (0xA09E667FL) #define CAMELLIA_SIGMA1R (0x3BCC908BL) #define CAMELLIA_SIGMA2L (0xB67AE858L) #define CAMELLIA_SIGMA2R (0x4CAA73B2L) #define CAMELLIA_SIGMA3L (0xC6EF372FL) #define CAMELLIA_SIGMA3R (0xE94F82BEL) #define CAMELLIA_SIGMA4L (0x54FF53A5L) #define CAMELLIA_SIGMA4R (0xF1D36F1CL) #define CAMELLIA_SIGMA5L (0x10E527FAL) #define CAMELLIA_SIGMA5R (0xDE682D1DL) #define CAMELLIA_SIGMA6L (0xB05688C2L) #define CAMELLIA_SIGMA6R (0xB3E6C1FDL) /* macros */ #define ROLDQ(l, r, bits) ({ \ u64 t = l; \ l = (l << bits) | (r >> (64 - bits)); \ r = (r << bits) | (t >> (64 - bits)); \ }) #define CAMELLIA_F(x, kl, kr, y) ({ \ u64 ii = x ^ (((u64)kl << 32) | kr); \ y = camellia_sp11101110[(uint8_t)ii]; \ y ^= camellia_sp44044404[(uint8_t)(ii >> 8)]; \ ii >>= 16; \ y ^= camellia_sp30333033[(uint8_t)ii]; \ y ^= camellia_sp02220222[(uint8_t)(ii >> 8)]; \ ii >>= 16; \ y ^= camellia_sp00444404[(uint8_t)ii]; \ y ^= camellia_sp03303033[(uint8_t)(ii >> 8)]; \ ii >>= 16; \ y ^= camellia_sp22000222[(uint8_t)ii]; \ y ^= camellia_sp10011110[(uint8_t)(ii >> 8)]; \ y = ror64(y, 32); \ }) #define SET_SUBKEY_LR(INDEX, sRL) (subkey[(INDEX)] = ror64((sRL), 32)) static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) { u64 kw4, tt; u32 dw, tl, tr; /* absorb kw2 to other subkeys */ /* round 2 */ subRL[3] ^= subRL[1]; /* round 4 */ subRL[5] ^= subRL[1]; /* round 6 */ subRL[7] ^= subRL[1]; subRL[1] ^= (subRL[1] & ~subRL[9]) << 32; /* modified for FLinv(kl2) */ dw = (subRL[1] & subRL[9]) >> 32; subRL[1] ^= rol32(dw, 1); /* round 8 */ subRL[11] ^= subRL[1]; /* round 10 */ subRL[13] ^= subRL[1]; /* round 12 */ subRL[15] ^= subRL[1]; subRL[1] ^= (subRL[1] & ~subRL[17]) << 32; /* modified for FLinv(kl4) */ dw = (subRL[1] & subRL[17]) >> 32; subRL[1] ^= rol32(dw, 1); /* round 14 */ subRL[19] ^= subRL[1]; /* round 16 */ subRL[21] ^= subRL[1]; /* round 18 */ subRL[23] ^= subRL[1]; if (max == 24) { /* kw3 */ subRL[24] ^= subRL[1]; /* absorb kw4 to other subkeys */ kw4 = subRL[25]; } else { subRL[1] ^= (subRL[1] & ~subRL[25]) << 32; /* modified for FLinv(kl6) */ dw = (subRL[1] & subRL[25]) >> 32; subRL[1] ^= rol32(dw, 1); /* round 20 */ subRL[27] ^= subRL[1]; /* round 22 */ subRL[29] ^= subRL[1]; /* round 24 */ subRL[31] ^= subRL[1]; /* kw3 */ subRL[32] ^= subRL[1]; /* absorb kw4 to other subkeys */ kw4 = subRL[33]; /* round 23 */ subRL[30] ^= kw4; /* round 21 */ subRL[28] ^= kw4; /* round 19 */ subRL[26] ^= kw4; kw4 ^= (kw4 & ~subRL[24]) << 32; /* modified for FL(kl5) */ dw = (kw4 & subRL[24]) >> 32; kw4 ^= rol32(dw, 1); } /* round 17 */ subRL[22] ^= kw4; /* round 15 */ subRL[20] ^= kw4; /* round 13 */ subRL[18] ^= kw4; kw4 ^= (kw4 & ~subRL[16]) << 32; /* modified for FL(kl3) */ dw = (kw4 & subRL[16]) >> 32; kw4 ^= rol32(dw, 1); /* round 11 */ subRL[14] ^= kw4; /* round 9 */ subRL[12] ^= kw4; /* round 7 */ subRL[10] ^= kw4; kw4 ^= (kw4 & ~subRL[8]) << 32; /* modified for FL(kl1) */ dw = (kw4 & subRL[8]) >> 32; kw4 ^= rol32(dw, 1); /* round 5 */ subRL[6] ^= kw4; /* round 3 */ subRL[4] ^= kw4; /* round 1 */ subRL[2] ^= kw4; /* kw1 */ subRL[0] ^= kw4; /* key XOR is end of F-function */ SET_SUBKEY_LR(0, subRL[0] ^ subRL[2]); /* kw1 */ SET_SUBKEY_LR(2, subRL[3]); /* round 1 */ SET_SUBKEY_LR(3, subRL[2] ^ subRL[4]); /* round 2 */ SET_SUBKEY_LR(4, subRL[3] ^ subRL[5]); /* round 3 */ SET_SUBKEY_LR(5, subRL[4] ^ subRL[6]); /* round 4 */ SET_SUBKEY_LR(6, subRL[5] ^ subRL[7]); /* round 5 */ tl = (subRL[10] >> 32) ^ (subRL[10] & ~subRL[8]); dw = tl & (subRL[8] >> 32); /* FL(kl1) */ tr = subRL[10] ^ 
rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(7, subRL[6] ^ tt); /* round 6 */ SET_SUBKEY_LR(8, subRL[8]); /* FL(kl1) */ SET_SUBKEY_LR(9, subRL[9]); /* FLinv(kl2) */ tl = (subRL[7] >> 32) ^ (subRL[7] & ~subRL[9]); dw = tl & (subRL[9] >> 32); /* FLinv(kl2) */ tr = subRL[7] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(10, subRL[11] ^ tt); /* round 7 */ SET_SUBKEY_LR(11, subRL[10] ^ subRL[12]); /* round 8 */ SET_SUBKEY_LR(12, subRL[11] ^ subRL[13]); /* round 9 */ SET_SUBKEY_LR(13, subRL[12] ^ subRL[14]); /* round 10 */ SET_SUBKEY_LR(14, subRL[13] ^ subRL[15]); /* round 11 */ tl = (subRL[18] >> 32) ^ (subRL[18] & ~subRL[16]); dw = tl & (subRL[16] >> 32); /* FL(kl3) */ tr = subRL[18] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(15, subRL[14] ^ tt); /* round 12 */ SET_SUBKEY_LR(16, subRL[16]); /* FL(kl3) */ SET_SUBKEY_LR(17, subRL[17]); /* FLinv(kl4) */ tl = (subRL[15] >> 32) ^ (subRL[15] & ~subRL[17]); dw = tl & (subRL[17] >> 32); /* FLinv(kl4) */ tr = subRL[15] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(18, subRL[19] ^ tt); /* round 13 */ SET_SUBKEY_LR(19, subRL[18] ^ subRL[20]); /* round 14 */ SET_SUBKEY_LR(20, subRL[19] ^ subRL[21]); /* round 15 */ SET_SUBKEY_LR(21, subRL[20] ^ subRL[22]); /* round 16 */ SET_SUBKEY_LR(22, subRL[21] ^ subRL[23]); /* round 17 */ if (max == 24) { SET_SUBKEY_LR(23, subRL[22]); /* round 18 */ SET_SUBKEY_LR(24, subRL[24] ^ subRL[23]); /* kw3 */ } else { tl = (subRL[26] >> 32) ^ (subRL[26] & ~subRL[24]); dw = tl & (subRL[24] >> 32); /* FL(kl5) */ tr = subRL[26] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(23, subRL[22] ^ tt); /* round 18 */ SET_SUBKEY_LR(24, subRL[24]); /* FL(kl5) */ SET_SUBKEY_LR(25, subRL[25]); /* FLinv(kl6) */ tl = (subRL[23] >> 32) ^ (subRL[23] & ~subRL[25]); dw = tl & (subRL[25] >> 32); /* FLinv(kl6) */ tr = subRL[23] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(26, subRL[27] ^ tt); /* round 19 */ SET_SUBKEY_LR(27, subRL[26] ^ subRL[28]); /* round 20 */ SET_SUBKEY_LR(28, subRL[27] ^ subRL[29]); /* round 21 */ SET_SUBKEY_LR(29, subRL[28] ^ subRL[30]); /* round 22 */ SET_SUBKEY_LR(30, subRL[29] ^ subRL[31]); /* round 23 */ SET_SUBKEY_LR(31, subRL[30]); /* round 24 */ SET_SUBKEY_LR(32, subRL[32] ^ subRL[31]); /* kw3 */ } } static void camellia_setup128(const unsigned char *key, u64 *subkey) { u64 kl, kr, ww; u64 subRL[26]; /** * k == kl || kr (|| is concatenation) */ kl = get_unaligned_be64(key); kr = get_unaligned_be64(key + 8); /* generate KL dependent subkeys */ /* kw1 */ subRL[0] = kl; /* kw2 */ subRL[1] = kr; /* rotation left shift 15bit */ ROLDQ(kl, kr, 15); /* k3 */ subRL[4] = kl; /* k4 */ subRL[5] = kr; /* rotation left shift 15+30bit */ ROLDQ(kl, kr, 30); /* k7 */ subRL[10] = kl; /* k8 */ subRL[11] = kr; /* rotation left shift 15+30+15bit */ ROLDQ(kl, kr, 15); /* k10 */ subRL[13] = kr; /* rotation left shift 15+30+15+17 bit */ ROLDQ(kl, kr, 17); /* kl3 */ subRL[16] = kl; /* kl4 */ subRL[17] = kr; /* rotation left shift 15+30+15+17+17 bit */ ROLDQ(kl, kr, 17); /* k13 */ subRL[18] = kl; /* k14 */ subRL[19] = kr; /* rotation left shift 15+30+15+17+17+17 bit */ ROLDQ(kl, kr, 17); /* k17 */ subRL[22] = kl; /* k18 */ subRL[23] = kr; /* generate KA */ kl = subRL[0]; kr = subRL[1]; CAMELLIA_F(kl, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R, ww); kr ^= ww; CAMELLIA_F(kr, CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R, kl); /* current status == (kll, klr, w0, w1) */ CAMELLIA_F(kl, CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R, kr); kr ^= ww; CAMELLIA_F(kr, CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R, ww); 
kl ^= ww; /* generate KA dependent subkeys */ /* k1, k2 */ subRL[2] = kl; subRL[3] = kr; ROLDQ(kl, kr, 15); /* k5,k6 */ subRL[6] = kl; subRL[7] = kr; ROLDQ(kl, kr, 15); /* kl1, kl2 */ subRL[8] = kl; subRL[9] = kr; ROLDQ(kl, kr, 15); /* k9 */ subRL[12] = kl; ROLDQ(kl, kr, 15); /* k11, k12 */ subRL[14] = kl; subRL[15] = kr; ROLDQ(kl, kr, 34); /* k15, k16 */ subRL[20] = kl; subRL[21] = kr; ROLDQ(kl, kr, 17); /* kw3, kw4 */ subRL[24] = kl; subRL[25] = kr; camellia_setup_tail(subkey, subRL, 24); } static void camellia_setup256(const unsigned char *key, u64 *subkey) { u64 kl, kr; /* left half of key */ u64 krl, krr; /* right half of key */ u64 ww; /* temporary variables */ u64 subRL[34]; /** * key = (kl || kr || krl || krr) (|| is concatenation) */ kl = get_unaligned_be64(key); kr = get_unaligned_be64(key + 8); krl = get_unaligned_be64(key + 16); krr = get_unaligned_be64(key + 24); /* generate KL dependent subkeys */ /* kw1 */ subRL[0] = kl; /* kw2 */ subRL[1] = kr; ROLDQ(kl, kr, 45); /* k9 */ subRL[12] = kl; /* k10 */ subRL[13] = kr; ROLDQ(kl, kr, 15); /* kl3 */ subRL[16] = kl; /* kl4 */ subRL[17] = kr; ROLDQ(kl, kr, 17); /* k17 */ subRL[22] = kl; /* k18 */ subRL[23] = kr; ROLDQ(kl, kr, 34); /* k23 */ subRL[30] = kl; /* k24 */ subRL[31] = kr; /* generate KR dependent subkeys */ ROLDQ(krl, krr, 15); /* k3 */ subRL[4] = krl; /* k4 */ subRL[5] = krr; ROLDQ(krl, krr, 15); /* kl1 */ subRL[8] = krl; /* kl2 */ subRL[9] = krr; ROLDQ(krl, krr, 30); /* k13 */ subRL[18] = krl; /* k14 */ subRL[19] = krr; ROLDQ(krl, krr, 34); /* k19 */ subRL[26] = krl; /* k20 */ subRL[27] = krr; ROLDQ(krl, krr, 34); /* generate KA */ kl = subRL[0] ^ krl; kr = subRL[1] ^ krr; CAMELLIA_F(kl, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R, ww); kr ^= ww; CAMELLIA_F(kr, CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R, kl); kl ^= krl; CAMELLIA_F(kl, CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R, kr); kr ^= ww ^ krr; CAMELLIA_F(kr, CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R, ww); kl ^= ww; /* generate KB */ krl ^= kl; krr ^= kr; CAMELLIA_F(krl, CAMELLIA_SIGMA5L, CAMELLIA_SIGMA5R, ww); krr ^= ww; CAMELLIA_F(krr, CAMELLIA_SIGMA6L, CAMELLIA_SIGMA6R, ww); krl ^= ww; /* generate KA dependent subkeys */ ROLDQ(kl, kr, 15); /* k5 */ subRL[6] = kl; /* k6 */ subRL[7] = kr; ROLDQ(kl, kr, 30); /* k11 */ subRL[14] = kl; /* k12 */ subRL[15] = kr; /* rotation left shift 32bit */ ROLDQ(kl, kr, 32); /* kl5 */ subRL[24] = kl; /* kl6 */ subRL[25] = kr; /* rotation left shift 17 from k11,k12 -> k21,k22 */ ROLDQ(kl, kr, 17); /* k21 */ subRL[28] = kl; /* k22 */ subRL[29] = kr; /* generate KB dependent subkeys */ /* k1 */ subRL[2] = krl; /* k2 */ subRL[3] = krr; ROLDQ(krl, krr, 30); /* k7 */ subRL[10] = krl; /* k8 */ subRL[11] = krr; ROLDQ(krl, krr, 30); /* k15 */ subRL[20] = krl; /* k16 */ subRL[21] = krr; ROLDQ(krl, krr, 51); /* kw3 */ subRL[32] = krl; /* kw4 */ subRL[33] = krr; camellia_setup_tail(subkey, subRL, 32); } static void camellia_setup192(const unsigned char *key, u64 *subkey) { unsigned char kk[32]; u64 krl, krr; memcpy(kk, key, 24); memcpy((unsigned char *)&krl, key+16, 8); krr = ~krl; memcpy(kk+24, (unsigned char *)&krr, 8); camellia_setup256(kk, subkey); } int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key, unsigned int key_len, u32 *flags) { if (key_len != 16 && key_len != 24 && key_len != 32) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } cctx->key_length = key_len; switch (key_len) { case 16: camellia_setup128(key, cctx->key_table); break; case 24: camellia_setup192(key, cctx->key_table); break; case 32: camellia_setup256(key, 
cctx->key_table); break; } return 0; } EXPORT_SYMBOL_GPL(__camellia_setkey); static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len, &tfm->crt_flags); } void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) { u128 iv = *src; camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); u128_xor(&dst[1], &dst[1], &iv); } EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way); void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) { be128 ctrblk; if (dst != src) *dst = *src; le128_to_be128(&ctrblk, iv); le128_inc(iv); camellia_enc_blk_xor(ctx, (u8 *)dst, (u8 *)&ctrblk); } EXPORT_SYMBOL_GPL(camellia_crypt_ctr); void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv) { be128 ctrblks[2]; if (dst != src) { dst[0] = src[0]; dst[1] = src[1]; } le128_to_be128(&ctrblks[0], iv); le128_inc(iv); le128_to_be128(&ctrblks[1], iv); le128_inc(iv); camellia_enc_blk_xor_2way(ctx, (u8 *)dst, (u8 *)ctrblks); } EXPORT_SYMBOL_GPL(camellia_crypt_ctr_2way); static const struct common_glue_ctx camellia_enc = { .num_funcs = 2, .fpu_blocks_limit = -1, .funcs = { { .num_blocks = 2, .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } }, { .num_blocks = 1, .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } } } }; static const struct common_glue_ctx camellia_ctr = { .num_funcs = 2, .fpu_blocks_limit = -1, .funcs = { { .num_blocks = 2, .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } }, { .num_blocks = 1, .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } } } }; static const struct common_glue_ctx camellia_dec = { .num_funcs = 2, .fpu_blocks_limit = -1, .funcs = { { .num_blocks = 2, .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } }, { .num_blocks = 1, .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } } } }; static const struct common_glue_ctx camellia_dec_cbc = { .num_funcs = 2, .fpu_blocks_limit = -1, .funcs = { { .num_blocks = 2, .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } }, { .num_blocks = 1, .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } } } }; static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes); } static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes); } static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc, dst, src, nbytes); } static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src, nbytes); } static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes); } static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) { const unsigned int bsize = CAMELLIA_BLOCK_SIZE; struct camellia_ctx *ctx = priv; int i; while (nbytes >= 2 * bsize) { camellia_enc_blk_2way(ctx, srcdst, srcdst); srcdst += bsize * 2; nbytes -= bsize * 2; } for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) camellia_enc_blk(ctx, srcdst, srcdst); } static void 
decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) { const unsigned int bsize = CAMELLIA_BLOCK_SIZE; struct camellia_ctx *ctx = priv; int i; while (nbytes >= 2 * bsize) { camellia_dec_blk_2way(ctx, srcdst, srcdst); srcdst += bsize * 2; nbytes -= bsize * 2; } for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) camellia_dec_blk(ctx, srcdst, srcdst); } int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm); int err; err = __camellia_setkey(&ctx->camellia_ctx, key, keylen - CAMELLIA_BLOCK_SIZE, &tfm->crt_flags); if (err) return err; return lrw_init_table(&ctx->lrw_table, key + keylen - CAMELLIA_BLOCK_SIZE); } EXPORT_SYMBOL_GPL(lrw_camellia_setkey); static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[2 * 4]; struct lrw_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .table_ctx = &ctx->lrw_table, .crypt_ctx = &ctx->camellia_ctx, .crypt_fn = encrypt_callback, }; return lrw_crypt(desc, dst, src, nbytes, &req); } static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[2 * 4]; struct lrw_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .table_ctx = &ctx->lrw_table, .crypt_ctx = &ctx->camellia_ctx, .crypt_fn = decrypt_callback, }; return lrw_crypt(desc, dst, src, nbytes, &req); } void lrw_camellia_exit_tfm(struct crypto_tfm *tfm) { struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_free_table(&ctx->lrw_table); } EXPORT_SYMBOL_GPL(lrw_camellia_exit_tfm); int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct camellia_xts_ctx *ctx = crypto_tfm_ctx(tfm); u32 *flags = &tfm->crt_flags; int err; /* key consists of keys of equal size concatenated, therefore * the length must be even */ if (keylen % 2) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } /* first half of xts-key is for crypt */ err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags); if (err) return err; /* second half of xts-key is for tweak */ return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2, flags); } EXPORT_SYMBOL_GPL(xts_camellia_setkey); static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[2 * 4]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = &ctx->tweak_ctx, .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk), .crypt_ctx = &ctx->crypt_ctx, .crypt_fn = encrypt_callback, }; return xts_crypt(desc, dst, src, nbytes, &req); } static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[2 * 4]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = &ctx->tweak_ctx, .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk), .crypt_ctx = &ctx->crypt_ctx, .crypt_fn = decrypt_callback, }; return xts_crypt(desc, dst, src, nbytes, &req); } static struct crypto_alg camellia_algs[6] = { { .cra_name = "camellia", .cra_driver_name = "camellia-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = 
sizeof(struct camellia_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAMELLIA_MIN_KEY_SIZE, .cia_max_keysize = CAMELLIA_MAX_KEY_SIZE, .cia_setkey = camellia_setkey, .cia_encrypt = camellia_encrypt, .cia_decrypt = camellia_decrypt } } }, { .cra_name = "ecb(camellia)", .cra_driver_name = "ecb-camellia-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .setkey = camellia_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, }, }, { .cra_name = "cbc(camellia)", .cra_driver_name = "cbc-camellia-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = camellia_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }, }, { .cra_name = "ctr(camellia)", .cra_driver_name = "ctr-camellia-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct camellia_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = camellia_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, }, }, }, { .cra_name = "lrw(camellia)", .cra_driver_name = "lrw-camellia-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_lrw_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_exit = lrw_camellia_exit_tfm, .cra_u = { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE + CAMELLIA_BLOCK_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE + CAMELLIA_BLOCK_SIZE, .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = lrw_camellia_setkey, .encrypt = lrw_encrypt, .decrypt = lrw_decrypt, }, }, }, { .cra_name = "xts(camellia)", .cra_driver_name = "xts-camellia-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_xts_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2, .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2, .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = xts_camellia_setkey, .encrypt = xts_encrypt, .decrypt = xts_decrypt, }, }, } }; static bool is_blacklisted_cpu(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; if (boot_cpu_data.x86 == 0x0f) { /* * On Pentium 4, camellia-asm is slower than original assembler * implementation because excessive uses of 64bit rotate and * left-shifts (which are really slow on P4) needed to store and * handle 128bit block in two 64bit registers. 
 */
		return true;
	}

	return false;
}

static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");

static int __init init(void)
{
	if (!force && is_blacklisted_cpu()) {
		printk(KERN_INFO
			"camellia-x86_64: performance on this CPU "
			"would be suboptimal: disabling "
			"camellia-x86_64.\n");
		return -ENODEV;
	}

	return crypto_register_algs(camellia_algs, ARRAY_SIZE(camellia_algs));
}

static void __exit fini(void)
{
	crypto_unregister_algs(camellia_algs, ARRAY_SIZE(camellia_algs));
}

module_init(init);
module_exit(fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("camellia");
MODULE_ALIAS_CRYPTO("camellia-asm");
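
/*
 * A minimal userspace sketch of the ROLDQ() primitive that drives the
 * Camellia key schedule above: the 128-bit quantity (l || r), held as two
 * 64-bit halves, is rotated left by 0 < bits < 64.  The function form, the
 * demo values and the build line are assumptions for illustration; the
 * kernel keeps this as a statement-expression macro.
 * Build with: cc -o roldq_demo roldq_demo.c
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void roldq(uint64_t *l, uint64_t *r, unsigned int bits)
{
	uint64_t t = *l;

	/* bits shifted out of the top of one half enter the bottom of the other */
	*l = (*l << bits) | (*r >> (64 - bits));
	*r = (*r << bits) | (t >> (64 - bits));
}

int main(void)
{
	uint64_t l = 0x0123456789abcdefULL;
	uint64_t r = 0xfedcba9876543210ULL;

	roldq(&l, &r, 15);	/* the shift the schedule uses for k3/k4 */
	printf("%016" PRIx64 " %016" PRIx64 "\n", l, r);
	return 0;
}
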
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_26
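
/*
 * A standalone sketch of the key-splitting rule that xts_camellia_setkey()
 * above enforces: an XTS key is two equal-sized cipher keys concatenated,
 * the first half for data encryption and the second half for the tweak.
 * The helper name xts_split_key() is hypothetical, not a kernel API.
 */
#include <stddef.h>
#include <stdio.h>

static int xts_split_key(const unsigned char *key, size_t keylen,
			 const unsigned char **crypt_key,
			 const unsigned char **tweak_key, size_t *half)
{
	if (keylen % 2)			/* halves must be equal in size */
		return -1;

	*half = keylen / 2;
	*crypt_key = key;		/* first half: data cipher key */
	*tweak_key = key + *half;	/* second half: tweak cipher key */
	return 0;
}

int main(void)
{
	unsigned char key[32] = { 0 };	/* e.g. xts(camellia), 2 * 16 bytes */
	const unsigned char *ck, *tk;
	size_t half;

	if (xts_split_key(key, sizeof(key), &ck, &tk, &half) == 0)
		printf("crypt key at +0, tweak key at +%zu\n", half);
	return 0;
}
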
crossvul-cpp_data_bad_5786_0
/* * ModSecurity for Apache 2.x, http://www.modsecurity.org/ * Copyright (c) 2004-2013 Trustwave Holdings, Inc. (http://www.trustwave.com/) * * You may not use this file except in compliance with * the License.  You may obtain a copy of the License at * *     http://www.apache.org/licenses/LICENSE-2.0 * * If any of the files related to licensing are missing or if you have any * other questions related to licensing please contact Trustwave Holdings, Inc. * directly using the email address security@modsecurity.org. */ #include <stdlib.h> #include "apr_global_mutex.h" #include "modsecurity.h" #include "msc_parsers.h" #include "msc_util.h" #include "msc_xml.h" #include "apr_version.h" unsigned long int DSOLOCAL unicode_codepage = 0; int DSOLOCAL *unicode_map_table = NULL; /** * Format an alert message. */ const char * msc_alert_message(modsec_rec *msr, msre_actionset *actionset, const char *action_message, const char *rule_message) { const char *message = NULL; if (rule_message == NULL) rule_message = "Unknown error."; if (action_message == NULL) { message = apr_psprintf(msr->mp, "%s%s", rule_message, msre_format_metadata(msr, actionset)); } else { message = apr_psprintf(msr->mp, "%s %s%s", action_message, rule_message, msre_format_metadata(msr, actionset)); } return message; } /** * Log an alert message to the log, adding the rule metadata at the end. */ void msc_alert(modsec_rec *msr, int level, msre_actionset *actionset, const char *action_message, const char *rule_message) { const char *message = msc_alert_message(msr, actionset, action_message, rule_message); msr_log(msr, level, "%s", message); } #if 0 /** * Return phase name associated with the given phase number. */ static const char *phase_name(int phase) { switch(phase) { case 1 : return "REQUEST_HEADERS"; break; case 2 : return "REQUEST_BODY"; break; case 3 : return "RESPONSE_HEADERS"; break; case 4 : return "RESPONSE_BODY"; break; case 5 : return "LOGGING"; break; } return "INVALID"; } #endif /** * Creates and initialises a ModSecurity engine instance. */ msc_engine *modsecurity_create(apr_pool_t *mp, int processing_mode) { msc_engine *msce = NULL; msce = apr_pcalloc(mp, sizeof(msc_engine)); if (msce == NULL) return NULL; msce->mp = mp; msce->processing_mode = processing_mode; msce->msre = msre_engine_create(msce->mp); if (msce->msre == NULL) return NULL; msre_engine_register_default_variables(msce->msre); msre_engine_register_default_operators(msce->msre); msre_engine_register_default_tfns(msce->msre); msre_engine_register_default_actions(msce->msre); // TODO: msre_engine_register_default_reqbody_processors(msce->msre); return msce; } /** * Initialise the modsecurity engine. This function must be invoked * after configuration processing is complete as Apache needs to know the * username it is running as. 
*/ int modsecurity_init(msc_engine *msce, apr_pool_t *mp) { apr_status_t rc; /* Serial audit log mutext */ rc = apr_global_mutex_create(&msce->auditlog_lock, NULL, APR_LOCK_DEFAULT, mp); if (rc != APR_SUCCESS) { //ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, "mod_security: Could not create modsec_auditlog_lock"); //return HTTP_INTERNAL_SERVER_ERROR; return -1; } #if !defined(MSC_TEST) #ifdef __SET_MUTEX_PERMS #if AP_SERVER_MAJORVERSION_NUMBER > 1 && AP_SERVER_MINORVERSION_NUMBER > 2 rc = ap_unixd_set_global_mutex_perms(msce->auditlog_lock); #else rc = unixd_set_global_mutex_perms(msce->auditlog_lock); #endif if (rc != APR_SUCCESS) { // ap_log_error(APLOG_MARK, APLOG_ERR, rc, s, "mod_security: Could not set permissions on modsec_auditlog_lock; check User and Group directives"); // return HTTP_INTERNAL_SERVER_ERROR; return -1; } #endif /* SET_MUTEX_PERMS */ rc = apr_global_mutex_create(&msce->geo_lock, NULL, APR_LOCK_DEFAULT, mp); if (rc != APR_SUCCESS) { return -1; } #ifdef __SET_MUTEX_PERMS #if AP_SERVER_MAJORVERSION_NUMBER > 1 && AP_SERVER_MINORVERSION_NUMBER > 2 rc = ap_unixd_set_global_mutex_perms(msce->geo_lock); #else rc = unixd_set_global_mutex_perms(msce->geo_lock); #endif if (rc != APR_SUCCESS) { return -1; } #endif /* SET_MUTEX_PERMS */ #endif return 1; } /** * Performs per-child (new process) initialisation. */ void modsecurity_child_init(msc_engine *msce) { /* Need to call this once per process before any other XML calls. */ xmlInitParser(); if (msce->auditlog_lock != NULL) { apr_status_t rc = apr_global_mutex_child_init(&msce->auditlog_lock, NULL, msce->mp); if (rc != APR_SUCCESS) { // ap_log_error(APLOG_MARK, APLOG_ERR, rs, s, "Failed to child-init auditlog mutex"); } } if (msce->geo_lock != NULL) { apr_status_t rc = apr_global_mutex_child_init(&msce->geo_lock, NULL, msce->mp); if (rc != APR_SUCCESS) { // ap_log_error(APLOG_MARK, APLOG_ERR, rs, s, "Failed to child-init geo mutex"); } } } /** * Releases resources held by engine instance. */ void modsecurity_shutdown(msc_engine *msce) { if (msce == NULL) return; } /** * */ static void modsecurity_persist_data(modsec_rec *msr) { const apr_array_header_t *arr; apr_table_entry_t *te; apr_time_t time_before, time_after; int i; time_before = apr_time_now(); /* Collections, store & remove stale. */ arr = apr_table_elts(msr->collections); te = (apr_table_entry_t *)arr->elts; for (i = 0; i < arr->nelts; i++) { apr_table_t *col = (apr_table_t *)te[i].val; /* Only store those collections that changed. */ if (apr_table_get(msr->collections_dirty, te[i].key)) { collection_store(msr, col); } } time_after = apr_time_now(); msr->time_storage_write += time_after - time_before; if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Recording persistent data took %" APR_TIME_T_FMT " microseconds.", msr->time_gc); } /* Remove stale collections. */ srand(time(NULL)); if (rand() < RAND_MAX/100) { arr = apr_table_elts(msr->collections); te = (apr_table_entry_t *)arr->elts; for (i = 0; i < arr->nelts; i++) { collections_remove_stale(msr, te[i].key); } msr->time_gc = apr_time_now() - time_after; if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Garbage collection took %" APR_TIME_T_FMT " microseconds.", msr->time_gc); } } } /** * */ static apr_status_t modsecurity_tx_cleanup(void *data) { modsec_rec *msr = (modsec_rec *)data; char *my_error_msg = NULL; if (msr == NULL) return APR_SUCCESS; /* Multipart processor cleanup. */ if (msr->mpd != NULL) multipart_cleanup(msr); /* XML processor cleanup. 
*/ if (msr->xml != NULL) xml_cleanup(msr); // TODO: Why do we ignore return code here? modsecurity_request_body_clear(msr, &my_error_msg); if (my_error_msg != NULL) { msr_log(msr, 1, "%s", my_error_msg); } #if defined(WITH_LUA) #ifdef CACHE_LUA if(msr->L != NULL) lua_close(msr->L); #endif #endif return APR_SUCCESS; } /** * */ apr_status_t modsecurity_tx_init(modsec_rec *msr) { const char *s = NULL; const apr_array_header_t *arr; char *semicolon = NULL; char *comma = NULL; apr_table_entry_t *te; int i; /* Register TX cleanup */ apr_pool_cleanup_register(msr->mp, msr, modsecurity_tx_cleanup, apr_pool_cleanup_null); /* Initialise C-L */ msr->request_content_length = -1; s = apr_table_get(msr->request_headers, "Content-Length"); if (s != NULL) { msr->request_content_length = strtol(s, NULL, 10); } /* Figure out whether this request has a body */ msr->reqbody_chunked = 0; msr->reqbody_should_exist = 0; if (msr->request_content_length == -1) { /* There's no C-L, but is chunked encoding used? */ char *transfer_encoding = (char *)apr_table_get(msr->request_headers, "Transfer-Encoding"); if ((transfer_encoding != NULL)&&(strstr(transfer_encoding, "chunked") != NULL)) { msr->reqbody_should_exist = 1; msr->reqbody_chunked = 1; } } else { /* C-L found */ msr->reqbody_should_exist = 1; } /* Initialise C-T */ msr->request_content_type = NULL; s = apr_table_get(msr->request_headers, "Content-Type"); if (s != NULL) msr->request_content_type = s; /* Decide what to do with the request body. */ if ((msr->request_content_type != NULL) && (strncasecmp(msr->request_content_type, "application/x-www-form-urlencoded", 33) == 0)) { /* Always place POST requests with * "application/x-www-form-urlencoded" payloads in memory. */ msr->msc_reqbody_storage = MSC_REQBODY_MEMORY; msr->msc_reqbody_spilltodisk = 0; msr->msc_reqbody_processor = "URLENCODED"; } else { /* If the C-L is known and there's more data than * our limit go to disk straight away. */ if ((msr->request_content_length != -1) && (msr->request_content_length > msr->txcfg->reqbody_inmemory_limit)) { msr->msc_reqbody_storage = MSC_REQBODY_DISK; } /* In all other cases, try using the memory first * but switch over to disk for larger bodies. */ msr->msc_reqbody_storage = MSC_REQBODY_MEMORY; msr->msc_reqbody_spilltodisk = 1; if (msr->request_content_type != NULL) { if (strncasecmp(msr->request_content_type, "multipart/form-data", 19) == 0) { msr->msc_reqbody_processor = "MULTIPART"; } } } /* Check if we are forcing buffering, then use memory only. 
*/ if (msr->txcfg->reqbody_buffering != REQUEST_BODY_FORCEBUF_OFF) { msr->msc_reqbody_storage = MSC_REQBODY_MEMORY; msr->msc_reqbody_spilltodisk = 0; } /* Initialise arguments */ msr->arguments = apr_table_make(msr->mp, 32); if (msr->arguments == NULL) return -1; if (msr->query_string != NULL) { int invalid_count = 0; if (parse_arguments(msr, msr->query_string, strlen(msr->query_string), msr->txcfg->argument_separator, "QUERY_STRING", msr->arguments, &invalid_count) < 0) { msr_log(msr, 1, "Initialisation: Error occurred while parsing QUERY_STRING arguments."); return -1; } if (invalid_count) { msr->urlencoded_error = 1; } } msr->arguments_to_sanitize = apr_table_make(msr->mp, 16); if (msr->arguments_to_sanitize == NULL) return -1; msr->request_headers_to_sanitize = apr_table_make(msr->mp, 16); if (msr->request_headers_to_sanitize == NULL) return -1; msr->response_headers_to_sanitize = apr_table_make(msr->mp, 16); if (msr->response_headers_to_sanitize == NULL) return -1; msr->pattern_to_sanitize = apr_table_make(msr->mp, 32); if (msr->pattern_to_sanitize == NULL) return -1; /* remove targets */ msr->removed_targets = apr_table_make(msr->mp, 16); if (msr->removed_targets == NULL) return -1; /* Initialise cookies */ msr->request_cookies = apr_table_make(msr->mp, 16); if (msr->request_cookies == NULL) return -1; /* Initialize matched vars */ msr->matched_vars = apr_table_make(msr->mp, 8); if (msr->matched_vars == NULL) return -1; apr_table_clear(msr->matched_vars); msr->perf_rules = apr_table_make(msr->mp, 8); if (msr->perf_rules == NULL) return -1; apr_table_clear(msr->perf_rules); /* Locate the cookie headers and parse them */ arr = apr_table_elts(msr->request_headers); te = (apr_table_entry_t *)arr->elts; for (i = 0; i < arr->nelts; i++) { if (strcasecmp(te[i].key, "Cookie") == 0) { if (msr->txcfg->cookie_format == COOKIES_V0) { semicolon = apr_pstrdup(msr->mp, te[i].val); while((*semicolon != 0)&&(*semicolon != ';')) semicolon++; if(*semicolon == ';') { parse_cookies_v0(msr, te[i].val, msr->request_cookies, ";"); } else { comma = apr_pstrdup(msr->mp, te[i].val); while((*comma != 0)&&(*comma != ',')) comma++; if(*comma == ',') { comma++; if(*comma == 0x20) {// looks like comma is the separator if (msr->txcfg->debuglog_level >= 5) { msr_log(msr, 5, "Cookie v0 parser: Using comma as a separator. Semi-colon was not identified!"); } parse_cookies_v0(msr, te[i].val, msr->request_cookies, ","); } else { parse_cookies_v0(msr, te[i].val, msr->request_cookies, ";"); } } else { parse_cookies_v0(msr, te[i].val, msr->request_cookies, ";"); } } } else { parse_cookies_v1(msr, te[i].val, msr->request_cookies); } } } /* Collections. 
*/ msr->tx_vars = apr_table_make(msr->mp, 32); if (msr->tx_vars == NULL) return -1; msr->geo_vars = apr_table_make(msr->mp, 8); if (msr->geo_vars == NULL) return -1; msr->collections_original = apr_table_make(msr->mp, 8); if (msr->collections_original == NULL) return -1; msr->collections = apr_table_make(msr->mp, 8); if (msr->collections == NULL) return -1; msr->collections_dirty = apr_table_make(msr->mp, 8); if (msr->collections_dirty == NULL) return -1; /* Other */ msr->tcache = NULL; msr->tcache_items = 0; msr->matched_rules = apr_array_make(msr->mp, 16, sizeof(void *)); if (msr->matched_rules == NULL) return -1; msr->matched_var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string)); if (msr->matched_var == NULL) return -1; msr->highest_severity = 255; /* high, invalid value */ msr->removed_rules = apr_array_make(msr->mp, 16, sizeof(char *)); if (msr->removed_rules == NULL) return -1; msr->removed_rules_tag = apr_array_make(msr->mp, 16, sizeof(char *)); if (msr->removed_rules_tag == NULL) return -1; msr->removed_rules_msg = apr_array_make(msr->mp, 16, sizeof(char *)); if (msr->removed_rules_msg == NULL) return -1; return 1; } /** * */ static int is_response_status_relevant(modsec_rec *msr, int status) { char *my_error_msg = NULL; apr_status_t rc; char buf[32]; /* ENH: Setting is_relevant here will cause an audit even if noauditlog * was set for the last rule that matched. Is this what we want? */ if ((msr->txcfg->auditlog_relevant_regex == NULL) ||(msr->txcfg->auditlog_relevant_regex == NOT_SET_P)) { return 0; } apr_snprintf(buf, sizeof(buf), "%d", status); rc = msc_regexec(msr->txcfg->auditlog_relevant_regex, buf, strlen(buf), &my_error_msg); if (rc >= 0) return 1; if (rc == PCRE_ERROR_NOMATCH) return 0; msr_log(msr, 1, "Regex processing failed (rc %d): %s", rc, my_error_msg); return 0; } /** * */ static apr_status_t modsecurity_process_phase_request_headers(modsec_rec *msr) { apr_time_t time_before; apr_status_t rc = 0; if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Starting phase REQUEST_HEADERS."); } time_before = apr_time_now(); if (msr->txcfg->ruleset != NULL) { rc = msre_ruleset_process_phase(msr->txcfg->ruleset, msr); } msr->time_phase1 = apr_time_now() - time_before; return rc; } /** * */ static apr_status_t modsecurity_process_phase_request_body(modsec_rec *msr) { apr_time_t time_before; apr_status_t rc = 0; if ((msr->allow_scope == ACTION_ALLOW_REQUEST)||(msr->allow_scope == ACTION_ALLOW)) { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Skipping phase REQUEST_BODY (allow used)."); } return 0; } else { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Starting phase REQUEST_BODY."); } } time_before = apr_time_now(); if (msr->txcfg->ruleset != NULL) { rc = msre_ruleset_process_phase(msr->txcfg->ruleset, msr); } msr->time_phase2 = apr_time_now() - time_before; return rc; } /** * */ static apr_status_t modsecurity_process_phase_response_headers(modsec_rec *msr) { apr_time_t time_before; apr_status_t rc = 0; if (msr->allow_scope == ACTION_ALLOW) { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Skipping phase RESPONSE_HEADERS (allow used)."); } return 0; } else { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Starting phase RESPONSE_HEADERS."); } } time_before = apr_time_now(); if (msr->txcfg->ruleset != NULL) { rc = msre_ruleset_process_phase(msr->txcfg->ruleset, msr); } msr->time_phase3 = apr_time_now() - time_before; return rc; } /** * */ static apr_status_t modsecurity_process_phase_response_body(modsec_rec *msr) { apr_time_t 
time_before; apr_status_t rc = 0; if (msr->allow_scope == ACTION_ALLOW) { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Skipping phase RESPONSE_BODY (allow used)."); } return 0; } else { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Starting phase RESPONSE_BODY."); } } time_before = apr_time_now(); if (msr->txcfg->ruleset != NULL) { rc = msre_ruleset_process_phase(msr->txcfg->ruleset, msr); } msr->time_phase4 = apr_time_now() - time_before; return rc; } /** * */ static apr_status_t modsecurity_process_phase_logging(modsec_rec *msr) { apr_time_t time_before, time_after; if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Starting phase LOGGING."); } time_before = apr_time_now(); if (msr->txcfg->ruleset != NULL) { msre_ruleset_process_phase(msr->txcfg->ruleset, msr); } modsecurity_persist_data(msr); time_after = apr_time_now(); msr->time_phase5 = time_after - time_before; /* Is this request relevant for logging purposes? */ if (msr->is_relevant == 0) { /* Check the status */ msr->is_relevant += is_response_status_relevant(msr, msr->r->status); /* If we processed two requests and statuses are different then * check the other status too. */ if (msr->r_early->status != msr->r->status) { msr->is_relevant += is_response_status_relevant(msr, msr->r_early->status); } } /* Figure out if we want to keep the files (if there are any, of course). */ if ((msr->txcfg->upload_keep_files == KEEP_FILES_ON) || ((msr->txcfg->upload_keep_files == KEEP_FILES_RELEVANT_ONLY)&&(msr->is_relevant))) { msr->upload_remove_files = 0; } else { msr->upload_remove_files = 1; } /* Are we configured for audit logging? */ switch(msr->txcfg->auditlog_flag) { case AUDITLOG_OFF : if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Audit log: Not configured to run for this request."); } return DECLINED; break; case AUDITLOG_RELEVANT : if (msr->is_relevant == 0) { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Audit log: Ignoring a non-relevant request."); } return DECLINED; } break; case AUDITLOG_ON : /* All right, do nothing */ break; default : msr_log(msr, 1, "Internal error: Could not determine if auditing is needed, so forcing auditing."); break; } /* Invoke the Audit logger */ if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Audit log: Logging this transaction."); } sec_audit_logger(msr); msr->time_logging = apr_time_now() - time_after; return 0; } /** * Processes one transaction phase. The phase number does not * need to be explicitly provided since it's already available * in the modsec_rec structure. */ apr_status_t modsecurity_process_phase(modsec_rec *msr, unsigned int phase) { /* Check if we should run. */ if ((msr->was_intercepted)&&(phase != PHASE_LOGGING)) { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Skipping phase %d as request was already intercepted.", phase); } return 0; } /* Do not process the same phase twice. 
*/ if (msr->phase >= phase) { if (msr->txcfg->debuglog_level >= 4) { msr_log(msr, 4, "Skipping phase %d because it was previously run (at %d now).", phase, msr->phase); } return 0; } msr->phase = phase; /* Clear out the transformation cache at the start of each phase */ if (msr->txcfg->cache_trans == MODSEC_CACHE_ENABLED) { if (msr->tcache) { apr_hash_index_t *hi; void *dummy; apr_table_t *tab; const void *key; apr_ssize_t klen; #ifdef CACHE_DEBUG apr_pool_t *mp = msr->msc_rule_mptmp; const apr_array_header_t *ctarr; const apr_table_entry_t *ctelts; msre_cache_rec *rec; int cn = 0; int ri; #else apr_pool_t *mp = msr->mp; #endif for (hi = apr_hash_first(mp, msr->tcache); hi; hi = apr_hash_next(hi)) { apr_hash_this(hi, &key, &klen, &dummy); tab = (apr_table_t *)dummy; if (tab == NULL) continue; #ifdef CACHE_DEBUG /* Dump the cache out as we clear */ ctarr = apr_table_elts(tab); ctelts = (const apr_table_entry_t*)ctarr->elts; for (ri = 0; ri < ctarr->nelts; ri++) { cn++; rec = (msre_cache_rec *)ctelts[ri].val; if (rec->changed) { if (msr->txcfg->debuglog_level >= 9) { msr_log(msr, 9, "CACHE: %5d) hits=%d key=%pp %x;%s=\"%s\" (%pp - %pp)", cn, rec->hits, key, rec->num, rec->path, log_escape_nq_ex(mp, rec->val, rec->val_len), rec->val, rec->val + rec->val_len); } } else { if (msr->txcfg->debuglog_level >= 9) { msr_log(msr, 9, "CACHE: %5d) hits=%d key=%pp %x;%s=<no change>", cn, rec->hits, key, rec->num, rec->path); } } } #endif apr_table_clear(tab); apr_hash_set(msr->tcache, key, klen, NULL); } if (msr->txcfg->debuglog_level >= 9) { msr_log(msr, 9, "Cleared transformation cache for phase %d", msr->phase); } } msr->tcache_items = 0; msr->tcache = apr_hash_make(msr->mp); if (msr->tcache == NULL) return -1; } switch(phase) { case 1 : return modsecurity_process_phase_request_headers(msr); case 2 : return modsecurity_process_phase_request_body(msr); case 3 : return modsecurity_process_phase_response_headers(msr); case 4 : return modsecurity_process_phase_response_body(msr); case 5 : return modsecurity_process_phase_logging(msr); default : msr_log(msr, 1, "Invalid processing phase: %d", msr->phase); break; } return -1; }
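
/*
 * A standalone sketch of the Cookie v0 separator heuristic that
 * modsecurity_tx_init() above applies: prefer ';' whenever one is present;
 * otherwise, if the first ',' is immediately followed by a space, treat the
 * header as comma-separated.  The function name is hypothetical and the
 * sketch omits the apr_pstrdup() copies the module makes.
 */
#include <stdio.h>

static char pick_cookie_separator(const char *header)
{
	const char *p;

	for (p = header; *p != '\0'; p++) {
		if (*p == ';')
			return ';';	/* a semicolon always wins */
	}

	for (p = header; *p != '\0'; p++) {
		if (*p == ',') {
			if (p[1] == ' ')	/* ", " => comma-separated */
				return ',';
			break;
		}
	}

	return ';';	/* default, as in the module */
}

int main(void)
{
	printf("'%c'\n", pick_cookie_separator("a=1; b=2"));	/* ';' */
	printf("'%c'\n", pick_cookie_separator("a=1, b=2"));	/* ',' */
	return 0;
}
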
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5786_0
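
/*
 * A userspace approximation of the probabilistic garbage-collection trigger
 * in modsecurity_persist_data() above: stale persistent collections are
 * swept on only about 1 in 100 transactions, amortizing the cleanup cost.
 * One deliberate difference: the module re-seeds with srand(time(NULL)) on
 * every transaction, which makes the draw constant within any one second;
 * this sketch seeds once.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	int i, swept = 0;

	srand((unsigned int)time(NULL));
	for (i = 0; i < 100000; i++) {
		if (rand() < RAND_MAX / 100)	/* ~1% of transactions */
			swept++;
	}

	printf("swept on %d of 100000 simulated transactions\n", swept);
	return 0;
}
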
crossvul-cpp_data_good_5591_0
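
/*
 * The Edgeport driver below reads device memory in read_download_mem()
 * using at most 64-byte vendor requests, because the TI firmware cannot
 * handle larger reads.  This is a userspace sketch of that chunking loop;
 * read_block() is a hypothetical stand-in for the USB control transfer.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for one <= 64-byte vendor control read */
static int read_block(int addr, uint8_t *buf, uint8_t len)
{
	memset(buf, (uint8_t)addr, len);	/* fake device data */
	return 0;
}

static int read_mem(int start, int length, uint8_t *buffer)
{
	while (length > 0) {
		uint8_t chunk = length > 64 ? 64 : (uint8_t)length;
		int status = read_block(start, buffer, chunk);

		if (status)
			return status;	/* propagate the first error */

		start += chunk;
		buffer += chunk;
		length -= chunk;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[150];	/* forces 64 + 64 + 22 byte reads */

	if (read_mem(0x1000, (int)sizeof(buf), buf) == 0)
		printf("read %zu bytes in <= 64-byte chunks\n", sizeof(buf));
	return 0;
}
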
/* * Edgeport USB Serial Converter driver * * Copyright (C) 2000-2002 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Supports the following devices: * EP/1 EP/2 EP/4 EP/21 EP/22 EP/221 EP/42 EP/421 WATCHPORT * * For questions or problems with this driver, contact Inside Out * Networks technical support, or Peter Berger <pberger@brimson.com>, * or Al Borchers <alborchers@steinerpoint.com>. */ #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/serial.h> #include <linux/kfifo.h> #include <linux/ioctl.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "io_16654.h" #include "io_usbvend.h" #include "io_ti.h" #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com> and David Iacovelli" #define DRIVER_DESC "Edgeport USB Serial Driver" #define EPROM_PAGE_SIZE 64 /* different hardware types */ #define HARDWARE_TYPE_930 0 #define HARDWARE_TYPE_TIUMP 1 /* IOCTL_PRIVATE_TI_GET_MODE Definitions */ #define TI_MODE_CONFIGURING 0 /* Device has not entered start device */ #define TI_MODE_BOOT 1 /* Staying in boot mode */ #define TI_MODE_DOWNLOAD 2 /* Made it to download mode */ #define TI_MODE_TRANSITIONING 3 /* Currently in boot mode but transitioning to download mode */ /* read urb state */ #define EDGE_READ_URB_RUNNING 0 #define EDGE_READ_URB_STOPPING 1 #define EDGE_READ_URB_STOPPED 2 #define EDGE_CLOSING_WAIT 4000 /* in .01 sec */ #define EDGE_OUT_BUF_SIZE 1024 /* Product information read from the Edgeport */ struct product_info { int TiMode; /* Current TI Mode */ __u8 hardware_type; /* Type of hardware */ } __attribute__((packed)); struct edgeport_port { __u16 uart_base; __u16 dma_address; __u8 shadow_msr; __u8 shadow_mcr; __u8 shadow_lsr; __u8 lsr_mask; __u32 ump_read_timeout; /* Number of milliseconds the UMP will wait without data before completing a read short */ int baud_rate; int close_pending; int lsr_event; struct async_icount icount; wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ struct edgeport_serial *edge_serial; struct usb_serial_port *port; __u8 bUartMode; /* Port type, 0: RS232, etc. 
*/ spinlock_t ep_lock; int ep_read_urb_state; int ep_write_urb_in_use; struct kfifo write_fifo; }; struct edgeport_serial { struct product_info product_info; u8 TI_I2C_Type; /* Type of I2C in UMP */ u8 TiReadI2C; /* Set to TRUE if we have read the I2c in Boot Mode */ struct mutex es_lock; int num_ports_open; struct usb_serial *serial; }; /* Devices that this driver supports */ static const struct usb_device_id edgeport_1port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) }, { } }; static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) }, /* The 4, 8 and 16 port devices show up as multiple 2 port devices */ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, { } }; /* Devices that this driver supports */ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) }, { USB_DEVICE(USB_VENDOR_ID_ION, 
ION_DEVICE_ID_WP_DISTANCE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table_combined);

static unsigned char OperationalMajorVersion;
static unsigned char OperationalMinorVersion;
static unsigned short OperationalBuildNumber;

static int closing_wait = EDGE_CLOSING_WAIT;
static bool ignore_cpu_rev;
static int default_uart_mode;		/* RS232 */

static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
		unsigned char *data, int length);

static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);

static void edge_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios);
static void edge_send(struct tty_struct *tty);

/* sysfs attributes */
static int edge_create_sysfs_attrs(struct usb_serial_port *port);
static int edge_remove_sysfs_attrs(struct usb_serial_port *port);

static int ti_vread_sync(struct usb_device *dev, __u8 request,
				__u16 value, __u16 index, u8 *data, int size)
{
	int status;

	status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
			(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN),
			value, index, data, size, 1000);
	if (status < 0)
		return status;
	if (status != size) {
		dev_dbg(&dev->dev, "%s - wanted to read %d, but only read %d\n",
			__func__, size, status);
		return -ECOMM;
	}
	return 0;
}

static int ti_vsend_sync(struct usb_device *dev, __u8 request,
				__u16 value, __u16 index, u8 *data, int size)
{
	int status;

	status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
			(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
			value, index, data, size, 1000);
	if (status < 0)
		return status;
	if (status != size) {
		dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n",
			__func__, size, status);
		return -ECOMM;
	}
	return 0;
}

static int send_cmd(struct usb_device *dev, __u8 command,
				__u8 moduleid, __u16 value, u8 *data, int size)
{
	return ti_vsend_sync(dev, command, value, moduleid, data, size);
}

/* clear tx/rx buffers and fifo in TI UMP */
static int purge_port(struct usb_serial_port *port, __u16 mask)
{
	int port_number = port->number - port->serial->minor;

	dev_dbg(&port->dev, "%s - port %d, mask %x\n", __func__, port_number, mask);

	return send_cmd(port->serial->dev,
			UMPC_PURGE_PORT,
			(__u8)(UMPM_UART1_PORT + port_number),
			mask,
			NULL,
			0);
}

/**
 * read_download_mem - Read edgeport memory from TI chip
 * @dev: usb device pointer
 * @start_address: Device CPU address at which to read
 * @length: Length of above data
 * @address_type: Can read both XDATA and I2C
 * @buffer: pointer to a buffer where the read data is returned
 */
static int read_download_mem(struct usb_device *dev, int start_address,
				int length, __u8 address_type, __u8 *buffer)
{
	int status = 0;
	__u8 read_length;
	__be16 be_start_address;

	dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);

	/* Read in blocks of 64 bytes
	 * (TI firmware can't handle more than 64 byte reads)
	 */
	while (length) {
		if (length > 64)
			read_length = 64;
		else
			read_length = (__u8)length;

		if (read_length > 1) {
			dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__,
				start_address, read_length);
		}
		be_start_address = cpu_to_be16(start_address);
		status = ti_vread_sync(dev, UMPC_MEMORY_READ,
					(__u16)address_type,
					(__force __u16)be_start_address,
					buffer, read_length);

		if (status) {
			dev_dbg(&dev->dev, "%s - ERROR %x\n", __func__, status);
			return status;
		}

		if (read_length > 1)
			usb_serial_debug_data(&dev->dev, __func__, read_length,
					      buffer);

		/* Update pointers/length */
		start_address += read_length;
		buffer += read_length;
		length -= read_length;
	}

	return status;
}

static int read_ram(struct usb_device *dev, int start_address,
						int length, __u8 *buffer)
{
	return read_download_mem(dev, start_address, length,
					DTK_ADDR_SPACE_XDATA, buffer);
}

/* Read edgeport memory into the given buffer */
static int read_boot_mem(struct edgeport_serial *serial,
				int start_address, int length, __u8 *buffer)
{
	int status = 0;
	int i;

	for (i = 0; i < length; i++) {
		status = ti_vread_sync(serial->serial->dev,
				UMPC_MEMORY_READ, serial->TI_I2C_Type,
				(__u16)(start_address+i), &buffer[i], 0x01);
		if (status) {
			dev_dbg(&serial->serial->dev->dev, "%s - ERROR %x\n",
				__func__, status);
			return status;
		}
	}

	dev_dbg(&serial->serial->dev->dev, "%s - start_address = %x, length = %d\n",
		__func__, start_address, length);
	usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);

	serial->TiReadI2C = 1;

	return status;
}

/* Write given block to TI EPROM memory */
static int write_boot_mem(struct edgeport_serial *serial,
				int start_address, int length, __u8 *buffer)
{
	int status = 0;
	int i;
	u8 *temp;

	/* Must do a read before write */
	if (!serial->TiReadI2C) {
		temp = kmalloc(1, GFP_KERNEL);
		if (!temp) {
			dev_err(&serial->serial->dev->dev, "%s - out of memory\n",
				__func__);
			return -ENOMEM;
		}
		status = read_boot_mem(serial, 0, 1, temp);
		kfree(temp);
		if (status)
			return status;
	}

	for (i = 0; i < length; ++i) {
		status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
				buffer[i], (__u16)(i + start_address), NULL, 0);
		if (status)
			return status;
	}

	dev_dbg(&serial->serial->dev->dev, "%s - start_addr = %x, length = %d\n",
		__func__, start_address, length);
	usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);

	return status;
}

/* Write edgeport I2C memory to TI chip */
static int write_i2c_mem(struct edgeport_serial *serial,
		int start_address, int length, __u8 address_type, __u8 *buffer)
{
	struct device *dev = &serial->serial->dev->dev;
	int status = 0;
	int write_length;
	__be16 be_start_address;

	/* We can only send a maximum of 1 aligned byte page at a time */

	/* calculate the number of bytes left in the first page */
	write_length = EPROM_PAGE_SIZE -
				(start_address & (EPROM_PAGE_SIZE - 1));

	if (write_length > length)
		write_length = length;
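	/*
	 * Worked example (assuming EPROM_PAGE_SIZE is 64): a write starting
	 * at address 0x1033 sits 0x33 bytes into its page, so this first
	 * chunk is capped at 64 - 0x33 = 13 bytes; every later chunk then
	 * starts page aligned and may use the full page size.
	 */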
dev_dbg(dev, "%s - BytesInFirstPage Addr = %x, length = %d\n", __func__, start_address, write_length); usb_serial_debug_data(dev, __func__, write_length, buffer); /* Write first page */ be_start_address = cpu_to_be16(start_address); status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, (__u16)address_type, (__force __u16)be_start_address, buffer, write_length); if (status) { dev_dbg(dev, "%s - ERROR %d\n", __func__, status); return status; } length -= write_length; start_address += write_length; buffer += write_length; /* We should be aligned now -- can write max page size bytes at a time */ while (length) { if (length > EPROM_PAGE_SIZE) write_length = EPROM_PAGE_SIZE; else write_length = length; dev_dbg(dev, "%s - Page Write Addr = %x, length = %d\n", __func__, start_address, write_length); usb_serial_debug_data(dev, __func__, write_length, buffer); /* Write next page */ be_start_address = cpu_to_be16(start_address); status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, (__u16)address_type, (__force __u16)be_start_address, buffer, write_length); if (status) { dev_err(dev, "%s - ERROR %d\n", __func__, status); return status; } length -= write_length; start_address += write_length; buffer += write_length; } return status; } /* Examine the UMP DMA registers and LSR * * Check the MSBit of the X and Y DMA byte count registers. * A zero in this bit indicates that the TX DMA buffers are empty * then check the TX Empty bit in the UART. */ static int tx_active(struct edgeport_port *port) { int status; struct out_endpoint_desc_block *oedb; __u8 *lsr; int bytes_left = 0; oedb = kmalloc(sizeof(*oedb), GFP_KERNEL); if (!oedb) { dev_err(&port->port->dev, "%s - out of memory\n", __func__); return -ENOMEM; } lsr = kmalloc(1, GFP_KERNEL); /* Sigh, that's right, just one byte, as not all platforms can do DMA from stack */ if (!lsr) { kfree(oedb); return -ENOMEM; } /* Read the DMA Count Registers */ status = read_ram(port->port->serial->dev, port->dma_address, sizeof(*oedb), (void *)oedb); if (status) goto exit_is_tx_active; dev_dbg(&port->port->dev, "%s - XByteCount 0x%X\n", __func__, oedb->XByteCount); /* and the LSR */ status = read_ram(port->port->serial->dev, port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr); if (status) goto exit_is_tx_active; dev_dbg(&port->port->dev, "%s - LSR = 0x%X\n", __func__, *lsr); /* If either buffer has data or we are transmitting then return TRUE */ if ((oedb->XByteCount & 0x80) != 0) bytes_left += 64; if ((*lsr & UMP_UART_LSR_TX_MASK) == 0) bytes_left += 1; /* We return Not Active if we get any kind of error */ exit_is_tx_active: dev_dbg(&port->port->dev, "%s - return %d\n", __func__, bytes_left); kfree(lsr); kfree(oedb); return bytes_left; } static void chase_port(struct edgeport_port *port, unsigned long timeout, int flush) { int baud_rate; struct tty_struct *tty = tty_port_tty_get(&port->port->port); struct usb_serial *serial = port->port->serial; wait_queue_t wait; unsigned long flags; if (!tty) return; if (!timeout) timeout = (HZ * EDGE_CLOSING_WAIT)/100; /* wait for data to drain from the buffer */ spin_lock_irqsave(&port->ep_lock, flags); init_waitqueue_entry(&wait, current); add_wait_queue(&tty->write_wait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (kfifo_len(&port->write_fifo) == 0 || timeout == 0 || signal_pending(current) || serial->disconnected) /* disconnect */ break; spin_unlock_irqrestore(&port->ep_lock, flags); timeout = schedule_timeout(timeout); spin_lock_irqsave(&port->ep_lock, flags); } set_current_state(TASK_RUNNING); 
remove_wait_queue(&tty->write_wait, &wait); if (flush) kfifo_reset_out(&port->write_fifo); spin_unlock_irqrestore(&port->ep_lock, flags); tty_kref_put(tty); /* wait for data to drain from the device */ timeout += jiffies; while ((long)(jiffies - timeout) < 0 && !signal_pending(current) && !serial->disconnected) { /* not disconnected */ if (!tx_active(port)) break; msleep(10); } /* disconnected */ if (serial->disconnected) return; /* wait one more character time, based on baud rate */ /* (tx_active doesn't seem to wait for the last byte) */ baud_rate = port->baud_rate; if (baud_rate == 0) baud_rate = 50; msleep(max(1, DIV_ROUND_UP(10000, baud_rate))); } static int choose_config(struct usb_device *dev) { /* * There may be multiple configurations on this device, in which case * we would need to read and parse all of them to find out which one * we want. However, we just support one config at this point, * configuration # 1, which is Config Descriptor 0. */ dev_dbg(&dev->dev, "%s - Number of Interfaces = %d\n", __func__, dev->config->desc.bNumInterfaces); dev_dbg(&dev->dev, "%s - MAX Power = %d\n", __func__, dev->config->desc.bMaxPower * 2); if (dev->config->desc.bNumInterfaces != 1) { dev_err(&dev->dev, "%s - bNumInterfaces is not 1, ERROR!\n", __func__); return -ENODEV; } return 0; } static int read_rom(struct edgeport_serial *serial, int start_address, int length, __u8 *buffer) { int status; if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) { status = read_download_mem(serial->serial->dev, start_address, length, serial->TI_I2C_Type, buffer); } else { status = read_boot_mem(serial, start_address, length, buffer); } return status; } static int write_rom(struct edgeport_serial *serial, int start_address, int length, __u8 *buffer) { if (serial->product_info.TiMode == TI_MODE_BOOT) return write_boot_mem(serial, start_address, length, buffer); if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) return write_i2c_mem(serial, start_address, length, serial->TI_I2C_Type, buffer); return -EINVAL; } /* Read a descriptor header from I2C based on type */ static int get_descriptor_addr(struct edgeport_serial *serial, int desc_type, struct ti_i2c_desc *rom_desc) { int start_address; int status; /* Search for requested descriptor in I2C */ start_address = 2; do { status = read_rom(serial, start_address, sizeof(struct ti_i2c_desc), (__u8 *)rom_desc); if (status) return 0; if (rom_desc->Type == desc_type) return start_address; start_address = start_address + sizeof(struct ti_i2c_desc) + rom_desc->Size; } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type); return 0; } /* Validate descriptor checksum */ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer) { __u16 i; __u8 cs = 0; for (i = 0; i < rom_desc->Size; i++) cs = (__u8)(cs + buffer[i]); if (cs != rom_desc->CheckSum) { pr_debug("%s - Mismatch %x - %x", __func__, rom_desc->CheckSum, cs); return -EINVAL; } return 0; } /* Make sure that the I2C image is good */ static int check_i2c_image(struct edgeport_serial *serial) { struct device *dev = &serial->serial->dev->dev; int status = 0; struct ti_i2c_desc *rom_desc; int start_address = 2; __u8 *buffer; __u16 ttype; rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL); if (!rom_desc) { dev_err(dev, "%s - out of memory\n", __func__); return -ENOMEM; } buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL); if (!buffer) { dev_err(dev, "%s - out of memory when allocating buffer\n", __func__); kfree(rom_desc); return -ENOMEM; } /* Read the first byte (Signature0) must be 0x52 or 0x10 */ status = 
read_rom(serial, 0, 1, buffer); if (status) goto out; if (*buffer != UMP5152 && *buffer != UMP3410) { dev_err(dev, "%s - invalid buffer signature\n", __func__); status = -ENODEV; goto out; } do { /* Validate the I2C */ status = read_rom(serial, start_address, sizeof(struct ti_i2c_desc), (__u8 *)rom_desc); if (status) break; if ((start_address + sizeof(struct ti_i2c_desc) + rom_desc->Size) > TI_MAX_I2C_SIZE) { status = -ENODEV; dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__); break; } dev_dbg(dev, "%s Type = 0x%x\n", __func__, rom_desc->Type); /* Skip type 2 record */ ttype = rom_desc->Type & 0x0f; if (ttype != I2C_DESC_TYPE_FIRMWARE_BASIC && ttype != I2C_DESC_TYPE_FIRMWARE_AUTO) { /* Read the descriptor data */ status = read_rom(serial, start_address + sizeof(struct ti_i2c_desc), rom_desc->Size, buffer); if (status) break; status = valid_csum(rom_desc, buffer); if (status) break; } start_address = start_address + sizeof(struct ti_i2c_desc) + rom_desc->Size; } while ((rom_desc->Type != I2C_DESC_TYPE_ION) && (start_address < TI_MAX_I2C_SIZE)); if ((rom_desc->Type != I2C_DESC_TYPE_ION) || (start_address > TI_MAX_I2C_SIZE)) status = -ENODEV; out: kfree(buffer); kfree(rom_desc); return status; } static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer) { int status; int start_address; struct ti_i2c_desc *rom_desc; struct edge_ti_manuf_descriptor *desc; struct device *dev = &serial->serial->dev->dev; rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL); if (!rom_desc) { dev_err(dev, "%s - out of memory\n", __func__); return -ENOMEM; } start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION, rom_desc); if (!start_address) { dev_dbg(dev, "%s - Edge Descriptor not found in I2C\n", __func__); status = -ENODEV; goto exit; } /* Read the descriptor data */ status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc), rom_desc->Size, buffer); if (status) goto exit; status = valid_csum(rom_desc, buffer); desc = (struct edge_ti_manuf_descriptor *)buffer; dev_dbg(dev, "%s - IonConfig 0x%x\n", __func__, desc->IonConfig); dev_dbg(dev, "%s - Version %d\n", __func__, desc->Version); dev_dbg(dev, "%s - Cpu/Board 0x%x\n", __func__, desc->CpuRev_BoardRev); dev_dbg(dev, "%s - NumPorts %d\n", __func__, desc->NumPorts); dev_dbg(dev, "%s - NumVirtualPorts %d\n", __func__, desc->NumVirtualPorts); dev_dbg(dev, "%s - TotalPorts %d\n", __func__, desc->TotalPorts); exit: kfree(rom_desc); return status; } /* Build firmware header used for firmware update */ static int build_i2c_fw_hdr(__u8 *header, struct device *dev) { __u8 *buffer; int buffer_size; int i; int err; __u8 cs = 0; struct ti_i2c_desc *i2c_header; struct ti_i2c_image_header *img_header; struct ti_i2c_firmware_rec *firmware_rec; const struct firmware *fw; const char *fw_name = "edgeport/down3.bin"; /* In order to update the I2C firmware we must change the type 2 record * to type 0xF2. This will force the UMP to come up in Boot Mode. * Then while in boot mode, the driver will download the latest * firmware (padded to 15.5k) into the UMP ram. And finally when the * device comes back up in download mode the driver will cause the new * firmware to be copied from the UMP Ram to I2C and the firmware will * update the record type from 0xf2 to 0x02. 
*/

	/* Allocate a 15.5k buffer + 2 bytes for version number
	 * (Firmware Record) */
	buffer_size = (((1024 * 16) - 512) +
			sizeof(struct ti_i2c_firmware_rec));

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer) {
		dev_err(dev, "%s - out of memory\n", __func__);
		return -ENOMEM;
	}

	/* Set entire image to 0xff (the checksum below covers the padding) */
	memset(buffer, 0xff, buffer_size);

	err = request_firmware(&fw, fw_name, dev);
	if (err) {
		dev_err(dev, "Failed to load image \"%s\" err %d\n",
			fw_name, err);
		kfree(buffer);
		return err;
	}

	/* Save Download Version Number */
	OperationalMajorVersion = fw->data[0];
	OperationalMinorVersion = fw->data[1];
	OperationalBuildNumber = fw->data[2] | (fw->data[3] << 8);

	/* Copy version number into firmware record */
	firmware_rec = (struct ti_i2c_firmware_rec *)buffer;

	firmware_rec->Ver_Major = OperationalMajorVersion;
	firmware_rec->Ver_Minor = OperationalMinorVersion;

	/* Pointer to fw_down memory image */
	img_header = (struct ti_i2c_image_header *)&fw->data[4];

	memcpy(buffer + sizeof(struct ti_i2c_firmware_rec),
		&fw->data[4 + sizeof(struct ti_i2c_image_header)],
		le16_to_cpu(img_header->Length));

	release_firmware(fw);

	for (i = 0; i < buffer_size; i++)
		cs = (__u8)(cs + buffer[i]);

	kfree(buffer);

	/* Build new header */
	i2c_header = (struct ti_i2c_desc *)header;
	firmware_rec = (struct ti_i2c_firmware_rec *)i2c_header->Data;

	i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
	/* descriptor fields are little-endian on the wire (cf. the
	 * le16_to_cpu() on img_header->Length above) */
	i2c_header->Size = cpu_to_le16(buffer_size);
	i2c_header->CheckSum = cs;
	firmware_rec->Ver_Major = OperationalMajorVersion;
	firmware_rec->Ver_Minor = OperationalMinorVersion;

	return 0;
}

/* Try to figure out what type of I2c we have */
static int i2c_type_bootmode(struct edgeport_serial *serial)
{
	struct device *dev = &serial->serial->dev->dev;
	int status;
	u8 *data;

	data = kmalloc(1, GFP_KERNEL);
	if (!data) {
		dev_err(dev, "%s - out of memory\n", __func__);
		return -ENOMEM;
	}

	/* Try to read type 2 */
	status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
				DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
	if (status)
		dev_dbg(dev, "%s - read 2 status error = %d\n", __func__, status);
	else
		dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
	if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
		dev_dbg(dev, "%s - ROM_TYPE_II\n", __func__);
		serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
		goto out;
	}

	/* Try to read type 3 */
	status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
				DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
	if (status)
		dev_dbg(dev, "%s - read 3 status error = %d\n", __func__, status);
	else
		dev_dbg(dev, "%s - read 3 data = 0x%x\n", __func__, *data);
	if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
		dev_dbg(dev, "%s - ROM_TYPE_III\n", __func__);
		serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
		goto out;
	}

	dev_dbg(dev, "%s - Unknown\n", __func__);
	serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
	status = -ENODEV;
out:
	kfree(data);
	return status;
}

static int bulk_xfer(struct usb_serial *serial, void *buffer,
						int length, int *num_sent)
{
	int status;

	status = usb_bulk_msg(serial->dev,
			usb_sndbulkpipe(serial->dev,
				serial->port[0]->bulk_out_endpointAddress),
			buffer, length, num_sent, 1000);
	return status;
}

/* Download given firmware image to the device (IN BOOT MODE) */
static int download_code(struct edgeport_serial *serial, __u8 *image,
							int image_length)
{
	int status = 0;
	int pos;
	int transfer;
	int done;

	/* Transfer firmware image */
	for (pos = 0; pos < image_length; ) {
		/* Read the next buffer from file */
		transfer = image_length - pos;
		if (transfer > EDGE_FW_BULK_MAX_PACKET_SIZE)
			transfer =
EDGE_FW_BULK_MAX_PACKET_SIZE; /* Transfer data */ status = bulk_xfer(serial->serial, &image[pos], transfer, &done); if (status) break; /* Advance buffer pointer */ pos += done; } return status; } /* FIXME!!! */ static int config_boot_dev(struct usb_device *dev) { return 0; } static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc) { return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev); } /** * DownloadTIFirmware - Download run-time operating firmware to the TI5052 * * This routine downloads the main operating code into the TI5052, using the * boot code already burned into E2PROM or ROM. */ static int download_fw(struct edgeport_serial *serial) { struct device *dev = &serial->serial->dev->dev; int status = 0; int start_address; struct edge_ti_manuf_descriptor *ti_manuf_desc; struct usb_interface_descriptor *interface; int download_cur_ver; int download_new_ver; /* This routine is entered by both the BOOT mode and the Download mode * We can determine which code is running by the reading the config * descriptor and if we have only one bulk pipe it is in boot mode */ serial->product_info.hardware_type = HARDWARE_TYPE_TIUMP; /* Default to type 2 i2c */ serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; status = choose_config(serial->serial->dev); if (status) return status; interface = &serial->serial->interface->cur_altsetting->desc; if (!interface) { dev_err(dev, "%s - no interface set, error!\n", __func__); return -ENODEV; } /* * Setup initial mode -- the default mode 0 is TI_MODE_CONFIGURING * if we have more than one endpoint we are definitely in download * mode */ if (interface->bNumEndpoints > 1) serial->product_info.TiMode = TI_MODE_DOWNLOAD; else /* Otherwise we will remain in configuring mode */ serial->product_info.TiMode = TI_MODE_CONFIGURING; /********************************************************************/ /* Download Mode */ /********************************************************************/ if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) { struct ti_i2c_desc *rom_desc; dev_dbg(dev, "%s - RUNNING IN DOWNLOAD MODE\n", __func__); status = check_i2c_image(serial); if (status) { dev_dbg(dev, "%s - DOWNLOAD MODE -- BAD I2C\n", __func__); return status; } /* Validate Hardware version number * Read Manufacturing Descriptor from TI Based Edgeport */ ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL); if (!ti_manuf_desc) { dev_err(dev, "%s - out of memory.\n", __func__); return -ENOMEM; } status = get_manuf_info(serial, (__u8 *)ti_manuf_desc); if (status) { kfree(ti_manuf_desc); return status; } /* Check version number of ION descriptor */ if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) { dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n", __func__, ti_cpu_rev(ti_manuf_desc)); kfree(ti_manuf_desc); return -EINVAL; } rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL); if (!rom_desc) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(ti_manuf_desc); return -ENOMEM; } /* Search for type 2 record (firmware record) */ start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc); if (start_address != 0) { struct ti_i2c_firmware_rec *firmware_version; u8 *record; dev_dbg(dev, "%s - Found Type FIRMWARE (Type 2) record\n", __func__); firmware_version = kmalloc(sizeof(*firmware_version), GFP_KERNEL); if (!firmware_version) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(rom_desc); kfree(ti_manuf_desc); return -ENOMEM; } /* Validate version number * Read the descriptor data */ status = read_rom(serial, start_address + sizeof(struct 
ti_i2c_desc), sizeof(struct ti_i2c_firmware_rec), (__u8 *)firmware_version); if (status) { kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return status; } /* Check version number of download with current version in I2c */ download_cur_ver = (firmware_version->Ver_Major << 8) + (firmware_version->Ver_Minor); download_new_ver = (OperationalMajorVersion << 8) + (OperationalMinorVersion); dev_dbg(dev, "%s - >> FW Versions Device %d.%d Driver %d.%d\n", __func__, firmware_version->Ver_Major, firmware_version->Ver_Minor, OperationalMajorVersion, OperationalMinorVersion); /* Check if we have an old version in the I2C and update if necessary */ if (download_cur_ver < download_new_ver) { dev_dbg(dev, "%s - Update I2C dld from %d.%d to %d.%d\n", __func__, firmware_version->Ver_Major, firmware_version->Ver_Minor, OperationalMajorVersion, OperationalMinorVersion); record = kmalloc(1, GFP_KERNEL); if (!record) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return -ENOMEM; } /* In order to update the I2C firmware we must * change the type 2 record to type 0xF2. This * will force the UMP to come up in Boot Mode. * Then while in boot mode, the driver will * download the latest firmware (padded to * 15.5k) into the UMP ram. Finally when the * device comes back up in download mode the * driver will cause the new firmware to be * copied from the UMP Ram to I2C and the * firmware will update the record type from * 0xf2 to 0x02. */ *record = I2C_DESC_TYPE_FIRMWARE_BLANK; /* Change the I2C Firmware record type to 0xf2 to trigger an update */ status = write_rom(serial, start_address, sizeof(*record), record); if (status) { kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return status; } /* verify the write -- must do this in order * for write to complete before we do the * hardware reset */ status = read_rom(serial, start_address, sizeof(*record), record); if (status) { kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return status; } if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) { dev_err(dev, "%s - error resetting device\n", __func__); kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return -ENODEV; } dev_dbg(dev, "%s - HARDWARE RESET\n", __func__); /* Reset UMP -- Back to BOOT MODE */ status = ti_vsend_sync(serial->serial->dev, UMPC_HARDWARE_RESET, 0, 0, NULL, 0); dev_dbg(dev, "%s - HARDWARE RESET return %d\n", __func__, status); /* return an error on purpose. */ kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return -ENODEV; } kfree(firmware_version); } /* Search for type 0xF2 record (firmware blank record) */ else if ((start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_FIRMWARE_BLANK, rom_desc)) != 0) { #define HEADER_SIZE (sizeof(struct ti_i2c_desc) + \ sizeof(struct ti_i2c_firmware_rec)) __u8 *header; __u8 *vheader; header = kmalloc(HEADER_SIZE, GFP_KERNEL); if (!header) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(rom_desc); kfree(ti_manuf_desc); return -ENOMEM; } vheader = kmalloc(HEADER_SIZE, GFP_KERNEL); if (!vheader) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return -ENOMEM; } dev_dbg(dev, "%s - Found Type BLANK FIRMWARE (Type F2) record\n", __func__); /* * In order to update the I2C firmware we must change * the type 2 record to type 0xF2. This will force the * UMP to come up in Boot Mode. 
Then while in boot * mode, the driver will download the latest firmware * (padded to 15.5k) into the UMP ram. Finally when the * device comes back up in download mode the driver * will cause the new firmware to be copied from the * UMP Ram to I2C and the firmware will update the * record type from 0xf2 to 0x02. */ status = build_i2c_fw_hdr(header, dev); if (status) { kfree(vheader); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return -EINVAL; } /* Update I2C with type 0xf2 record with correct size and checksum */ status = write_rom(serial, start_address, HEADER_SIZE, header); if (status) { kfree(vheader); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return -EINVAL; } /* verify the write -- must do this in order for write to complete before we do the hardware reset */ status = read_rom(serial, start_address, HEADER_SIZE, vheader); if (status) { dev_dbg(dev, "%s - can't read header back\n", __func__); kfree(vheader); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return status; } if (memcmp(vheader, header, HEADER_SIZE)) { dev_dbg(dev, "%s - write download record failed\n", __func__); kfree(vheader); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return -EINVAL; } kfree(vheader); kfree(header); dev_dbg(dev, "%s - Start firmware update\n", __func__); /* Tell firmware to copy download image into I2C */ status = ti_vsend_sync(serial->serial->dev, UMPC_COPY_DNLD_TO_I2C, 0, 0, NULL, 0); dev_dbg(dev, "%s - Update complete 0x%x\n", __func__, status); if (status) { dev_err(dev, "%s - UMPC_COPY_DNLD_TO_I2C failed\n", __func__); kfree(rom_desc); kfree(ti_manuf_desc); return status; } } // The device is running the download code kfree(rom_desc); kfree(ti_manuf_desc); return 0; } /********************************************************************/ /* Boot Mode */ /********************************************************************/ dev_dbg(dev, "%s - RUNNING IN BOOT MODE\n", __func__); /* Configure the TI device so we can use the BULK pipes for download */ status = config_boot_dev(serial->serial->dev); if (status) return status; if (le16_to_cpu(serial->serial->dev->descriptor.idVendor) != USB_VENDOR_ID_ION) { dev_dbg(dev, "%s - VID = 0x%x\n", __func__, le16_to_cpu(serial->serial->dev->descriptor.idVendor)); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; goto stayinbootmode; } /* We have an ION device (I2c Must be programmed) Determine I2C image type */ if (i2c_type_bootmode(serial)) goto stayinbootmode; /* Check for ION Vendor ID and that the I2C is valid */ if (!check_i2c_image(serial)) { struct ti_i2c_image_header *header; int i; __u8 cs = 0; __u8 *buffer; int buffer_size; int err; const struct firmware *fw; const char *fw_name = "edgeport/down3.bin"; /* Validate Hardware version number * Read Manufacturing Descriptor from TI Based Edgeport */ ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL); if (!ti_manuf_desc) { dev_err(dev, "%s - out of memory.\n", __func__); return -ENOMEM; } status = get_manuf_info(serial, (__u8 *)ti_manuf_desc); if (status) { kfree(ti_manuf_desc); goto stayinbootmode; } /* Check for version 2 */ if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) { dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n", __func__, ti_cpu_rev(ti_manuf_desc)); kfree(ti_manuf_desc); goto stayinbootmode; } kfree(ti_manuf_desc); /* * In order to update the I2C firmware we must change the type * 2 record to type 0xF2. This will force the UMP to come up * in Boot Mode. 
Then while in boot mode, the driver will * download the latest firmware (padded to 15.5k) into the * UMP ram. Finally when the device comes back up in download * mode the driver will cause the new firmware to be copied * from the UMP Ram to I2C and the firmware will update the * record type from 0xf2 to 0x02. * * Do we really have to copy the whole firmware image, * or could we do this in place! */ /* Allocate a 15.5k buffer + 3 byte header */ buffer_size = (((1024 * 16) - 512) + sizeof(struct ti_i2c_image_header)); buffer = kmalloc(buffer_size, GFP_KERNEL); if (!buffer) { dev_err(dev, "%s - out of memory\n", __func__); return -ENOMEM; } /* Initialize the buffer to 0xff (pad the buffer) */ memset(buffer, 0xff, buffer_size); err = request_firmware(&fw, fw_name, dev); if (err) { dev_err(dev, "Failed to load image \"%s\" err %d\n", fw_name, err); kfree(buffer); return err; } memcpy(buffer, &fw->data[4], fw->size - 4); release_firmware(fw); for (i = sizeof(struct ti_i2c_image_header); i < buffer_size; i++) { cs = (__u8)(cs + buffer[i]); } header = (struct ti_i2c_image_header *)buffer; /* update length and checksum after padding */ header->Length = cpu_to_le16((__u16)(buffer_size - sizeof(struct ti_i2c_image_header))); header->CheckSum = cs; /* Download the operational code */ dev_dbg(dev, "%s - Downloading operational code image (TI UMP)\n", __func__); status = download_code(serial, buffer, buffer_size); kfree(buffer); if (status) { dev_dbg(dev, "%s - Error downloading operational code image\n", __func__); return status; } /* Device will reboot */ serial->product_info.TiMode = TI_MODE_TRANSITIONING; dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__); /* return an error on purpose */ return -ENODEV; } stayinbootmode: /* Eprom is invalid or blank stay in boot mode */ dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__); serial->product_info.TiMode = TI_MODE_BOOT; return 0; } static int ti_do_config(struct edgeport_port *port, int feature, int on) { int port_number = port->port->number - port->port->serial->minor; on = !!on; /* 1 or 0 not bitmask */ return send_cmd(port->port->serial->dev, feature, (__u8)(UMPM_UART1_PORT + port_number), on, NULL, 0); } static int restore_mcr(struct edgeport_port *port, __u8 mcr) { int status = 0; dev_dbg(&port->port->dev, "%s - %x\n", __func__, mcr); status = ti_do_config(port, UMPC_SET_CLR_DTR, mcr & MCR_DTR); if (status) return status; status = ti_do_config(port, UMPC_SET_CLR_RTS, mcr & MCR_RTS); if (status) return status; return ti_do_config(port, UMPC_SET_CLR_LOOPBACK, mcr & MCR_LOOPBACK); } /* Convert TI LSR to standard UART flags */ static __u8 map_line_status(__u8 ti_lsr) { __u8 lsr = 0; #define MAP_FLAG(flagUmp, flagUart) \ if (ti_lsr & flagUmp) \ lsr |= flagUart; MAP_FLAG(UMP_UART_LSR_OV_MASK, LSR_OVER_ERR) /* overrun */ MAP_FLAG(UMP_UART_LSR_PE_MASK, LSR_PAR_ERR) /* parity error */ MAP_FLAG(UMP_UART_LSR_FE_MASK, LSR_FRM_ERR) /* framing error */ MAP_FLAG(UMP_UART_LSR_BR_MASK, LSR_BREAK) /* break detected */ MAP_FLAG(UMP_UART_LSR_RX_MASK, LSR_RX_AVAIL) /* rx data available */ MAP_FLAG(UMP_UART_LSR_TX_MASK, LSR_TX_EMPTY) /* tx hold reg empty */ #undef MAP_FLAG return lsr; } static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr) { struct async_icount *icount; struct tty_struct *tty; dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, msr); if (msr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR | EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) { icount = &edge_port->icount; /* update input line counters */ if 
(msr & EDGEPORT_MSR_DELTA_CTS) icount->cts++; if (msr & EDGEPORT_MSR_DELTA_DSR) icount->dsr++; if (msr & EDGEPORT_MSR_DELTA_CD) icount->dcd++; if (msr & EDGEPORT_MSR_DELTA_RI) icount->rng++; wake_up_interruptible(&edge_port->delta_msr_wait); } /* Save the new modem status */ edge_port->shadow_msr = msr & 0xf0; tty = tty_port_tty_get(&edge_port->port->port); /* handle CTS flow control */ if (tty && C_CRTSCTS(tty)) { if (msr & EDGEPORT_MSR_CTS) { tty->hw_stopped = 0; tty_wakeup(tty); } else { tty->hw_stopped = 1; } } tty_kref_put(tty); } static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data, __u8 lsr, __u8 data) { struct async_icount *icount; __u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK)); struct tty_struct *tty; dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, new_lsr); edge_port->shadow_lsr = lsr; if (new_lsr & LSR_BREAK) /* * Parity and Framing errors only count if they * occur exclusive of a break being received. */ new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK); /* Place LSR data byte into Rx buffer */ if (lsr_data) { tty = tty_port_tty_get(&edge_port->port->port); if (tty) { edge_tty_recv(&edge_port->port->dev, tty, &data, 1); tty_kref_put(tty); } } /* update input line counters */ icount = &edge_port->icount; if (new_lsr & LSR_BREAK) icount->brk++; if (new_lsr & LSR_OVER_ERR) icount->overrun++; if (new_lsr & LSR_PAR_ERR) icount->parity++; if (new_lsr & LSR_FRM_ERR) icount->frame++; } static void edge_interrupt_callback(struct urb *urb) { struct edgeport_serial *edge_serial = urb->context; struct usb_serial_port *port; struct edgeport_port *edge_port; struct device *dev; unsigned char *data = urb->transfer_buffer; int length = urb->actual_length; int port_number; int function; int retval; __u8 lsr; __u8 msr; int status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_err(&urb->dev->dev, "%s - nonzero urb status received: " "%d\n", __func__, status); goto exit; } if (!length) { dev_dbg(&urb->dev->dev, "%s - no data in urb\n", __func__); goto exit; } dev = &edge_serial->serial->dev->dev; usb_serial_debug_data(dev, __func__, length, data); if (length != 2) { dev_dbg(dev, "%s - expecting packet of size 2, got %d\n", __func__, length); goto exit; } port_number = TIUMP_GET_PORT_FROM_CODE(data[0]); function = TIUMP_GET_FUNC_FROM_CODE(data[0]); dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, port_number, function, data[1]); port = edge_serial->serial->port[port_number]; edge_port = usb_get_serial_port_data(port); if (!edge_port) { dev_dbg(dev, "%s - edge_port not found\n", __func__); return; } switch (function) { case TIUMP_INTERRUPT_CODE_LSR: lsr = map_line_status(data[1]); if (lsr & UMP_UART_LSR_DATA_MASK) { /* Save the LSR event for bulk read completion routine */ dev_dbg(dev, "%s - LSR Event Port %u LSR Status = %02x\n", __func__, port_number, lsr); edge_port->lsr_event = 1; edge_port->lsr_mask = lsr; } else { dev_dbg(dev, "%s - ===== Port %d LSR Status = %02x ======\n", __func__, port_number, lsr); handle_new_lsr(edge_port, 0, lsr, 0); } break; case TIUMP_INTERRUPT_CODE_MSR: /* MSR */ /* Copy MSR from UMP */ msr = data[1]; dev_dbg(dev, "%s - ===== Port %u MSR Status = %02x ======\n", __func__, port_number, msr); handle_new_msr(edge_port, msr); break; default: dev_err(&urb->dev->dev, "%s - 
Unknown Interrupt code from UMP %x\n", __func__, data[1]); break; } exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static void edge_bulk_in_callback(struct urb *urb) { struct edgeport_port *edge_port = urb->context; struct device *dev = &edge_port->port->dev; unsigned char *data = urb->transfer_buffer; struct tty_struct *tty; int retval = 0; int port_number; int status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_err(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n", __func__, status); } if (status == -EPIPE) goto exit; if (status) { dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__); return; } port_number = edge_port->port->number - edge_port->port->serial->minor; if (edge_port->lsr_event) { edge_port->lsr_event = 0; dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", __func__, port_number, edge_port->lsr_mask, *data); handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data); /* Adjust buffer length/pointer */ --urb->actual_length; ++data; } tty = tty_port_tty_get(&edge_port->port->port); if (tty && urb->actual_length) { usb_serial_debug_data(dev, __func__, urb->actual_length, data); if (edge_port->close_pending) dev_dbg(dev, "%s - close pending, dropping data on the floor\n", __func__); else edge_tty_recv(dev, tty, data, urb->actual_length); edge_port->icount.rx += urb->actual_length; } tty_kref_put(tty); exit: /* continue read unless stopped */ spin_lock(&edge_port->ep_lock); if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) retval = usb_submit_urb(urb, GFP_ATOMIC); else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING) edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED; spin_unlock(&edge_port->ep_lock); if (retval) dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static void edge_tty_recv(struct device *dev, struct tty_struct *tty, unsigned char *data, int length) { int queued; queued = tty_insert_flip_string(tty, data, length); if (queued < length) dev_err(dev, "%s - dropping data, %d bytes lost\n", __func__, length - queued); tty_flip_buffer_push(tty); } static void edge_bulk_out_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status = urb->status; struct tty_struct *tty; edge_port->ep_write_urb_in_use = 0; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_err_console(port, "%s - nonzero write bulk status " "received: %d\n", __func__, status); } /* send any buffered data */ tty = tty_port_tty_get(&port->port); edge_send(tty); tty_kref_put(tty); } static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct edgeport_serial *edge_serial; struct usb_device *dev; struct urb *urb; int port_number; int status; u16 open_settings; u8 transaction_timeout; if (edge_port == NULL) return -ENODEV; port_number = port->number - port->serial->minor; switch (port_number) { case 0: edge_port->uart_base = UMPMEM_BASE_UART1; 
edge_port->dma_address = UMPD_OEDB1_ADDRESS; break; case 1: edge_port->uart_base = UMPMEM_BASE_UART2; edge_port->dma_address = UMPD_OEDB2_ADDRESS; break; default: dev_err(&port->dev, "Unknown port number!!!\n"); return -ENODEV; } dev_dbg(&port->dev, "%s - port_number = %d, uart_base = %04x, dma_address = %04x\n", __func__, port_number, edge_port->uart_base, edge_port->dma_address); dev = port->serial->dev; memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); init_waitqueue_head(&edge_port->delta_msr_wait); /* turn off loopback */ status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); if (status) { dev_err(&port->dev, "%s - cannot send clear loopback command, %d\n", __func__, status); return status; } /* set up the port settings */ if (tty) edge_set_termios(tty, port, &tty->termios); /* open up the port */ /* milliseconds to timeout for DMA transfer */ transaction_timeout = 2; edge_port->ump_read_timeout = max(20, ((transaction_timeout * 3) / 2)); /* milliseconds to timeout for DMA transfer */ open_settings = (u8)(UMP_DMA_MODE_CONTINOUS | UMP_PIPE_TRANS_TIMEOUT_ENA | (transaction_timeout << 2)); dev_dbg(&port->dev, "%s - Sending UMPC_OPEN_PORT\n", __func__); /* Tell TI to open and start the port */ status = send_cmd(dev, UMPC_OPEN_PORT, (u8)(UMPM_UART1_PORT + port_number), open_settings, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send open command, %d\n", __func__, status); return status; } /* Start the DMA? */ status = send_cmd(dev, UMPC_START_PORT, (u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send start DMA command, %d\n", __func__, status); return status; } /* Clear TX and RX buffers in UMP */ status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN); if (status) { dev_err(&port->dev, "%s - cannot send clear buffers command, %d\n", __func__, status); return status; } /* Read Initial MSR */ status = ti_vread_sync(dev, UMPC_READ_MSR, 0, (__u16)(UMPM_UART1_PORT + port_number), &edge_port->shadow_msr, 1); if (status) { dev_err(&port->dev, "%s - cannot send read MSR command, %d\n", __func__, status); return status; } dev_dbg(&port->dev, "ShadowMSR 0x%X\n", edge_port->shadow_msr); /* Set Initial MCR */ edge_port->shadow_mcr = MCR_RTS | MCR_DTR; dev_dbg(&port->dev, "ShadowMCR 0x%X\n", edge_port->shadow_mcr); edge_serial = edge_port->edge_serial; if (mutex_lock_interruptible(&edge_serial->es_lock)) return -ERESTARTSYS; if (edge_serial->num_ports_open == 0) { /* we are the first port to open, post the interrupt urb */ urb = edge_serial->serial->port[0]->interrupt_in_urb; if (!urb) { dev_err(&port->dev, "%s - no interrupt urb present, exiting\n", __func__); status = -EINVAL; goto release_es_lock; } urb->context = edge_serial; status = usb_submit_urb(urb, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s - usb_submit_urb failed with value %d\n", __func__, status); goto release_es_lock; } } /* * reset the data toggle on the bulk endpoints to work around bug in * host controllers where things get out of sync some times */ usb_clear_halt(dev, port->write_urb->pipe); usb_clear_halt(dev, port->read_urb->pipe); /* start up our bulk read urb */ urb = port->read_urb; if (!urb) { dev_err(&port->dev, "%s - no read urb present, exiting\n", __func__); status = -EINVAL; goto unlink_int_urb; } edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING; urb->context = edge_port; status = usb_submit_urb(urb, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s - read bulk usb_submit_urb failed with value %d\n", __func__, status); goto 
unlink_int_urb;
	}

	++edge_serial->num_ports_open;

	goto release_es_lock;

unlink_int_urb:
	if (edge_port->edge_serial->num_ports_open == 0)
		usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_es_lock:
	mutex_unlock(&edge_serial->es_lock);
	return status;
}

static void edge_close(struct usb_serial_port *port)
{
	struct edgeport_serial *edge_serial;
	struct edgeport_port *edge_port;
	struct usb_serial *serial = port->serial;
	int port_number;

	edge_serial = usb_get_serial_data(port->serial);
	edge_port = usb_get_serial_port_data(port);
	if (edge_serial == NULL || edge_port == NULL)
		return;

	/* The bulk read completion routine will check
	 * this flag and dump any read data */
	edge_port->close_pending = 1;

	/* chase the port close and flush */
	chase_port(edge_port, (HZ * closing_wait) / 100, 1);

	usb_kill_urb(port->read_urb);
	usb_kill_urb(port->write_urb);
	edge_port->ep_write_urb_in_use = 0;

	/* assuming we can still talk to the device,
	 * send a close port command to it */
	dev_dbg(&port->dev, "%s - send umpc_close_port\n", __func__);
	port_number = port->number - port->serial->minor;

	mutex_lock(&serial->disc_mutex);
	if (!serial->disconnected) {
		send_cmd(serial->dev,
				UMPC_CLOSE_PORT,
				(__u8)(UMPM_UART1_PORT + port_number),
				0,
				NULL,
				0);
	}
	mutex_unlock(&serial->disc_mutex);

	mutex_lock(&edge_serial->es_lock);
	--edge_port->edge_serial->num_ports_open;
	if (edge_port->edge_serial->num_ports_open <= 0) {
		/* last port is now closed, let's shut down our interrupt urb */
		usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
		edge_port->edge_serial->num_ports_open = 0;
	}
	mutex_unlock(&edge_serial->es_lock);
	edge_port->close_pending = 0;
}

static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
				const unsigned char *data, int count)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);

	if (count == 0) {
		dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
		return 0;
	}

	if (edge_port == NULL)
		return -ENODEV;
	if (edge_port->close_pending == 1)
		return -ENODEV;

	count = kfifo_in_locked(&edge_port->write_fifo, data, count,
							&edge_port->ep_lock);
	edge_send(tty);

	return count;
}

static void edge_send(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	int count, result;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	if (edge_port->ep_write_urb_in_use) {
		spin_unlock_irqrestore(&edge_port->ep_lock, flags);
		return;
	}

	count = kfifo_out(&edge_port->write_fifo,
				port->write_urb->transfer_buffer,
				port->bulk_out_size);

	if (count == 0) {
		spin_unlock_irqrestore(&edge_port->ep_lock, flags);
		return;
	}

	edge_port->ep_write_urb_in_use = 1;

	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	usb_serial_debug_data(&port->dev, __func__, count,
				port->write_urb->transfer_buffer);

	/* set up our urb */
	port->write_urb->transfer_buffer_length = count;

	/* send the data out the bulk port */
	result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
	if (result) {
		dev_err_console(port,
			"%s - failed submitting write urb, error %d\n",
			__func__, result);
		edge_port->ep_write_urb_in_use = 0;
		/* TODO: reschedule edge_send */
	} else
		edge_port->icount.tx += count;

	/* wakeup any process waiting for writes to complete */
	/* there is now more room in the buffer for new writes */
	if (tty)
		tty_wakeup(tty);
}

static int edge_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int room = 0;
	unsigned long flags;

	if (edge_port == NULL)
		return 0;
	if (edge_port->close_pending == 1)
		return 0;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	room = kfifo_avail(&edge_port->write_fifo);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
	return room;
}

static int edge_chars_in_buffer(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int chars = 0;
	unsigned long flags;

	if (edge_port == NULL)
		return 0;
	if (edge_port->close_pending == 1)
		return 0;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	chars = kfifo_len(&edge_port->write_fifo);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
	return chars;
}

static void edge_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status;

	if (edge_port == NULL)
		return;

	/* if we are implementing XON/XOFF, send the stop character */
	if (I_IXOFF(tty)) {
		unsigned char stop_char = STOP_CHAR(tty);
		status = edge_write(tty, port, &stop_char, 1);
		if (status <= 0) {
			dev_err(&port->dev, "%s - failed to write stop character, %d\n",
				__func__, status);
		}
	}

	/* if we are implementing RTS/CTS, stop reads */
	/* and the Edgeport will clear the RTS line */
	if (C_CRTSCTS(tty))
		stop_read(edge_port);

}

static void edge_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status;

	if (edge_port == NULL)
		return;

	/* if we are implementing XON/XOFF, send the start character */
	if (I_IXOFF(tty)) {
		unsigned char start_char = START_CHAR(tty);
		status = edge_write(tty, port, &start_char, 1);
		if (status <= 0) {
			dev_err(&port->dev, "%s - failed to write start character, %d\n",
				__func__, status);
		}
	}

	/* if we are implementing RTS/CTS, restart reads */
	/* and the Edgeport will assert the RTS line */
	if (C_CRTSCTS(tty)) {
		status = restart_read(edge_port);
		if (status)
			dev_err(&port->dev,
				"%s - read bulk usb_submit_urb failed: %d\n",
				__func__, status);
	}

}

static void stop_read(struct edgeport_port *edge_port)
{
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
		edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING;
	edge_port->shadow_mcr &= ~MCR_RTS;

	spin_unlock_irqrestore(&edge_port->ep_lock, flags);
}

static int restart_read(struct edgeport_port *edge_port)
{
	struct urb *urb;
	int status = 0;
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) {
		urb = edge_port->port->read_urb;
		status = usb_submit_urb(urb, GFP_ATOMIC);
	}
	edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
	edge_port->shadow_mcr |= MCR_RTS;

	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	return status;
}

static void change_port_settings(struct tty_struct *tty,
		struct edgeport_port *edge_port, struct ktermios *old_termios)
{
	struct device *dev = &edge_port->port->dev;
	struct ump_uart_config *config;
	int baud;
	unsigned cflag;
	int status;
	int port_number = edge_port->port->number -
					edge_port->port->serial->minor;

	dev_dbg(dev, "%s - port %d\n", __func__, edge_port->port->number);

	config = kmalloc(sizeof(*config), GFP_KERNEL);
	if (!config) {
		tty->termios = *old_termios;
		dev_err(dev, "%s - out of memory\n", __func__);
		return;
	}

	cflag = tty->termios.c_cflag;

	config->wFlags = 0;

	/* These flags
must be set */ config->wFlags |= UMP_MASK_UART_FLAGS_RECEIVE_MS_INT; config->wFlags |= UMP_MASK_UART_FLAGS_AUTO_START_ON_ERR; config->bUartMode = (__u8)(edge_port->bUartMode); switch (cflag & CSIZE) { case CS5: config->bDataBits = UMP_UART_CHAR5BITS; dev_dbg(dev, "%s - data bits = 5\n", __func__); break; case CS6: config->bDataBits = UMP_UART_CHAR6BITS; dev_dbg(dev, "%s - data bits = 6\n", __func__); break; case CS7: config->bDataBits = UMP_UART_CHAR7BITS; dev_dbg(dev, "%s - data bits = 7\n", __func__); break; default: case CS8: config->bDataBits = UMP_UART_CHAR8BITS; dev_dbg(dev, "%s - data bits = 8\n", __func__); break; } if (cflag & PARENB) { if (cflag & PARODD) { config->wFlags |= UMP_MASK_UART_FLAGS_PARITY; config->bParity = UMP_UART_ODDPARITY; dev_dbg(dev, "%s - parity = odd\n", __func__); } else { config->wFlags |= UMP_MASK_UART_FLAGS_PARITY; config->bParity = UMP_UART_EVENPARITY; dev_dbg(dev, "%s - parity = even\n", __func__); } } else { config->bParity = UMP_UART_NOPARITY; dev_dbg(dev, "%s - parity = none\n", __func__); } if (cflag & CSTOPB) { config->bStopBits = UMP_UART_STOPBIT2; dev_dbg(dev, "%s - stop bits = 2\n", __func__); } else { config->bStopBits = UMP_UART_STOPBIT1; dev_dbg(dev, "%s - stop bits = 1\n", __func__); } /* figure out the flow control settings */ if (cflag & CRTSCTS) { config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X_CTS_FLOW; config->wFlags |= UMP_MASK_UART_FLAGS_RTS_FLOW; dev_dbg(dev, "%s - RTS/CTS is enabled\n", __func__); } else { dev_dbg(dev, "%s - RTS/CTS is disabled\n", __func__); tty->hw_stopped = 0; restart_read(edge_port); } /* if we are implementing XON/XOFF, set the start and stop character in the device */ config->cXon = START_CHAR(tty); config->cXoff = STOP_CHAR(tty); /* if we are implementing INBOUND XON/XOFF */ if (I_IXOFF(tty)) { config->wFlags |= UMP_MASK_UART_FLAGS_IN_X; dev_dbg(dev, "%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n", __func__, config->cXon, config->cXoff); } else dev_dbg(dev, "%s - INBOUND XON/XOFF is disabled\n", __func__); /* if we are implementing OUTBOUND XON/XOFF */ if (I_IXON(tty)) { config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X; dev_dbg(dev, "%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n", __func__, config->cXon, config->cXoff); } else dev_dbg(dev, "%s - OUTBOUND XON/XOFF is disabled\n", __func__); tty->termios.c_cflag &= ~CMSPAR; /* Round the baud rate */ baud = tty_get_baud_rate(tty); if (!baud) { /* pick a default, any default... 
*/
		baud = 9600;
	} else
		tty_encode_baud_rate(tty, baud, baud);

	edge_port->baud_rate = baud;
	config->wBaudRate = (__u16)((461550L + baud/2) / baud);
	/*
	 * The divisor is 461550 / baud, rounded to nearest: e.g. a requested
	 * 9600 baud gives a divisor of 48, i.e. an actual rate of ~9615 baud.
	 */

	/* FIXME: Recompute actual baud from divisor here */

	dev_dbg(dev, "%s - baud rate = %d, wBaudRate = %d\n", __func__, baud,
		config->wBaudRate);

	dev_dbg(dev, "wBaudRate: %d\n", (int)(461550L / config->wBaudRate));
	dev_dbg(dev, "wFlags: 0x%x\n", config->wFlags);
	dev_dbg(dev, "bDataBits: %d\n", config->bDataBits);
	dev_dbg(dev, "bParity: %d\n", config->bParity);
	dev_dbg(dev, "bStopBits: %d\n", config->bStopBits);
	dev_dbg(dev, "cXon: %d\n", config->cXon);
	dev_dbg(dev, "cXoff: %d\n", config->cXoff);
	dev_dbg(dev, "bUartMode: %d\n", config->bUartMode);

	/* move the word values into big endian mode */
	cpu_to_be16s(&config->wFlags);
	cpu_to_be16s(&config->wBaudRate);

	status = send_cmd(edge_port->port->serial->dev, UMPC_SET_CONFIG,
				(__u8)(UMPM_UART1_PORT + port_number),
				0, (__u8 *)config, sizeof(*config));
	if (status)
		dev_dbg(dev, "%s - error %d when trying to write config to device\n",
			__func__, status);
	kfree(config);
}

static void edge_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int cflag;

	cflag = tty->termios.c_cflag;

	dev_dbg(&port->dev, "%s - cflag %08x iflag %08x\n", __func__,
		tty->termios.c_cflag, tty->termios.c_iflag);
	dev_dbg(&port->dev, "%s - old cflag %08x old iflag %08x\n", __func__,
		old_termios->c_cflag, old_termios->c_iflag);
	dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);

	if (edge_port == NULL)
		return;
	/* change the port settings to the new ones specified */
	change_port_settings(tty, edge_port, old_termios);
}

static int edge_tiocmset(struct tty_struct *tty,
					unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int mcr;
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	mcr = edge_port->shadow_mcr;
	if (set & TIOCM_RTS)
		mcr |= MCR_RTS;
	if (set & TIOCM_DTR)
		mcr |= MCR_DTR;
	if (set & TIOCM_LOOP)
		mcr |= MCR_LOOPBACK;

	if (clear & TIOCM_RTS)
		mcr &= ~MCR_RTS;
	if (clear & TIOCM_DTR)
		mcr &= ~MCR_DTR;
	if (clear & TIOCM_LOOP)
		mcr &= ~MCR_LOOPBACK;

	edge_port->shadow_mcr = mcr;
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	restore_mcr(edge_port, mcr);
	return 0;
}

static int edge_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int result = 0;
	unsigned int msr;
	unsigned int mcr;
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	msr = edge_port->shadow_msr;
	mcr = edge_port->shadow_mcr;
	result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0)	/* 0x002 */
		| ((mcr & MCR_RTS) ? TIOCM_RTS: 0)	/* 0x004 */
		| ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0)	/* 0x020 */
		| ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0)	/* 0x040 */
		| ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0)	/* 0x080 */
		| ((msr & EDGEPORT_MSR_DSR) ?
TIOCM_DSR: 0); /* 0x100 */ dev_dbg(&port->dev, "%s -- %x\n", __func__, result); spin_unlock_irqrestore(&edge_port->ep_lock, flags); return result; } static int edge_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct async_icount *ic = &edge_port->icount; icount->cts = ic->cts; icount->dsr = ic->dsr; icount->rng = ic->rng; icount->dcd = ic->dcd; icount->tx = ic->tx; icount->rx = ic->rx; icount->frame = ic->frame; icount->parity = ic->parity; icount->overrun = ic->overrun; icount->brk = ic->brk; icount->buf_overrun = ic->buf_overrun; return 0; } static int get_serial_info(struct edgeport_port *edge_port, struct serial_struct __user *retinfo) { struct serial_struct tmp; if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_16550A; tmp.line = edge_port->port->serial->minor; tmp.port = edge_port->port->number; tmp.irq = 0; tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ; tmp.xmit_fifo_size = edge_port->port->bulk_out_size; tmp.baud_base = 9600; tmp.close_delay = 5*HZ; tmp.closing_wait = closing_wait; if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; return 0; } static int edge_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct async_icount cnow; struct async_icount cprev; dev_dbg(&port->dev, "%s - port %d, cmd = 0x%x\n", __func__, port->number, cmd); switch (cmd) { case TIOCGSERIAL: dev_dbg(&port->dev, "%s - TIOCGSERIAL\n", __func__); return get_serial_info(edge_port, (struct serial_struct __user *) arg); case TIOCMIWAIT: dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); cprev = edge_port->icount; while (1) { interruptible_sleep_on(&edge_port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; cnow = edge_port->icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) return -EIO; /* no change => error */ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { return 0; } cprev = cnow; } /* not reached */ break; } return -ENOIOCTLCMD; } static void edge_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; int bv = 0; /* Off */ /* chase the port close */ chase_port(edge_port, 0, 0); if (break_state == -1) bv = 1; /* On */ status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv); if (status) dev_dbg(&port->dev, "%s - error %d sending break set/clear command.\n", __func__, status); } static int edge_startup(struct usb_serial *serial) { struct edgeport_serial *edge_serial; int status; /* create our private serial structure */ edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); if (edge_serial == NULL) { dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__); return -ENOMEM; } mutex_init(&edge_serial->es_lock); edge_serial->serial = serial; usb_set_serial_data(serial, edge_serial); status = download_fw(edge_serial); if (status) { kfree(edge_serial); return status; } return 0; } static void edge_disconnect(struct usb_serial *serial) { } static void edge_release(struct usb_serial *serial) { 
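	/* Free the per-device edgeport_serial allocated in edge_startup(). */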
kfree(usb_get_serial_data(serial)); } static int edge_port_probe(struct usb_serial_port *port) { struct edgeport_port *edge_port; int ret; edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL); if (!edge_port) return -ENOMEM; ret = kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE, GFP_KERNEL); if (ret) { kfree(edge_port); return -ENOMEM; } spin_lock_init(&edge_port->ep_lock); edge_port->port = port; edge_port->edge_serial = usb_get_serial_data(port->serial); edge_port->bUartMode = default_uart_mode; usb_set_serial_port_data(port, edge_port); ret = edge_create_sysfs_attrs(port); if (ret) { kfifo_free(&edge_port->write_fifo); kfree(edge_port); return ret; } return 0; } static int edge_port_remove(struct usb_serial_port *port) { struct edgeport_port *edge_port; edge_port = usb_get_serial_port_data(port); edge_remove_sysfs_attrs(port); kfifo_free(&edge_port->write_fifo); kfree(edge_port); return 0; } /* Sysfs Attributes */ static ssize_t show_uart_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_serial_port *port = to_usb_serial_port(dev); struct edgeport_port *edge_port = usb_get_serial_port_data(port); return sprintf(buf, "%d\n", edge_port->bUartMode); } static ssize_t store_uart_mode(struct device *dev, struct device_attribute *attr, const char *valbuf, size_t count) { struct usb_serial_port *port = to_usb_serial_port(dev); struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int v = simple_strtoul(valbuf, NULL, 0); dev_dbg(dev, "%s: setting uart_mode = %d\n", __func__, v); if (v < 256) edge_port->bUartMode = v; else dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v); return count; } static DEVICE_ATTR(uart_mode, S_IWUSR | S_IRUGO, show_uart_mode, store_uart_mode); static int edge_create_sysfs_attrs(struct usb_serial_port *port) { return device_create_file(&port->dev, &dev_attr_uart_mode); } static int edge_remove_sysfs_attrs(struct usb_serial_port *port) { device_remove_file(&port->dev, &dev_attr_uart_mode); return 0; } static struct usb_serial_driver edgeport_1port_device = { .driver = { .owner = THIS_MODULE, .name = "edgeport_ti_1", }, .description = "Edgeport TI 1 port adapter", .id_table = edgeport_1port_id_table, .num_ports = 1, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_port_probe, .port_remove = edge_port_remove, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .get_icount = edge_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_callback, }; static struct usb_serial_driver edgeport_2port_device = { .driver = { .owner = THIS_MODULE, .name = "edgeport_ti_2", }, .description = "Edgeport TI 2 port adapter", .id_table = edgeport_2port_id_table, .num_ports = 2, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_port_probe, .port_remove = edge_port_remove, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .break_ctl = 
edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &edgeport_1port_device, &edgeport_2port_device, NULL }; module_usb_serial_driver(serial_drivers, id_table_combined); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("edgeport/down3.bin"); module_param(closing_wait, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs"); module_param(ignore_cpu_rev, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ignore_cpu_rev, "Ignore the cpu revision when connecting to a device"); module_param(default_uart_mode, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(default_uart_mode, "Default uart_mode, 0=RS232, ...");
./CrossVul/dataset_final_sorted/CWE-264/c/good_5591_0
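The TIOCMIWAIT branch in edge_ioctl() above parks the caller until one of the requested modem-status lines toggles, comparing icount snapshots on each wakeup. A minimal userspace sketch of how that ioctl is driven, assuming the adapter enumerates as /dev/ttyUSB0 (the device path is an assumption, not part of the driver):

/* Hypothetical caller of the TIOCMIWAIT ioctl handled by edge_ioctl(). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Sleep until CTS or carrier-detect changes; the driver's
	 * icount snapshot comparison decides when this returns. */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CTS | TIOCM_CD) < 0)
		perror("TIOCMIWAIT");
	else
		printf("modem status line changed\n");
	close(fd);
	return 0;
}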
crossvul-cpp_data_good_5861_33
/* * Accelerated GHASH implementation with Intel PCLMULQDQ-NI * instructions. This file contains glue code. * * Copyright (c) 2009 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/cryptd.h> #include <crypto/gf128mul.h> #include <crypto/internal/hash.h> #include <asm/i387.h> #include <asm/cpu_device_id.h> #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 void clmul_ghash_mul(char *dst, const u128 *shash); void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, const u128 *shash); struct ghash_async_ctx { struct cryptd_ahash *cryptd_tfm; }; struct ghash_ctx { u128 shash; }; struct ghash_desc_ctx { u8 buffer[GHASH_BLOCK_SIZE]; u32 bytes; }; static int ghash_init(struct shash_desc *desc) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); memset(dctx, 0, sizeof(*dctx)); return 0; } static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); be128 *x = (be128 *)key; u64 a, b; if (keylen != GHASH_BLOCK_SIZE) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } /* perform multiplication by 'x' in GF(2^128) */ a = be64_to_cpu(x->a); b = be64_to_cpu(x->b); ctx->shash.a = (b << 1) | (a >> 63); ctx->shash.b = (a << 1) | (b >> 63); if (a >> 63) ctx->shash.b ^= ((u64)0xc2) << 56; return 0; } static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; kernel_fpu_begin(); if (dctx->bytes) { int n = min(srclen, dctx->bytes); u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); dctx->bytes -= n; srclen -= n; while (n--) *pos++ ^= *src++; if (!dctx->bytes) clmul_ghash_mul(dst, &ctx->shash); } clmul_ghash_update(dst, src, srclen, &ctx->shash); kernel_fpu_end(); if (srclen & 0xf) { src += srclen - (srclen & 0xf); srclen &= 0xf; dctx->bytes = GHASH_BLOCK_SIZE - srclen; while (srclen--) *dst++ ^= *src++; } return 0; } static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) { u8 *dst = dctx->buffer; if (dctx->bytes) { u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); while (dctx->bytes--) *tmp++ ^= 0; kernel_fpu_begin(); clmul_ghash_mul(dst, &ctx->shash); kernel_fpu_end(); } dctx->bytes = 0; } static int ghash_final(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; ghash_flush(ctx, dctx); memcpy(dst, buf, GHASH_BLOCK_SIZE); return 0; } static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, .final = ghash_final, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "__ghash", .cra_driver_name = "__ghash-pclmulqdqni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ghash_ctx), .cra_module = THIS_MODULE, }, }; static int ghash_async_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct 
ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!irq_fpu_usable()) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_init(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); desc->tfm = child; desc->flags = req->base.flags; return crypto_shash_init(desc); } } static int ghash_async_update(struct ahash_request *req) { struct ahash_request *cryptd_req = ahash_request_ctx(req); if (!irq_fpu_usable()) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_update(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); return shash_ahash_update(req, desc); } } static int ghash_async_final(struct ahash_request *req) { struct ahash_request *cryptd_req = ahash_request_ctx(req); if (!irq_fpu_usable()) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_final(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); return crypto_shash_final(desc, req->result); } } static int ghash_async_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!irq_fpu_usable()) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_digest(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); desc->tfm = child; desc->flags = req->base.flags; return shash_ahash_digest(req, desc); } } static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_ahash *child = &ctx->cryptd_tfm->base; int err; crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(child, key, keylen); crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child) & CRYPTO_TFM_RES_MASK); return err; } static int ghash_async_init_tfm(struct crypto_tfm *tfm) { struct cryptd_ahash *cryptd_tfm; struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni", 0, 0); if (IS_ERR(cryptd_tfm)) return PTR_ERR(cryptd_tfm); ctx->cryptd_tfm = cryptd_tfm; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct ahash_request) + crypto_ahash_reqsize(&cryptd_tfm->base)); return 0; } static void ghash_async_exit_tfm(struct crypto_tfm *tfm) { struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); cryptd_free_ahash(ctx->cryptd_tfm); } static struct ahash_alg ghash_async_alg = { .init = ghash_async_init, .update = ghash_async_update, .final = ghash_async_final, .setkey = ghash_async_setkey, .digest = ghash_async_digest, .halg = { .digestsize = GHASH_DIGEST_SIZE, .base = { .cra_name = "ghash", .cra_driver_name = 
"ghash-clmulni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_type = &crypto_ahash_type, .cra_module = THIS_MODULE, .cra_init = ghash_async_init_tfm, .cra_exit = ghash_async_exit_tfm, }, }, }; static const struct x86_cpu_id pcmul_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), /* Pickle-Mickle-Duck */ {} }; MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id); static int __init ghash_pclmulqdqni_mod_init(void) { int err; if (!x86_match_cpu(pcmul_cpu_id)) return -ENODEV; err = crypto_register_shash(&ghash_alg); if (err) goto err_out; err = crypto_register_ahash(&ghash_async_alg); if (err) goto err_shash; return 0; err_shash: crypto_unregister_shash(&ghash_alg); err_out: return err; } static void __exit ghash_pclmulqdqni_mod_exit(void) { crypto_unregister_ahash(&ghash_async_alg); crypto_unregister_shash(&ghash_alg); } module_init(ghash_pclmulqdqni_mod_init); module_exit(ghash_pclmulqdqni_mod_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm, " "acclerated by PCLMULQDQ-NI"); MODULE_ALIAS_CRYPTO("ghash");
./CrossVul/dataset_final_sorted/CWE-264/c/good_5861_33
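ghash_setkey() above folds the GHASH hash key through a one-bit shift in GF(2^128) before handing it to the PCLMULQDQ routines. A standalone sketch of just that arithmetic, assuming nothing beyond the two 64-bit halves already byte-swapped out of the be128 key (ghash_key_shift() is an illustrative name, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the shash.a/shash.b computation in ghash_setkey():
 * shift the 128-bit key left by one bit across both halves and,
 * if a bit fell off the top, xor in the reduction constant used
 * by this bit-reflected GHASH representation. */
static void ghash_key_shift(uint64_t a, uint64_t b,
			    uint64_t *shash_a, uint64_t *shash_b)
{
	*shash_a = (b << 1) | (a >> 63);
	*shash_b = (a << 1) | (b >> 63);
	if (a >> 63)
		*shash_b ^= (uint64_t)0xc2 << 56;
}

int main(void)
{
	uint64_t sa, sb;

	/* Top bit of 'a' set, so the reduction term gets folded in. */
	ghash_key_shift(0x8000000000000000ULL, 1, &sa, &sb);
	printf("shash.a=%016llx shash.b=%016llx\n",
	       (unsigned long long)sa, (unsigned long long)sb);
	return 0;
}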
crossvul-cpp_data_good_891_0
/* abuild-sudo.c - limited root privileges for users in "abuild" group * * Copyright (C) 2012 Natanael Copa <ncopa@alpinelinux.org> * All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. See http://www.gnu.org/ for details. */ #include <sys/types.h> #include <err.h> #include <grp.h> #include <pwd.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #ifndef ABUILD_GROUP #define ABUILD_GROUP "abuild" #endif static const char* valid_cmds[] = { "/bin/adduser", "/usr/sbin/adduser", "/bin/addgroup", "/usr/sbin/addgroup", "/sbin/apk", "/usr/bin/abuild-rmtemp", NULL }; static const char* invalid_opts[] = { "--allow-untrusted", "--keys-dir", NULL, }; const char *get_command_path(const char *cmd) { const char *p; int i; for (i = 0; valid_cmds[i] != NULL; i++) { if (access(valid_cmds[i], F_OK) == -1) continue; p = strrchr(valid_cmds[i], '/') + 1; if (strcmp(p, cmd) == 0) return valid_cmds[i]; } return NULL; } void check_option(const char *opt) { int i; for (i = 0; invalid_opts[i] != NULL; i++) if (strcmp(opt, invalid_opts[i]) == 0) errx(1, "%s: not allowed option", opt); } int is_in_group(gid_t group) { int ngroups_max = sysconf(_SC_NGROUPS_MAX) + 1; gid_t *buf = malloc(ngroups_max * sizeof(gid_t)); int ngroups; int i; if (buf == NULL) { perror("malloc"); return 0; } ngroups = getgroups(ngroups_max, buf); for (i = 0; i < ngroups; i++) { if (buf[i] == group) break; } free(buf); return i < ngroups; } int main(int argc, const char *argv[]) { struct group *grent; const char *cmd; const char *path; int i; struct passwd *pw; grent = getgrnam(ABUILD_GROUP); if (grent == NULL) errx(1, "%s: Group not found", ABUILD_GROUP); char *name = NULL; pw = getpwuid(getuid()); if (pw) name = pw->pw_name; if (!is_in_group(grent->gr_gid)) { errx(1, "User %s is not a member of group %s\n", name ? name : "(unknown)", ABUILD_GROUP); } if (name == NULL) warnx("Could not find username for uid %d\n", getuid()); setenv("USER", name ?: "", 1); cmd = strrchr(argv[0], '/'); if (cmd) cmd++; else cmd = argv[0]; cmd = strchr(cmd, '-'); if (cmd == NULL) errx(1, "Calling command has no '-'"); cmd++; path = get_command_path(cmd); if (path == NULL) errx(1, "%s: Not a valid subcommand", cmd); for (i = 1; i < argc; i++) check_option(argv[i]); argv[0] = path; /* set our uid to root so bbsuid --install works */ setuid(0); /* set our gid to root so apk commit hooks run with the same gid as for "sudo apk add ..." */ setgid(0); execv(path, (char * const*)argv); perror(path); return 1; }
./CrossVul/dataset_final_sorted/CWE-264/c/good_891_0
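is_in_group() above sizes a buffer from _SC_NGROUPS_MAX and scans the supplementary group list rather than trusting the primary gid alone. The same check can be exercised from a plain userspace program; a minimal sketch assuming only POSIX getgroups()/getgrnam() (the standalone in_group() helper is illustrative):

#include <grp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Does the current process carry the given supplementary gid? */
static int in_group(gid_t gid)
{
	int max = sysconf(_SC_NGROUPS_MAX) + 1;
	gid_t *buf = malloc(max * sizeof(gid_t));
	int n, i, found = 0;

	if (!buf)
		return 0;
	n = getgroups(max, buf);
	for (i = 0; i < n; i++)
		if (buf[i] == gid)
			found = 1;
	free(buf);
	return found;
}

int main(void)
{
	struct group *gr = getgrnam("abuild");

	if (!gr) {
		fprintf(stderr, "abuild: group not found\n");
		return 1;
	}
	printf("member of abuild: %s\n", in_group(gr->gr_gid) ? "yes" : "no");
	return 0;
}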
crossvul-cpp_data_bad_5861_0
/* * Glue Code for the asm optimized version of the AES Cipher Algorithm */ #include <linux/module.h> #include <linux/crypto.h> #include <crypto/aes.h> #include "aes_glue.h" EXPORT_SYMBOL(AES_encrypt); EXPORT_SYMBOL(AES_decrypt); EXPORT_SYMBOL(private_AES_set_encrypt_key); EXPORT_SYMBOL(private_AES_set_decrypt_key); static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct AES_CTX *ctx = crypto_tfm_ctx(tfm); AES_encrypt(src, dst, &ctx->enc_key); } static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct AES_CTX *ctx = crypto_tfm_ctx(tfm); AES_decrypt(src, dst, &ctx->dec_key); } static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct AES_CTX *ctx = crypto_tfm_ctx(tfm); switch (key_len) { case AES_KEYSIZE_128: key_len = 128; break; case AES_KEYSIZE_192: key_len = 192; break; case AES_KEYSIZE_256: key_len = 256; break; default: tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } if (private_AES_set_encrypt_key(in_key, key_len, &ctx->enc_key) == -1) { tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } /* private_AES_set_decrypt_key expects an encryption key as input */ ctx->dec_key = ctx->enc_key; if (private_AES_set_decrypt_key(in_key, key_len, &ctx->dec_key) == -1) { tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } return 0; } static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct AES_CTX), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } } }; static int __init aes_init(void) { return crypto_register_alg(&aes_alg); } static void __exit aes_fini(void) { crypto_unregister_alg(&aes_alg); } module_init(aes_init); module_exit(aes_fini); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)"); MODULE_LICENSE("GPL"); MODULE_ALIAS("aes"); MODULE_ALIAS("aes-asm"); MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
./CrossVul/dataset_final_sorted/CWE-264/c/bad_5861_0
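aes_glue.c above only registers the cipher; consumers reach it through the kernel crypto API by name and priority. A kernel-side fragment (a sketch, not a complete module) showing a single-block encryption through the crypto_cipher interface that this era of linux/crypto.h provides; whether "aes-asm" actually services the call depends on which registered AES implementation has the highest .cra_priority:

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>

/* Encrypt one 16-byte block with whichever "aes" implementation
 * the crypto API selects. */
static int demo_aes_one_block(void)
{
	struct crypto_cipher *tfm;
	u8 key[AES_MIN_KEY_SIZE] = { 0 };	/* all-zero demo key */
	u8 in[AES_BLOCK_SIZE] = { 0 }, out[AES_BLOCK_SIZE];
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}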
crossvul-cpp_data_bad_2287_11
/* * "splice": joining two ropes together by interweaving their strands. * * This is the "extended pipe" functionality, where a pipe is used as * an arbitrary in-memory buffer. Think of a pipe as a small kernel * buffer that you can use to transfer data from one end to the other. * * The traditional unix read/write is extended with a "splice()" operation * that transfers data buffers to or from a pipe buffer. * * Named by Larry McVoy, original implementation from Linus, extended by * Jens to support splicing to files, network, direct splicing, etc and * fixing lots of bugs. * * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk> * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org> * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu> * */ #include <linux/fs.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/splice.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/export.h> #include <linux/syscalls.h> #include <linux/uio.h> #include <linux/security.h> #include <linux/gfp.h> #include <linux/socket.h> #include <linux/compat.h> #include "internal.h" /* * Attempt to steal a page from a pipe buffer. This should perhaps go into * a vm helper function, it's already simplified quite a bit by the * addition of remove_mapping(). If success is returned, the caller may * attempt to reuse this page for another destination. */ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; struct address_space *mapping; lock_page(page); mapping = page_mapping(page); if (mapping) { WARN_ON(!PageUptodate(page)); /* * At least for ext2 with nobh option, we need to wait on * writeback completing on this page, since we'll remove it * from the pagecache. Otherwise truncate wont wait on the * page, allowing the disk blocks to be reused by someone else * before we actually wrote our data to them. fs corruption * ensues. */ wait_on_page_writeback(page); if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) goto out_unlock; /* * If we succeeded in removing the mapping, set LRU flag * and return good. */ if (remove_mapping(mapping, page)) { buf->flags |= PIPE_BUF_FLAG_LRU; return 0; } } /* * Raced with truncate or failed to remove page from current * address space, unlock and return failure. */ out_unlock: unlock_page(page); return 1; } static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { page_cache_release(buf->page); buf->flags &= ~PIPE_BUF_FLAG_LRU; } /* * Check whether the contents of buf is OK to access. Since the content * is a page cache page, IO may be in flight. */ static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; int err; if (!PageUptodate(page)) { lock_page(page); /* * Page got truncated/unhashed. This will cause a 0-byte * splice, if this is the first page. */ if (!page->mapping) { err = -ENODATA; goto error; } /* * Uh oh, read-error from disk. */ if (!PageUptodate(page)) { err = -EIO; goto error; } /* * Page is ok afterall, we are done. 
*/ unlock_page(page); } return 0; error: unlock_page(page); return err; } const struct pipe_buf_operations page_cache_pipe_buf_ops = { .can_merge = 0, .confirm = page_cache_pipe_buf_confirm, .release = page_cache_pipe_buf_release, .steal = page_cache_pipe_buf_steal, .get = generic_pipe_buf_get, }; static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { if (!(buf->flags & PIPE_BUF_FLAG_GIFT)) return 1; buf->flags |= PIPE_BUF_FLAG_LRU; return generic_pipe_buf_steal(pipe, buf); } static const struct pipe_buf_operations user_page_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = page_cache_pipe_buf_release, .steal = user_page_pipe_buf_steal, .get = generic_pipe_buf_get, }; static void wakeup_pipe_readers(struct pipe_inode_info *pipe) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } /** * splice_to_pipe - fill passed data into a pipe * @pipe: pipe to fill * @spd: data to fill * * Description: * @spd contains a map of pages and len/offset tuples, along with * the struct pipe_buf_operations associated with these pages. This * function will link that data to the pipe. * */ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { unsigned int spd_pages = spd->nr_pages; int ret, do_wakeup, page_nr; ret = 0; do_wakeup = 0; page_nr = 0; pipe_lock(pipe); for (;;) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } if (pipe->nrbufs < pipe->buffers) { int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); struct pipe_buffer *buf = pipe->bufs + newbuf; buf->page = spd->pages[page_nr]; buf->offset = spd->partial[page_nr].offset; buf->len = spd->partial[page_nr].len; buf->private = spd->partial[page_nr].private; buf->ops = spd->ops; if (spd->flags & SPLICE_F_GIFT) buf->flags |= PIPE_BUF_FLAG_GIFT; pipe->nrbufs++; page_nr++; ret += buf->len; if (pipe->files) do_wakeup = 1; if (!--spd->nr_pages) break; if (pipe->nrbufs < pipe->buffers) continue; break; } if (spd->flags & SPLICE_F_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } pipe_unlock(pipe); if (do_wakeup) wakeup_pipe_readers(pipe); while (page_nr < spd_pages) spd->spd_release(spd, page_nr++); return ret; } void spd_release_page(struct splice_pipe_desc *spd, unsigned int i) { page_cache_release(spd->pages[i]); } /* * Check if we need to grow the arrays holding pages and partial page * descriptions. 
*/ int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { unsigned int buffers = ACCESS_ONCE(pipe->buffers); spd->nr_pages_max = buffers; if (buffers <= PIPE_DEF_BUFFERS) return 0; spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL); spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL); if (spd->pages && spd->partial) return 0; kfree(spd->pages); kfree(spd->partial); return -ENOMEM; } void splice_shrink_spd(struct splice_pipe_desc *spd) { if (spd->nr_pages_max <= PIPE_DEF_BUFFERS) return; kfree(spd->pages); kfree(spd->partial); } static int __generic_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct address_space *mapping = in->f_mapping; unsigned int loff, nr_pages, req_pages; struct page *pages[PIPE_DEF_BUFFERS]; struct partial_page partial[PIPE_DEF_BUFFERS]; struct page *page; pgoff_t index, end_index; loff_t isize; int error, page_nr; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &page_cache_pipe_buf_ops, .spd_release = spd_release_page, }; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; index = *ppos >> PAGE_CACHE_SHIFT; loff = *ppos & ~PAGE_CACHE_MASK; req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; nr_pages = min(req_pages, spd.nr_pages_max); /* * Lookup the (hopefully) full range of pages we need. */ spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages); index += spd.nr_pages; /* * If find_get_pages_contig() returned fewer pages than we needed, * readahead/allocate the rest and fill in the holes. */ if (spd.nr_pages < nr_pages) page_cache_sync_readahead(mapping, &in->f_ra, in, index, req_pages - spd.nr_pages); error = 0; while (spd.nr_pages < nr_pages) { /* * Page could be there, find_get_pages_contig() breaks on * the first hole. */ page = find_get_page(mapping, index); if (!page) { /* * page didn't exist, allocate one. */ page = page_cache_alloc_cold(mapping); if (!page) break; error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); if (unlikely(error)) { page_cache_release(page); if (error == -EEXIST) continue; break; } /* * add_to_page_cache() locks the page, unlock it * to avoid convoluting the logic below even more. */ unlock_page(page); } spd.pages[spd.nr_pages++] = page; index++; } /* * Now loop over the map and see if we need to start IO on any * pages, fill in the partial map, etc. */ index = *ppos >> PAGE_CACHE_SHIFT; nr_pages = spd.nr_pages; spd.nr_pages = 0; for (page_nr = 0; page_nr < nr_pages; page_nr++) { unsigned int this_len; if (!len) break; /* * this_len is the max we'll use from this page */ this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); page = spd.pages[page_nr]; if (PageReadahead(page)) page_cache_async_readahead(mapping, &in->f_ra, in, page, index, req_pages - page_nr); /* * If the page isn't uptodate, we may need to start io on it */ if (!PageUptodate(page)) { lock_page(page); /* * Page was truncated, or invalidated by the * filesystem. Redo the find/create, but this time the * page is kept locked, so there's no chance of another * race with truncate/invalidate. 
*/ if (!page->mapping) { unlock_page(page); page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); if (!page) { error = -ENOMEM; break; } page_cache_release(spd.pages[page_nr]); spd.pages[page_nr] = page; } /* * page was already under io and is now done, great */ if (PageUptodate(page)) { unlock_page(page); goto fill_it; } /* * need to read in the page */ error = mapping->a_ops->readpage(in, page); if (unlikely(error)) { /* * We really should re-lookup the page here, * but it complicates things a lot. Instead * lets just do what we already stored, and * we'll get it the next time we are called. */ if (error == AOP_TRUNCATED_PAGE) error = 0; break; } } fill_it: /* * i_size must be checked after PageUptodate. */ isize = i_size_read(mapping->host); end_index = (isize - 1) >> PAGE_CACHE_SHIFT; if (unlikely(!isize || index > end_index)) break; /* * if this is the last page, see if we need to shrink * the length and stop */ if (end_index == index) { unsigned int plen; /* * max good bytes in this page */ plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; if (plen <= loff) break; /* * force quit after adding this page */ this_len = min(this_len, plen - loff); len = this_len; } spd.partial[page_nr].offset = loff; spd.partial[page_nr].len = this_len; len -= this_len; loff = 0; spd.nr_pages++; index++; } /* * Release any pages at the end, if we quit early. 'page_nr' is how far * we got, 'nr_pages' is how many pages are in the map. */ while (page_nr < nr_pages) page_cache_release(spd.pages[page_nr++]); in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; if (spd.nr_pages) error = splice_to_pipe(pipe, &spd); splice_shrink_spd(&spd); return error; } /** * generic_file_splice_read - splice data from file to a pipe * @in: file to splice from * @ppos: position in @in * @pipe: pipe to splice to * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will read pages from given file and fill them into a pipe. Can be * used as long as the address_space operations for the source implements * a readpage() hook. * */ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { loff_t isize, left; int ret; isize = i_size_read(in->f_mapping->host); if (unlikely(*ppos >= isize)) return 0; left = isize - *ppos; if (unlikely(left < len)) len = left; ret = __generic_file_splice_read(in, ppos, pipe, len, flags); if (ret > 0) { *ppos += ret; file_accessed(in); } return ret; } EXPORT_SYMBOL(generic_file_splice_read); static const struct pipe_buf_operations default_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return 1; } /* Pipe buffer operations for a socket and similar. 
*/ const struct pipe_buf_operations nosteal_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_nosteal, .get = generic_pipe_buf_get, }; EXPORT_SYMBOL(nosteal_pipe_buf_ops); static ssize_t kernel_readv(struct file *file, const struct iovec *vec, unsigned long vlen, loff_t offset) { mm_segment_t old_fs; loff_t pos = offset; ssize_t res; old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); set_fs(old_fs); return res; } ssize_t kernel_write(struct file *file, const char *buf, size_t count, loff_t pos) { mm_segment_t old_fs; ssize_t res; old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ res = vfs_write(file, (__force const char __user *)buf, count, &pos); set_fs(old_fs); return res; } EXPORT_SYMBOL(kernel_write); ssize_t default_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { unsigned int nr_pages; unsigned int nr_freed; size_t offset; struct page *pages[PIPE_DEF_BUFFERS]; struct partial_page partial[PIPE_DEF_BUFFERS]; struct iovec *vec, __vec[PIPE_DEF_BUFFERS]; ssize_t res; size_t this_len; int error; int i; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &default_pipe_buf_ops, .spd_release = spd_release_page, }; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; res = -ENOMEM; vec = __vec; if (spd.nr_pages_max > PIPE_DEF_BUFFERS) { vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL); if (!vec) goto shrink_ret; } offset = *ppos & ~PAGE_CACHE_MASK; nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { struct page *page; page = alloc_page(GFP_USER); error = -ENOMEM; if (!page) goto err; this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); vec[i].iov_base = (void __user *) page_address(page); vec[i].iov_len = this_len; spd.pages[i] = page; spd.nr_pages++; len -= this_len; offset = 0; } res = kernel_readv(in, vec, spd.nr_pages, *ppos); if (res < 0) { error = res; goto err; } error = 0; if (!res) goto err; nr_freed = 0; for (i = 0; i < spd.nr_pages; i++) { this_len = min_t(size_t, vec[i].iov_len, res); spd.partial[i].offset = 0; spd.partial[i].len = this_len; if (!this_len) { __free_page(spd.pages[i]); spd.pages[i] = NULL; nr_freed++; } res -= this_len; } spd.nr_pages -= nr_freed; res = splice_to_pipe(pipe, &spd); if (res > 0) *ppos += res; shrink_ret: if (vec != __vec) kfree(vec); splice_shrink_spd(&spd); return res; err: for (i = 0; i < spd.nr_pages; i++) __free_page(spd.pages[i]); res = error; goto shrink_ret; } EXPORT_SYMBOL(default_file_splice_read); /* * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' * using sendpage(). Return the number of bytes sent. */ static int pipe_to_sendpage(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct file *file = sd->u.file; loff_t pos = sd->pos; int more; if (!likely(file->f_op->sendpage)) return -EINVAL; more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0; if (sd->len < sd->total_len && pipe->nrbufs > 1) more |= MSG_SENDPAGE_NOTLAST; return file->f_op->sendpage(file, buf->page, buf->offset, sd->len, &pos, more); } /* * This is a little more tricky than the file -> pipe splicing. 
There are * basically three cases: * * - Destination page already exists in the address space and there * are users of it. For that case we have no other option than * copying the data. Tough luck. * - Destination page already exists in the address space, but there * are no users of it. Make sure it's uptodate, then drop it. Fall * through to last case. * - Destination page does not exist, we can add the pipe page to * the page cache and avoid the copy. * * If asked to move pages to the output file (SPLICE_F_MOVE is set in * sd->flags), we attempt to migrate pages from the pipe to the output * file address space page cache. This is possible if no one else has * the pipe page referenced outside of the pipe and page cache. If * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create * a new page in the output file page cache and fill/dirty that. */ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct file *file = sd->u.file; struct address_space *mapping = file->f_mapping; unsigned int offset, this_len; struct page *page; void *fsdata; int ret; offset = sd->pos & ~PAGE_CACHE_MASK; this_len = sd->len; if (this_len + offset > PAGE_CACHE_SIZE) this_len = PAGE_CACHE_SIZE - offset; ret = pagecache_write_begin(file, mapping, sd->pos, this_len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); if (unlikely(ret)) goto out; if (buf->page != page) { char *src = kmap_atomic(buf->page); char *dst = kmap_atomic(page); memcpy(dst + offset, src + buf->offset, this_len); flush_dcache_page(page); kunmap_atomic(dst); kunmap_atomic(src); } ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len, page, fsdata); out: return ret; } EXPORT_SYMBOL(pipe_to_file); static void wakeup_pipe_writers(struct pipe_inode_info *pipe) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } /** * splice_from_pipe_feed - feed available data from a pipe to a file * @pipe: pipe to splice from * @sd: information to @actor * @actor: handler that splices the data * * Description: * This function loops over the pipe and calls @actor to do the * actual moving of a single struct pipe_buffer to the desired * destination. It returns when there's no more buffers left in * the pipe or if the requested number of bytes (@sd->total_len) * have been copied. It returns a positive number (one) if the * pipe needs to be filled with more data, zero if the required * number of bytes have been copied and -errno on error. * * This, together with splice_from_pipe_{begin,end,next}, may be * used to implement the functionality of __splice_from_pipe() when * locking is required around copying the pipe buffers to the * destination.
*/ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_actor *actor) { int ret; while (pipe->nrbufs) { struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; const struct pipe_buf_operations *ops = buf->ops; sd->len = buf->len; if (sd->len > sd->total_len) sd->len = sd->total_len; ret = buf->ops->confirm(pipe, buf); if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; return ret; } ret = actor(pipe, buf, sd); if (ret <= 0) return ret; buf->offset += ret; buf->len -= ret; sd->num_spliced += ret; sd->len -= ret; sd->pos += ret; sd->total_len -= ret; if (!buf->len) { buf->ops = NULL; ops->release(pipe, buf); pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); pipe->nrbufs--; if (pipe->files) sd->need_wakeup = true; } if (!sd->total_len) return 0; } return 1; } EXPORT_SYMBOL(splice_from_pipe_feed); /** * splice_from_pipe_next - wait for some data to splice from * @pipe: pipe to splice from * @sd: information about the splice operation * * Description: * This function will wait for some data and return a positive * value (one) if pipe buffers are available. It will return zero * or -errno if no more data needs to be spliced. */ int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) { while (!pipe->nrbufs) { if (!pipe->writers) return 0; if (!pipe->waiting_writers && sd->num_spliced) return 0; if (sd->flags & SPLICE_F_NONBLOCK) return -EAGAIN; if (signal_pending(current)) return -ERESTARTSYS; if (sd->need_wakeup) { wakeup_pipe_writers(pipe); sd->need_wakeup = false; } pipe_wait(pipe); } return 1; } EXPORT_SYMBOL(splice_from_pipe_next); /** * splice_from_pipe_begin - start splicing from pipe * @sd: information about the splice operation * * Description: * This function should be called before a loop containing * splice_from_pipe_next() and splice_from_pipe_feed() to * initialize the necessary fields of @sd. */ void splice_from_pipe_begin(struct splice_desc *sd) { sd->num_spliced = 0; sd->need_wakeup = false; } EXPORT_SYMBOL(splice_from_pipe_begin); /** * splice_from_pipe_end - finish splicing from pipe * @pipe: pipe to splice from * @sd: information about the splice operation * * Description: * This function will wake up pipe writers if necessary. It should * be called after a loop containing splice_from_pipe_next() and * splice_from_pipe_feed(). */ void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd) { if (sd->need_wakeup) wakeup_pipe_writers(pipe); } EXPORT_SYMBOL(splice_from_pipe_end); /** * __splice_from_pipe - splice data from a pipe to given actor * @pipe: pipe to splice from * @sd: information to @actor * @actor: handler that splices the data * * Description: * This function does little more than loop over the pipe and call * @actor to do the actual moving of a single struct pipe_buffer to * the desired destination. See pipe_to_file, pipe_to_sendpage, or * pipe_to_user. * */ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_actor *actor) { int ret; splice_from_pipe_begin(sd); do { ret = splice_from_pipe_next(pipe, sd); if (ret > 0) ret = splice_from_pipe_feed(pipe, sd, actor); } while (ret > 0); splice_from_pipe_end(pipe, sd); return sd->num_spliced ? 
sd->num_spliced : ret; } EXPORT_SYMBOL(__splice_from_pipe); /** * splice_from_pipe - splice data from a pipe to a file * @pipe: pipe to splice from * @out: file to splice to * @ppos: position in @out * @len: how many bytes to splice * @flags: splice modifier flags * @actor: handler that splices the data * * Description: * See __splice_from_pipe. This function locks the pipe inode, * otherwise it's identical to __splice_from_pipe(). * */ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags, splice_actor *actor) { ssize_t ret; struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; pipe_lock(pipe); ret = __splice_from_pipe(pipe, &sd, actor); pipe_unlock(pipe); return ret; } /** * generic_file_splice_write - splice data from a pipe to a file * @pipe: pipe info * @out: file to write to * @ppos: position in @out * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will either move or copy pages (determined by @flags options) from * the given pipe inode to the given file. * */ ssize_t generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { struct address_space *mapping = out->f_mapping; struct inode *inode = mapping->host; struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; ssize_t ret; pipe_lock(pipe); splice_from_pipe_begin(&sd); do { ret = splice_from_pipe_next(pipe, &sd); if (ret <= 0) break; mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); ret = file_remove_suid(out); if (!ret) { ret = file_update_time(out); if (!ret) ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file); } mutex_unlock(&inode->i_mutex); } while (ret > 0); splice_from_pipe_end(pipe, &sd); pipe_unlock(pipe); if (sd.num_spliced) ret = sd.num_spliced; if (ret > 0) { int err; err = generic_write_sync(out, *ppos, ret); if (err) ret = err; else *ppos += ret; balance_dirty_pages_ratelimited(mapping); } return ret; } EXPORT_SYMBOL(generic_file_splice_write); static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { int ret; void *data; loff_t tmp = sd->pos; data = kmap(buf->page); ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp); kunmap(buf->page); return ret; } static ssize_t default_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { ssize_t ret; ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf); if (ret > 0) *ppos += ret; return ret; } /** * generic_splice_sendpage - splice data from a pipe to a socket * @pipe: pipe to splice from * @out: socket to write to * @ppos: position in @out * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will send @len bytes from the pipe to a network socket. No data copying * is involved. * */ ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); } EXPORT_SYMBOL(generic_splice_sendpage); /* * Attempt to initiate a splice from pipe to file. 
*/ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); if (out->f_op->splice_write) splice_write = out->f_op->splice_write; else splice_write = default_file_splice_write; return splice_write(pipe, out, ppos, len, flags); } /* * Attempt to initiate a splice from a file to a pipe. */ static long do_splice_to(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); int ret; if (unlikely(!(in->f_mode & FMODE_READ))) return -EBADF; ret = rw_verify_area(READ, in, ppos, len); if (unlikely(ret < 0)) return ret; if (in->f_op->splice_read) splice_read = in->f_op->splice_read; else splice_read = default_file_splice_read; return splice_read(in, ppos, pipe, len, flags); } /** * splice_direct_to_actor - splices data directly between two non-pipes * @in: file to splice from * @sd: actor information on where to splice to * @actor: handles the data splicing * * Description: * This is a special case helper to splice directly between two * points, without requiring an explicit pipe. Internally an allocated * pipe is cached in the process, and reused during the lifetime of * that process. * */ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, splice_direct_actor *actor) { struct pipe_inode_info *pipe; long ret, bytes; umode_t i_mode; size_t len; int i, flags; /* * We require the input being a regular file, as we don't want to * randomly drop data for eg socket -> socket splicing. Use the * piped splicing for that! */ i_mode = file_inode(in)->i_mode; if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode))) return -EINVAL; /* * neither in nor out is a pipe, setup an internal pipe attached to * 'out' and transfer the wanted data from 'in' to 'out' through that */ pipe = current->splice_pipe; if (unlikely(!pipe)) { pipe = alloc_pipe_info(); if (!pipe) return -ENOMEM; /* * We don't have an immediate reader, but we'll read the stuff * out of the pipe right after the splice_to_pipe(). So set * PIPE_READERS appropriately. */ pipe->readers = 1; current->splice_pipe = pipe; } /* * Do the splice. */ ret = 0; bytes = 0; len = sd->total_len; flags = sd->flags; /* * Don't block on output, we have to drain the direct pipe. */ sd->flags &= ~SPLICE_F_NONBLOCK; while (len) { size_t read_len; loff_t pos = sd->pos, prev_pos = pos; ret = do_splice_to(in, &pos, pipe, len, flags); if (unlikely(ret <= 0)) goto out_release; read_len = ret; sd->total_len = read_len; /* * NOTE: nonblocking mode only applies to the input. 
We * must not do the output in nonblocking mode as then we * could get stuck data in the internal pipe: */ ret = actor(pipe, sd); if (unlikely(ret <= 0)) { sd->pos = prev_pos; goto out_release; } bytes += ret; len -= ret; sd->pos = pos; if (ret < read_len) { sd->pos = prev_pos + ret; goto out_release; } } done: pipe->nrbufs = pipe->curbuf = 0; file_accessed(in); return bytes; out_release: /* * If we did an incomplete transfer we must release * the pipe buffers in question: */ for (i = 0; i < pipe->buffers; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) { buf->ops->release(pipe, buf); buf->ops = NULL; } } if (!bytes) bytes = ret; goto done; } EXPORT_SYMBOL(splice_direct_to_actor); static int direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) { struct file *file = sd->u.file; return do_splice_from(pipe, file, sd->opos, sd->total_len, sd->flags); } /** * do_splice_direct - splices data directly between two files * @in: file to splice from * @ppos: input file offset * @out: file to splice to * @opos: output file offset * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * For use by do_sendfile(). splice can easily emulate sendfile, but * doing it in the application would incur an extra system call * (splice in + splice out, as compared to just sendfile()). So this helper * can splice directly through a process-private pipe. * */ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, loff_t *opos, size_t len, unsigned int flags) { struct splice_desc sd = { .len = len, .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, .opos = opos, }; long ret; if (unlikely(!(out->f_mode & FMODE_WRITE))) return -EBADF; if (unlikely(out->f_flags & O_APPEND)) return -EINVAL; ret = rw_verify_area(WRITE, out, opos, len); if (unlikely(ret < 0)) return ret; ret = splice_direct_to_actor(in, &sd, direct_splice_actor); if (ret > 0) *ppos = sd.pos; return ret; } static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags); /* * Determine where to splice to/from. */ static long do_splice(struct file *in, loff_t __user *off_in, struct file *out, loff_t __user *off_out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe; struct pipe_inode_info *opipe; loff_t offset; long ret; ipipe = get_pipe_info(in); opipe = get_pipe_info(out); if (ipipe && opipe) { if (off_in || off_out) return -ESPIPE; if (!(in->f_mode & FMODE_READ)) return -EBADF; if (!(out->f_mode & FMODE_WRITE)) return -EBADF; /* Splicing to self would be fun, but... 
*/ if (ipipe == opipe) return -EINVAL; return splice_pipe_to_pipe(ipipe, opipe, len, flags); } if (ipipe) { if (off_in) return -ESPIPE; if (off_out) { if (!(out->f_mode & FMODE_PWRITE)) return -EINVAL; if (copy_from_user(&offset, off_out, sizeof(loff_t))) return -EFAULT; } else { offset = out->f_pos; } if (unlikely(!(out->f_mode & FMODE_WRITE))) return -EBADF; if (unlikely(out->f_flags & O_APPEND)) return -EINVAL; ret = rw_verify_area(WRITE, out, &offset, len); if (unlikely(ret < 0)) return ret; file_start_write(out); ret = do_splice_from(ipipe, out, &offset, len, flags); file_end_write(out); if (!off_out) out->f_pos = offset; else if (copy_to_user(off_out, &offset, sizeof(loff_t))) ret = -EFAULT; return ret; } if (opipe) { if (off_out) return -ESPIPE; if (off_in) { if (!(in->f_mode & FMODE_PREAD)) return -EINVAL; if (copy_from_user(&offset, off_in, sizeof(loff_t))) return -EFAULT; } else { offset = in->f_pos; } ret = do_splice_to(in, &offset, opipe, len, flags); if (!off_in) in->f_pos = offset; else if (copy_to_user(off_in, &offset, sizeof(loff_t))) ret = -EFAULT; return ret; } return -EINVAL; } /* * Map an iov into an array of pages and offset/length tuples. With the * partial_page structure, we can map several non-contiguous ranges into * our one pages[] map instead of splitting that operation into pieces. * Could easily be exported as a generic helper for other users, in which * case one would probably want to add a 'max_nr_pages' parameter as well. */ static int get_iovec_page_array(const struct iovec __user *iov, unsigned int nr_vecs, struct page **pages, struct partial_page *partial, bool aligned, unsigned int pipe_buffers) { int buffers = 0, error = 0; while (nr_vecs) { unsigned long off, npages; struct iovec entry; void __user *base; size_t len; int i; error = -EFAULT; if (copy_from_user(&entry, iov, sizeof(entry))) break; base = entry.iov_base; len = entry.iov_len; /* * Sanity check this iovec. 0 read succeeds. */ error = 0; if (unlikely(!len)) break; error = -EFAULT; if (!access_ok(VERIFY_READ, base, len)) break; /* * Get this base offset and number of pages, then map * in the user pages. */ off = (unsigned long) base & ~PAGE_MASK; /* * If asked for alignment, the offset must be zero and the * length a multiple of the PAGE_SIZE. */ error = -EINVAL; if (aligned && (off || len & ~PAGE_MASK)) break; npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; if (npages > pipe_buffers - buffers) npages = pipe_buffers - buffers; error = get_user_pages_fast((unsigned long)base, npages, 0, &pages[buffers]); if (unlikely(error <= 0)) break; /* * Fill this contiguous range into the partial page map. */ for (i = 0; i < error; i++) { const int plen = min_t(size_t, len, PAGE_SIZE - off); partial[buffers].offset = off; partial[buffers].len = plen; off = 0; len -= plen; buffers++; } /* * We didn't complete this iov, stop here since it probably * means we have to move some of this into a pipe to * be able to continue. */ if (len) break; /* * Don't continue if we mapped fewer pages than we asked for, * or if we mapped the max number of pages that we have * room for. */ if (error < npages || buffers == pipe_buffers) break; nr_vecs--; iov++; } if (buffers) return buffers; return error; } static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data); return n == sd->len ?
n : -EFAULT; } /* * For lack of a better implementation, implement vmsplice() to userspace * as a simple copy of the pipes pages to the user iov. */ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov, unsigned long nr_segs, unsigned int flags) { struct pipe_inode_info *pipe; struct splice_desc sd; long ret; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; ssize_t count = 0; pipe = get_pipe_info(file); if (!pipe) return -EBADF; ret = rw_copy_check_uvector(READ, uiov, nr_segs, ARRAY_SIZE(iovstack), iovstack, &iov); if (ret <= 0) return ret; iov_iter_init(&iter, READ, iov, nr_segs, count); sd.len = 0; sd.total_len = count; sd.flags = flags; sd.u.data = &iter; sd.pos = 0; pipe_lock(pipe); ret = __splice_from_pipe(pipe, &sd, pipe_to_user); pipe_unlock(pipe); if (iov != iovstack) kfree(iov); return ret; } /* * vmsplice splices a user address range into a pipe. It can be thought of * as splice-from-memory, where the regular splice is splice-from-file (or * to file). In both cases the output is a pipe, naturally. */ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov, unsigned long nr_segs, unsigned int flags) { struct pipe_inode_info *pipe; struct page *pages[PIPE_DEF_BUFFERS]; struct partial_page partial[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &user_page_pipe_buf_ops, .spd_release = spd_release_page, }; long ret; pipe = get_pipe_info(file); if (!pipe) return -EBADF; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages, spd.partial, false, spd.nr_pages_max); if (spd.nr_pages <= 0) ret = spd.nr_pages; else ret = splice_to_pipe(pipe, &spd); splice_shrink_spd(&spd); return ret; } /* * Note that vmsplice only really supports true splicing _from_ user memory * to a pipe, not the other way around. Splicing from user memory is a simple * operation that can be supported without any funky alignment restrictions * or nasty vm tricks. We simply map in the user memory and fill them into * a pipe. The reverse isn't quite as easy, though. There are two possible * solutions for that: * * - memcpy() the data internally, at which point we might as well just * do a regular read() on the buffer anyway. * - Lots of nasty vm tricks, that are neither fast nor flexible (it * has restriction limitations on both ends of the pipe). * * Currently we punt and implement it as a normal copy, see pipe_to_user(). 
* */ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov, unsigned long, nr_segs, unsigned int, flags) { struct fd f; long error; if (unlikely(nr_segs > UIO_MAXIOV)) return -EINVAL; else if (unlikely(!nr_segs)) return 0; error = -EBADF; f = fdget(fd); if (f.file) { if (f.file->f_mode & FMODE_WRITE) error = vmsplice_to_pipe(f.file, iov, nr_segs, flags); else if (f.file->f_mode & FMODE_READ) error = vmsplice_to_user(f.file, iov, nr_segs, flags); fdput(f); } return error; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32, unsigned int, nr_segs, unsigned int, flags) { unsigned i; struct iovec __user *iov; if (nr_segs > UIO_MAXIOV) return -EINVAL; iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec)); for (i = 0; i < nr_segs; i++) { struct compat_iovec v; if (get_user(v.iov_base, &iov32[i].iov_base) || get_user(v.iov_len, &iov32[i].iov_len) || put_user(compat_ptr(v.iov_base), &iov[i].iov_base) || put_user(v.iov_len, &iov[i].iov_len)) return -EFAULT; } return sys_vmsplice(fd, iov, nr_segs, flags); } #endif SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags) { struct fd in, out; long error; if (unlikely(!len)) return 0; error = -EBADF; in = fdget(fd_in); if (in.file) { if (in.file->f_mode & FMODE_READ) { out = fdget(fd_out); if (out.file) { if (out.file->f_mode & FMODE_WRITE) error = do_splice(in.file, off_in, out.file, off_out, len, flags); fdput(out); } } fdput(in); } return error; } /* * Make sure there's data to read. Wait for input if we can, otherwise * return an appropriate error. */ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check ->nrbufs without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (pipe->nrbufs) return 0; ret = 0; pipe_lock(pipe); while (!pipe->nrbufs) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!pipe->writers) break; if (!pipe->waiting_writers) { if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } } pipe_wait(pipe); } pipe_unlock(pipe); return ret; } /* * Make sure there's writeable room. Wait for room if we can, otherwise * return an appropriate error. */ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check ->nrbufs without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (pipe->nrbufs < pipe->buffers) return 0; ret = 0; pipe_lock(pipe); while (pipe->nrbufs >= pipe->buffers) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; break; } if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } pipe_unlock(pipe); return ret; } /* * Splice contents of ipipe to opipe. */ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, nbuf; bool input_wakeup = false; retry: ret = ipipe_prep(ipipe, flags); if (ret) return ret; ret = opipe_prep(opipe, flags); if (ret) return ret; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). 
*/ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } if (!ipipe->nrbufs && !ipipe->writers) break; /* * Cannot make any progress, because either the input * pipe is empty or the output pipe is full. */ if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) { /* Already processed some buffers, break */ if (ret) break; if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } /* * We raced with another reader/writer and haven't * managed to process any buffers. A zero return * value means EOF, so retry instead. */ pipe_unlock(ipipe); pipe_unlock(opipe); goto retry; } ibuf = ipipe->bufs + ipipe->curbuf; nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); obuf = opipe->bufs + nbuf; if (len >= ibuf->len) { /* * Simply move the whole buffer from ipipe to opipe */ *obuf = *ibuf; ibuf->ops = NULL; opipe->nrbufs++; ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1); ipipe->nrbufs--; input_wakeup = true; } else { /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ ibuf->ops->get(ipipe, ibuf); *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; obuf->len = len; opipe->nrbufs++; ibuf->offset += obuf->len; ibuf->len -= obuf->len; } ret += obuf->len; len -= obuf->len; } while (len); pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); if (input_wakeup) wakeup_pipe_writers(ipipe); return ret; } /* * Link contents of ipipe to opipe. */ static int link_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, i = 0, nbuf; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } /* * If we have iterated all input buffers or ran out of * output room, break. */ if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) break; ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1)); nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ ibuf->ops->get(ipipe, ibuf); obuf = opipe->bufs + nbuf; *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; if (obuf->len > len) obuf->len = len; opipe->nrbufs++; ret += obuf->len; len -= obuf->len; i++; } while (len); /* * return EAGAIN if we have the potential of some data in the * future, otherwise just return 0 */ if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ret = -EAGAIN; pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); return ret; } /* * This is a tee(1) implementation that works on pipes. It doesn't copy * any data, it simply references the 'in' pages on the 'out' pipe. * The 'flags' used are the SPLICE_F_* variants, currently the only * applicable one is SPLICE_F_NONBLOCK. 
*/ static long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe = get_pipe_info(in); struct pipe_inode_info *opipe = get_pipe_info(out); int ret = -EINVAL; /* * Duplicate the contents of ipipe to opipe without actually * copying the data. */ if (ipipe && opipe && ipipe != opipe) { /* * Keep going, unless we encounter an error. The ipipe/opipe * ordering doesn't really matter. */ ret = ipipe_prep(ipipe, flags); if (!ret) { ret = opipe_prep(opipe, flags); if (!ret) ret = link_pipe(ipipe, opipe, len, flags); } } return ret; } SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags) { struct fd in; int error; if (unlikely(!len)) return 0; error = -EBADF; in = fdget(fdin); if (in.file) { if (in.file->f_mode & FMODE_READ) { struct fd out = fdget(fdout); if (out.file) { if (out.file->f_mode & FMODE_WRITE) error = do_tee(in.file, out.file, len, flags); fdput(out); } } fdput(in); } return error; }
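/*
 * Illustrative userspace sketch (not part of the original file, kept
 * under #if 0 so it cannot affect a kernel build): move a buffer into a
 * pipe with vmsplice(2), duplicate the pipe contents with tee(2), and
 * drain both pipes into files with splice(2). File names are made up
 * and error handling is abbreviated.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	static char buf[] = "spliced through a pipe\n";
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
	int p1[2], p2[2], fd1, fd2;

	if (pipe(p1) < 0 || pipe(p2) < 0)
		return 1;

	fd1 = open("copy1.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	fd2 = open("copy2.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd1 < 0 || fd2 < 0)
		return 1;

	/* Map the user buffer into the first pipe (vmsplice_to_pipe()). */
	if (vmsplice(p1[1], &iov, 1, 0) < 0)
		return 1;

	/* Duplicate the pipe's pages into the second pipe (do_tee()). */
	if (tee(p1[0], p2[1], iov.iov_len, 0) < 0)
		return 1;

	/* Drain each pipe into its own file (do_splice()). */
	if (splice(p1[0], NULL, fd1, NULL, iov.iov_len, 0) < 0 ||
	    splice(p2[0], NULL, fd2, NULL, iov.iov_len, 0) < 0)
		return 1;

	return 0;
}
#endif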
./CrossVul/dataset_final_sorted/CWE-264/c/bad_2287_11
crossvul-cpp_data_good_2126_0
/* * Fast Userspace Mutexes (which I call "Futexes!"). * (C) Rusty Russell, IBM 2002 * * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar * (C) Copyright 2003 Red Hat Inc, All Rights Reserved * * Removed page pinning, fix privately mapped COW pages and other cleanups * (C) Copyright 2003, 2004 Jamie Lokier * * Robust futex support started by Ingo Molnar * (C) Copyright 2006 Red Hat Inc, All Rights Reserved * Thanks to Thomas Gleixner for suggestions, analysis and fixes. * * PI-futex support started by Ingo Molnar and Thomas Gleixner * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * * PRIVATE futexes by Eric Dumazet * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com> * * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com> * Copyright (C) IBM Corporation, 2009 * Thanks to Thomas Gleixner for conceptual design and careful reviews. * * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly * enough at me, Linus for the original (flawed) idea, Matthew * Kirkwood for proof-of-concept implementation. * * "The futexes are also cursed." * "But they come in a choice of three flavours!" * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <linux/poll.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/jhash.h> #include <linux/init.h> #include <linux/futex.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/signal.h> #include <linux/export.h> #include <linux/magic.h> #include <linux/pid.h> #include <linux/nsproxy.h> #include <linux/ptrace.h> #include <linux/sched/rt.h> #include <linux/hugetlb.h> #include <linux/freezer.h> #include <linux/bootmem.h> #include <asm/futex.h> #include "locking/rtmutex_common.h" /* * READ this before attempting to hack on futexes! * * Basic futex operation and ordering guarantees * ============================================= * * The waiter reads the futex value in user space and calls * futex_wait(). This function computes the hash bucket and acquires * the hash bucket lock. After that it reads the futex user space value * again and verifies that the data has not changed. If it has not changed * it enqueues itself into the hash bucket, releases the hash bucket lock * and schedules. * * The waker side modifies the user space value of the futex and calls * futex_wake(). This function computes the hash bucket and acquires the * hash bucket lock. Then it looks for waiters on that futex in the hash * bucket and wakes them. * * In futex wake up scenarios where no tasks are blocked on a futex, taking * the hb spinlock can be avoided and simply return. 
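 *
 * (For orientation, the minimal userspace pairing these paths serve --
 *  an illustrative sketch using the raw syscall, not kernel code:
 *
 *	waiter:
 *		while (atomic_load(&futex) == 0)
 *			syscall(SYS_futex, &futex, FUTEX_WAIT, 0, NULL, NULL, 0);
 *	waker:
 *		atomic_store(&futex, 1);
 *		syscall(SYS_futex, &futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 * )
 *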
In order for this * optimization to work, ordering guarantees must exist so that the waiter * being added to the list is acknowledged when the list is concurrently being * checked by the waker, avoiding scenarios like the following: * * CPU 0 CPU 1 * val = *futex; * sys_futex(WAIT, futex, val); * futex_wait(futex, val); * uval = *futex; * *futex = newval; * sys_futex(WAKE, futex); * futex_wake(futex); * if (queue_empty()) * return; * if (uval == val) * lock(hash_bucket(futex)); * queue(); * unlock(hash_bucket(futex)); * schedule(); * * This would cause the waiter on CPU 0 to wait forever because it * missed the transition of the user space value from val to newval * and the waker did not find the waiter in the hash bucket queue. * * The correct serialization ensures that a waiter either observes * the changed user space value before blocking or is woken by a * concurrent waker: * * CPU 0 CPU 1 * val = *futex; * sys_futex(WAIT, futex, val); * futex_wait(futex, val); * * waiters++; (a) * mb(); (A) <-- paired with -. * | * lock(hash_bucket(futex)); | * | * uval = *futex; | * | *futex = newval; * | sys_futex(WAKE, futex); * | futex_wake(futex); * | * `-------> mb(); (B) * if (uval == val) * queue(); * unlock(hash_bucket(futex)); * schedule(); if (waiters) * lock(hash_bucket(futex)); * else wake_waiters(futex); * waiters--; (b) unlock(hash_bucket(futex)); * * Where (A) orders the waiters increment and the futex value read through * atomic operations (see hb_waiters_inc) and where (B) orders the write * to futex and the waiters read -- this is done by the barriers in * get_futex_key_refs(), through either ihold or atomic_inc, depending on the * futex type. * * This yields the following case (where X:=waiters, Y:=futex): * * X = Y = 0 * * w[X]=1 w[Y]=1 * MB MB * r[Y]=y r[X]=x * * Which guarantees that x==0 && y==0 is impossible; which translates back into * the guarantee that we cannot both miss the futex variable change and the * enqueue. * * Note that a new waiter is accounted for in (a) even when it is possible that * the wait call can return error, in which case we backtrack from it in (b). * Refer to the comment in queue_lock(). * * Similarly, in order to account for waiters being requeued on another * address we always increment the waiters for the destination bucket before * acquiring the lock. It then decrements them again after releasing it - * the code that actually moves the futex(es) between hash buckets (requeue_futex) * will do the additional required waiter count housekeeping. This is done for * double_lock_hb() and double_unlock_hb(), respectively. */ #ifndef CONFIG_HAVE_FUTEX_CMPXCHG int __read_mostly futex_cmpxchg_enabled; #endif /* * Futex flags used to encode options to functions and preserve them across * restarts. 
*/ #define FLAGS_SHARED 0x01 #define FLAGS_CLOCKRT 0x02 #define FLAGS_HAS_TIMEOUT 0x04 /* * Priority Inheritance state: */ struct futex_pi_state { /* * list of 'owned' pi_state instances - these have to be * cleaned up in do_exit() if the task exits prematurely: */ struct list_head list; /* * The PI object: */ struct rt_mutex pi_mutex; struct task_struct *owner; atomic_t refcount; union futex_key key; }; /** * struct futex_q - The hashed futex queue entry, one per waiting task * @list: priority-sorted list of tasks waiting on this futex * @task: the task waiting on the futex * @lock_ptr: the hash bucket lock * @key: the key the futex is hashed on * @pi_state: optional priority inheritance state * @rt_waiter: rt_waiter storage for use with requeue_pi * @requeue_pi_key: the requeue_pi target futex key * @bitset: bitset for the optional bitmasked wakeup * * We use this hashed waitqueue, instead of a normal wait_queue_t, so * we can wake only the relevant ones (hashed queues may be shared). * * A futex_q has a woken state, just like tasks have TASK_RUNNING. * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. * The order of wakeup is always to make the first condition true, then * the second. * * PI futexes are typically woken before they are removed from the hash list via * the rt_mutex code. See unqueue_me_pi(). */ struct futex_q { struct plist_node list; struct task_struct *task; spinlock_t *lock_ptr; union futex_key key; struct futex_pi_state *pi_state; struct rt_mutex_waiter *rt_waiter; union futex_key *requeue_pi_key; u32 bitset; }; static const struct futex_q futex_q_init = { /* list gets initialized in queue_me()*/ .key = FUTEX_KEY_INIT, .bitset = FUTEX_BITSET_MATCH_ANY }; /* * Hash buckets are shared by all the futex_keys that hash to the same * location. Each key may have multiple futex_q structures, one for each task * waiting on a futex. */ struct futex_hash_bucket { atomic_t waiters; spinlock_t lock; struct plist_head chain; } ____cacheline_aligned_in_smp; static unsigned long __read_mostly futex_hashsize; static struct futex_hash_bucket *futex_queues; static inline void futex_get_mm(union futex_key *key) { atomic_inc(&key->private.mm->mm_count); /* * Ensure futex_get_mm() implies a full barrier such that * get_futex_key() implies a full barrier. This is relied upon * as full barrier (B), see the ordering comment above. */ smp_mb__after_atomic_inc(); } /* * Reflects a new waiter being added to the waitqueue. */ static inline void hb_waiters_inc(struct futex_hash_bucket *hb) { #ifdef CONFIG_SMP atomic_inc(&hb->waiters); /* * Full barrier (A), see the ordering comment above. */ smp_mb__after_atomic_inc(); #endif } /* * Reflects a waiter being removed from the waitqueue by wakeup * paths. */ static inline void hb_waiters_dec(struct futex_hash_bucket *hb) { #ifdef CONFIG_SMP atomic_dec(&hb->waiters); #endif } static inline int hb_waiters_pending(struct futex_hash_bucket *hb) { #ifdef CONFIG_SMP return atomic_read(&hb->waiters); #else return 1; #endif } /* * We hash on the keys returned from get_futex_key (see below). */ static struct futex_hash_bucket *hash_futex(union futex_key *key) { u32 hash = jhash2((u32*)&key->both.word, (sizeof(key->both.word)+sizeof(key->both.ptr))/4, key->both.offset); return &futex_queues[hash & (futex_hashsize - 1)]; } /* * Return 1 if two futex_keys are equal, 0 otherwise. 
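 * (Two tasks mapping the same shared file page thus compare equal here,
 * via the (inode, pgoff, offset) key, even though their user addresses
 * differ; private futexes compare on (mm, address) instead.)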
*/ static inline int match_futex(union futex_key *key1, union futex_key *key2) { return (key1 && key2 && key1->both.word == key2->both.word && key1->both.ptr == key2->both.ptr && key1->both.offset == key2->both.offset); } /* * Take a reference to the resource addressed by a key. * Can be called while holding spinlocks. * */ static void get_futex_key_refs(union futex_key *key) { if (!key->both.ptr) return; switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: ihold(key->shared.inode); /* implies MB (B) */ break; case FUT_OFF_MMSHARED: futex_get_mm(key); /* implies MB (B) */ break; } } /* * Drop a reference to the resource addressed by a key. * The hash bucket spinlock must not be held. */ static void drop_futex_key_refs(union futex_key *key) { if (!key->both.ptr) { /* If we're here then we tried to put a key we failed to get */ WARN_ON_ONCE(1); return; } switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: iput(key->shared.inode); break; case FUT_OFF_MMSHARED: mmdrop(key->private.mm); break; } } /** * get_futex_key() - Get parameters which are the keys for a futex * @uaddr: virtual address of the futex * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED * @key: address where result is stored. * @rw: mapping needs to be read/write (values: VERIFY_READ, * VERIFY_WRITE) * * Return: a negative error code or 0 * * The key words are stored in *key on success. * * For shared mappings, it's (page->index, file_inode(vma->vm_file), * offset_within_page). For private mappings, it's (uaddr, current->mm). * We can usually work out the index without swapping in the page. * * lock_page() might sleep, the caller should not hold a spinlock. */ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page, *page_head; int err, ro = 0; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; if (unlikely((address % sizeof(u32)) != 0)) return -EINVAL; address -= key->both.offset; if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) return -EFAULT; /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs * virtual address, we dont even have to find the underlying vma. * Note : We do have to check 'uaddr' is a valid user address, * but access_ok() should be faster than find_vma() */ if (!fshared) { key->private.mm = mm; key->private.address = address; get_futex_key_refs(key); /* implies MB (B) */ return 0; } again: err = get_user_pages_fast(address, 1, 1, &page); /* * If write access is not required (eg. FUTEX_WAIT), try * and get read-only access. */ if (err == -EFAULT && rw == VERIFY_READ) { err = get_user_pages_fast(address, 1, 0, &page); ro = 1; } if (err < 0) return err; else err = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE page_head = page; if (unlikely(PageTail(page))) { put_page(page); /* serialize against __split_huge_page_splitting() */ local_irq_disable(); if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) { page_head = compound_head(page); /* * page_head is valid pointer but we must pin * it before taking the PG_lock and/or * PG_compound_lock. The moment we re-enable * irqs __split_huge_page_splitting() can * return and the head page can be freed from * under us. We can't take the PG_lock and/or * PG_compound_lock on a page that could be * freed from under us. 
*/ if (page != page_head) { get_page(page_head); put_page(page); } local_irq_enable(); } else { local_irq_enable(); goto again; } } #else page_head = compound_head(page); if (page != page_head) { get_page(page_head); put_page(page); } #endif lock_page(page_head); /* * If page_head->mapping is NULL, then it cannot be a PageAnon * page; but it might be the ZERO_PAGE or in the gate area or * in a special mapping (all cases which we are happy to fail); * or it may have been a good file page when get_user_pages_fast * found it, but truncated or holepunched or subjected to * invalidate_complete_page2 before we got the page lock (also * cases which we are happy to fail). And we hold a reference, * so refcount care in invalidate_complete_page's remove_mapping * prevents drop_caches from setting mapping to NULL beneath us. * * The case we do have to guard against is when memory pressure made * shmem_writepage move it from filecache to swapcache beneath us: * an unlikely race, but we do need to retry for page_head->mapping. */ if (!page_head->mapping) { int shmem_swizzled = PageSwapCache(page_head); unlock_page(page_head); put_page(page_head); if (shmem_swizzled) goto again; return -EFAULT; } /* * Private mappings are handled in a simple way. * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. */ if (PageAnon(page_head)) { /* * A RO anonymous page will never change and thus doesn't make * sense for futex operations. */ if (ro) { err = -EFAULT; goto out; } key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; } else { key->both.offset |= FUT_OFF_INODE; /* inode-based key */ key->shared.inode = page_head->mapping->host; key->shared.pgoff = basepage_index(page); } get_futex_key_refs(key); /* implies MB (B) */ out: unlock_page(page_head); put_page(page_head); return err; } static inline void put_futex_key(union futex_key *key) { drop_futex_key_refs(key); } /** * fault_in_user_writeable() - Fault in user address and verify RW access * @uaddr: pointer to faulting user space address * * Slow path to fixup the fault we just took in the atomic write * access to @uaddr. * * We have no generic implementation of a non-destructive write to the * user address. We know that we faulted in the atomic pagefault * disabled section so we can as well avoid the #PF overhead by * calling get_user_pages() right away. */ static int fault_in_user_writeable(u32 __user *uaddr) { struct mm_struct *mm = current->mm; int ret; down_read(&mm->mmap_sem); ret = fixup_user_fault(current, mm, (unsigned long)uaddr, FAULT_FLAG_WRITE); up_read(&mm->mmap_sem); return ret < 0 ? ret : 0; } /** * futex_top_waiter() - Return the highest priority waiter on a futex * @hb: the hash bucket the futex_q's reside in * @key: the futex key (to distinguish it from other futex futex_q's) * * Must be called with the hb lock held. 
*/ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key) { struct futex_q *this; plist_for_each_entry(this, &hb->chain, list) { if (match_futex(&this->key, key)) return this; } return NULL; } static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval) { int ret; pagefault_disable(); ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval); pagefault_enable(); return ret; } static int get_futex_value_locked(u32 *dest, u32 __user *from) { int ret; pagefault_disable(); ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); pagefault_enable(); return ret ? -EFAULT : 0; } /* * PI code: */ static int refill_pi_state_cache(void) { struct futex_pi_state *pi_state; if (likely(current->pi_state_cache)) return 0; pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); if (!pi_state) return -ENOMEM; INIT_LIST_HEAD(&pi_state->list); /* pi_mutex gets initialized later */ pi_state->owner = NULL; atomic_set(&pi_state->refcount, 1); pi_state->key = FUTEX_KEY_INIT; current->pi_state_cache = pi_state; return 0; } static struct futex_pi_state * alloc_pi_state(void) { struct futex_pi_state *pi_state = current->pi_state_cache; WARN_ON(!pi_state); current->pi_state_cache = NULL; return pi_state; } static void free_pi_state(struct futex_pi_state *pi_state) { if (!atomic_dec_and_test(&pi_state->refcount)) return; /* * If pi_state->owner is NULL, the owner is most probably dying * and has cleaned up the pi_state already */ if (pi_state->owner) { raw_spin_lock_irq(&pi_state->owner->pi_lock); list_del_init(&pi_state->list); raw_spin_unlock_irq(&pi_state->owner->pi_lock); rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); } if (current->pi_state_cache) kfree(pi_state); else { /* * pi_state->list is already empty. * clear pi_state->owner. * refcount is at 0 - put it back to 1. */ pi_state->owner = NULL; atomic_set(&pi_state->refcount, 1); current->pi_state_cache = pi_state; } } /* * Look up the task based on what TID userspace gave us. * We dont trust it. */ static struct task_struct * futex_find_get_task(pid_t pid) { struct task_struct *p; rcu_read_lock(); p = find_task_by_vpid(pid); if (p) get_task_struct(p); rcu_read_unlock(); return p; } /* * This task is holding PI mutexes at exit time => bad. * Kernel cleans up PI-state, but userspace is likely hosed. * (Robust-futex cleanup is separate and might save the day for userspace.) 
*/ void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; struct futex_hash_bucket *hb; union futex_key key = FUTEX_KEY_INIT; if (!futex_cmpxchg_enabled) return; /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful * versus waiters unqueueing themselves: */ raw_spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; hb = hash_futex(&key); raw_spin_unlock_irq(&curr->pi_lock); spin_lock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); /* * We dropped the pi-lock, so re-check whether this * task still owns the PI-state: */ if (head->next != next) { spin_unlock(&hb->lock); continue; } WARN_ON(pi_state->owner != curr); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); pi_state->owner = NULL; raw_spin_unlock_irq(&curr->pi_lock); rt_mutex_unlock(&pi_state->pi_mutex); spin_unlock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); } raw_spin_unlock_irq(&curr->pi_lock); } static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, struct task_struct *task) { struct futex_pi_state *pi_state = NULL; struct futex_q *this, *next; struct task_struct *p; pid_t pid = uval & FUTEX_TID_MASK; plist_for_each_entry_safe(this, next, &hb->chain, list) { if (match_futex(&this->key, key)) { /* * Another waiter already exists - bump up * the refcount and return its pi_state: */ pi_state = this->pi_state; /* * Userspace might have messed up non-PI and PI futexes */ if (unlikely(!pi_state)) return -EINVAL; WARN_ON(!atomic_read(&pi_state->refcount)); /* * When pi_state->owner is NULL then the owner died * and another waiter is on the fly. pi_state->owner * is fixed up by the task which acquires * pi_state->rt_mutex. * * We do not check for pid == 0 which can happen when * the owner died and robust_list_exit() cleared the * TID. */ if (pid && pi_state->owner) { /* * Bail out if user space manipulated the * futex value. */ if (pid != task_pid_vnr(pi_state->owner)) return -EINVAL; } /* * Protect against a corrupted uval. If uval * is 0x80000000 then pid is 0 and the waiter * bit is set. So the deadlock check in the * calling code has failed and we did not fall * into the check above due to !pid. */ if (task && pi_state->owner == task) return -EDEADLK; atomic_inc(&pi_state->refcount); *ps = pi_state; return 0; } } /* * We are the first waiter - try to look up the real owner and attach * the new pi_state to it, but bail out when TID = 0 */ if (!pid) return -ESRCH; p = futex_find_get_task(pid); if (!p) return -ESRCH; if (!p->mm) { put_task_struct(p); return -EPERM; } /* * We need to look at the task state flags to figure out, * whether the task is exiting. To protect against the do_exit * change of the task flags, we do this protected by * p->pi_lock: */ raw_spin_lock_irq(&p->pi_lock); if (unlikely(p->flags & PF_EXITING)) { /* * The task is on the way out. When PF_EXITPIDONE is * set, we know that the task has finished the * cleanup: */ int ret = (p->flags & PF_EXITPIDONE) ? 
-ESRCH : -EAGAIN; raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); return ret; } pi_state = alloc_pi_state(); /* * Initialize the pi_mutex in locked state and make 'p' * the owner of it: */ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); /* Store the key for possible exit cleanups: */ pi_state->key = *key; WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); pi_state->owner = p; raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); *ps = pi_state; return 0; } /** * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex * @uaddr: the pi futex user address * @hb: the pi futex hash bucket * @key: the futex key associated with uaddr and hb * @ps: the pi_state pointer where we store the result of the * lookup * @task: the task to perform the atomic lock work for. This will * be "current" except in the case of requeue pi. * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Return: * 0 - ready to wait; * 1 - acquired the lock; * <0 - error * * The hb->lock and futex_key refs shall be held by the caller. */ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, struct task_struct *task, int set_waiters) { int lock_taken, ret, force_take = 0; u32 uval, newval, curval, vpid = task_pid_vnr(task); retry: ret = lock_taken = 0; /* * To avoid races, we attempt to take the lock here again * (by doing a 0 -> TID atomic cmpxchg), while holding all * the locks. It will most likely not succeed. */ newval = vpid; if (set_waiters) newval |= FUTEX_WAITERS; if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval))) return -EFAULT; /* * Detect deadlocks. */ if ((unlikely((curval & FUTEX_TID_MASK) == vpid))) return -EDEADLK; /* * Surprise - we got the lock. Just return to userspace: */ if (unlikely(!curval)) return 1; uval = curval; /* * Set the FUTEX_WAITERS flag, so the owner will know it has someone * to wake at the next unlock. */ newval = curval | FUTEX_WAITERS; /* * Should we force take the futex? See below. */ if (unlikely(force_take)) { /* * Keep the OWNER_DIED and the WAITERS bit and set the * new TID value. */ newval = (curval & ~FUTEX_TID_MASK) | vpid; force_take = 0; lock_taken = 1; } if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) return -EFAULT; if (unlikely(curval != uval)) goto retry; /* * We took the lock due to forced take over. */ if (unlikely(lock_taken)) return 1; /* * We dont have the lock. Look up the PI state (or create it if * we are the first waiter): */ ret = lookup_pi_state(uval, hb, key, ps, task); if (unlikely(ret)) { switch (ret) { case -ESRCH: /* * We failed to find an owner for this * futex. So we have no pi_state to block * on. This can happen in two cases: * * 1) The owner died * 2) A stale FUTEX_WAITERS bit * * Re-read the futex value. */ if (get_futex_value_locked(&curval, uaddr)) return -EFAULT; /* * If the owner died or we have a stale * WAITERS bit the owner TID in the user space * futex is 0. */ if (!(curval & FUTEX_TID_MASK)) { force_take = 1; goto retry; } default: break; } } return ret; } /** * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket * @q: The futex_q to unqueue * * The q->lock_ptr must not be NULL and must be held by the caller. 
*/ static void __unqueue_futex(struct futex_q *q) { struct futex_hash_bucket *hb; if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) || WARN_ON(plist_node_empty(&q->list))) return; hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); plist_del(&q->list, &hb->chain); hb_waiters_dec(hb); } /* * The hash bucket lock must be held when this is called. * Afterwards, the futex_q must not be accessed. */ static void wake_futex(struct futex_q *q) { struct task_struct *p = q->task; if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) return; /* * We set q->lock_ptr = NULL _before_ we wake up the task. If * a non-futex wake up happens on another CPU then the task * might exit and p would dereference a non-existing task * struct. Prevent this by holding a reference on p across the * wake up. */ get_task_struct(p); __unqueue_futex(q); /* * The waiting task can free the futex_q as soon as * q->lock_ptr = NULL is written, without taking any locks. A * memory barrier is required here to prevent the following * store to lock_ptr from getting ahead of the plist_del. */ smp_wmb(); q->lock_ptr = NULL; wake_up_state(p, TASK_NORMAL); put_task_struct(p); } static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) { struct task_struct *new_owner; struct futex_pi_state *pi_state = this->pi_state; u32 uninitialized_var(curval), newval; if (!pi_state) return -EINVAL; /* * If current does not own the pi_state then the futex is * inconsistent and user space fiddled with the futex value. */ if (pi_state->owner != current) return -EINVAL; raw_spin_lock(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); /* * It is possible that the next waiter (the one that brought * this owner to the kernel) timed out and is no longer * waiting on the lock. */ if (!new_owner) new_owner = this->task; /* * We pass it to the next owner. (The WAITERS bit is always * kept enabled while there is PI state around. We must also * preserve the owner died bit.) */ if (!(uval & FUTEX_OWNER_DIED)) { int ret = 0; newval = FUTEX_WAITERS | task_pid_vnr(new_owner); if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) ret = -EFAULT; else if (curval != uval) ret = -EINVAL; if (ret) { raw_spin_unlock(&pi_state->pi_mutex.wait_lock); return ret; } } raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); raw_spin_unlock_irq(&pi_state->owner->pi_lock); raw_spin_lock_irq(&new_owner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &new_owner->pi_state_list); pi_state->owner = new_owner; raw_spin_unlock_irq(&new_owner->pi_lock); raw_spin_unlock(&pi_state->pi_mutex.wait_lock); rt_mutex_unlock(&pi_state->pi_mutex); return 0; } static int unlock_futex_pi(u32 __user *uaddr, u32 uval) { u32 uninitialized_var(oldval); /* * There is no waiter, so we unlock the futex. The owner died * bit has not to be preserved here. 
We are the owner: */ if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0)) return -EFAULT; if (oldval != uval) return -EAGAIN; return 0; } /* * Express the locking dependencies for lockdep: */ static inline void double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) { if (hb1 <= hb2) { spin_lock(&hb1->lock); if (hb1 < hb2) spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); } else { /* hb1 > hb2 */ spin_lock(&hb2->lock); spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); } } static inline void double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) { spin_unlock(&hb1->lock); if (hb1 != hb2) spin_unlock(&hb2->lock); } /* * Wake up waiters matching bitset queued on this futex (uaddr). */ static int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) { struct futex_hash_bucket *hb; struct futex_q *this, *next; union futex_key key = FUTEX_KEY_INIT; int ret; if (!bitset) return -EINVAL; ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ); if (unlikely(ret != 0)) goto out; hb = hash_futex(&key); /* Make sure we really have tasks to wakeup */ if (!hb_waiters_pending(hb)) goto out_put_key; spin_lock(&hb->lock); plist_for_each_entry_safe(this, next, &hb->chain, list) { if (match_futex (&this->key, &key)) { if (this->pi_state || this->rt_waiter) { ret = -EINVAL; break; } /* Check if one of the bits is set in both bitsets */ if (!(this->bitset & bitset)) continue; wake_futex(this); if (++ret >= nr_wake) break; } } spin_unlock(&hb->lock); out_put_key: put_futex_key(&key); out: return ret; } /* * Wake up all waiters hashed on the physical page that is mapped * to this virtual address: */ static int futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, int nr_wake, int nr_wake2, int op) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; struct futex_hash_bucket *hb1, *hb2; struct futex_q *this, *next; int ret, op_ret; retry: ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); if (unlikely(ret != 0)) goto out; ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) goto out_put_key1; hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); retry_private: double_lock_hb(hb1, hb2); op_ret = futex_atomic_op_inuser(op, uaddr2); if (unlikely(op_ret < 0)) { double_unlock_hb(hb1, hb2); #ifndef CONFIG_MMU /* * we don't get EFAULT from MMU faults if we don't have an MMU, * but we might get them from range checking */ ret = op_ret; goto out_put_keys; #endif if (unlikely(op_ret != -EFAULT)) { ret = op_ret; goto out_put_keys; } ret = fault_in_user_writeable(uaddr2); if (ret) goto out_put_keys; if (!(flags & FLAGS_SHARED)) goto retry_private; put_futex_key(&key2); put_futex_key(&key1); goto retry; } plist_for_each_entry_safe(this, next, &hb1->chain, list) { if (match_futex (&this->key, &key1)) { if (this->pi_state || this->rt_waiter) { ret = -EINVAL; goto out_unlock; } wake_futex(this); if (++ret >= nr_wake) break; } } if (op_ret > 0) { op_ret = 0; plist_for_each_entry_safe(this, next, &hb2->chain, list) { if (match_futex (&this->key, &key2)) { if (this->pi_state || this->rt_waiter) { ret = -EINVAL; goto out_unlock; } wake_futex(this); if (++op_ret >= nr_wake2) break; } } ret += op_ret; } out_unlock: double_unlock_hb(hb1, hb2); out_put_keys: put_futex_key(&key2); out_put_key1: put_futex_key(&key1); out: return ret; } /** * requeue_futex() - Requeue a futex_q from one hb to another * @q: the futex_q to requeue * @hb1: the source hash_bucket * @hb2: 
the target hash_bucket * @key2: the new key for the requeued futex_q */ static inline void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key2) { /* * If key1 and key2 hash to the same bucket, no need to * requeue. */ if (likely(&hb1->chain != &hb2->chain)) { plist_del(&q->list, &hb1->chain); hb_waiters_dec(hb1); plist_add(&q->list, &hb2->chain); hb_waiters_inc(hb2); q->lock_ptr = &hb2->lock; } get_futex_key_refs(key2); q->key = *key2; } /** * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue * @q: the futex_q * @key: the key of the requeue target futex * @hb: the hash_bucket of the requeue target futex * * During futex_requeue, with requeue_pi=1, it is possible to acquire the * target futex if it is uncontended or via a lock steal. Set the futex_q key * to the requeue target futex so the waiter can detect the wakeup on the right * futex, but remove it from the hb and NULL the rt_waiter so it can detect * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock * to protect access to the pi_state to fixup the owner later. Must be called * with both q->lock_ptr and hb->lock held. */ static inline void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, struct futex_hash_bucket *hb) { get_futex_key_refs(key); q->key = *key; __unqueue_futex(q); WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; q->lock_ptr = &hb->lock; wake_up_state(q->task, TASK_NORMAL); } /** * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter * @pifutex: the user address of the to futex * @hb1: the from futex hash bucket, must be locked by the caller * @hb2: the to futex hash bucket, must be locked by the caller * @key1: the from futex key * @key2: the to futex key * @ps: address to store the pi_state pointer * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Try and get the lock on behalf of the top waiter if we can do it atomically. * Wake the top waiter if we succeed. If the caller specified set_waiters, * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. * hb1 and hb2 must be held by the caller. * * Return: * 0 - failed to acquire the lock atomically; * >0 - acquired the lock, return value is vpid of the top_waiter * <0 - error */ static int futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key1, union futex_key *key2, struct futex_pi_state **ps, int set_waiters) { struct futex_q *top_waiter = NULL; u32 curval; int ret, vpid; if (get_futex_value_locked(&curval, pifutex)) return -EFAULT; /* * Find the top_waiter and determine if there are additional waiters. * If the caller intends to requeue more than 1 waiter to pifutex, * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, * as we have means to handle the possible fault. If not, don't set * the bit unecessarily as it will force the subsequent unlock to enter * the kernel. */ top_waiter = futex_top_waiter(hb1, key1); /* There are no waiters, nothing for us to do. */ if (!top_waiter) return 0; /* Ensure we requeue to the expected futex. */ if (!match_futex(top_waiter->requeue_pi_key, key2)) return -EINVAL; /* * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in * the contended case or if set_waiters is 1. The pi_state is returned * in ps in contended cases. 
*/ vpid = task_pid_vnr(top_waiter->task); ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, set_waiters); if (ret == 1) { requeue_pi_wake_futex(top_waiter, key2, hb2); return vpid; } return ret; } /** * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 * @uaddr1: source futex user address * @flags: futex flags (FLAGS_SHARED, etc.) * @uaddr2: target futex user address * @nr_wake: number of waiters to wake (must be 1 for requeue_pi) * @nr_requeue: number of waiters to requeue (0-INT_MAX) * @cmpval: @uaddr1 expected value (or %NULL) * @requeue_pi: if we are attempting to requeue from a non-pi futex to a * pi futex (pi to pi requeue is not supported) * * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire * uaddr2 atomically on behalf of the top waiter. * * Return: * >=0 - on success, the number of tasks requeued or woken; * <0 - on error */ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; int drop_count = 0, task_count = 0, ret; struct futex_pi_state *pi_state = NULL; struct futex_hash_bucket *hb1, *hb2; struct futex_q *this, *next; if (requeue_pi) { /* * Requeue PI only works on two distinct uaddrs. This * check is only valid for private futexes. See below. */ if (uaddr1 == uaddr2) return -EINVAL; /* * requeue_pi requires a pi_state, try to allocate it now * without any locks in case it fails. */ if (refill_pi_state_cache()) return -ENOMEM; /* * requeue_pi must wake as many tasks as it can, up to nr_wake * + nr_requeue, since it acquires the rt_mutex prior to * returning to userspace, so as to not leave the rt_mutex with * waiters and no owner. However, second and third wake-ups * cannot be predicted as they involve race conditions with the * first wake and a fault while looking up the pi_state. Both * pthread_cond_signal() and pthread_cond_broadcast() should * use nr_wake=1. */ if (nr_wake != 1) return -EINVAL; } retry: if (pi_state != NULL) { /* * We will have to lookup the pi_state again, so free this one * to keep the accounting correct. */ free_pi_state(pi_state); pi_state = NULL; } ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); if (unlikely(ret != 0)) goto out; ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, requeue_pi ? VERIFY_WRITE : VERIFY_READ); if (unlikely(ret != 0)) goto out_put_key1; /* * The check above which compares uaddrs is not sufficient for * shared futexes. We need to compare the keys: */ if (requeue_pi && match_futex(&key1, &key2)) { ret = -EINVAL; goto out_put_keys; } hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); retry_private: hb_waiters_inc(hb2); double_lock_hb(hb1, hb2); if (likely(cmpval != NULL)) { u32 curval; ret = get_futex_value_locked(&curval, uaddr1); if (unlikely(ret)) { double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); ret = get_user(curval, uaddr1); if (ret) goto out_put_keys; if (!(flags & FLAGS_SHARED)) goto retry_private; put_futex_key(&key2); put_futex_key(&key1); goto retry; } if (curval != *cmpval) { ret = -EAGAIN; goto out_unlock; } } if (requeue_pi && (task_count - nr_wake < nr_requeue)) { /* * Attempt to acquire uaddr2 and wake the top waiter. If we * intend to requeue waiters, force setting the FUTEX_WAITERS * bit. We force this here where we are able to easily handle * faults rather in the requeue loop below. 
*/ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, &key2, &pi_state, nr_requeue); /* * At this point the top_waiter has either taken uaddr2 or is * waiting on it. If the former, then the pi_state will not * exist yet, look it up one more time to ensure we have a * reference to it. If the lock was taken, ret contains the * vpid of the top waiter task. */ if (ret > 0) { WARN_ON(pi_state); drop_count++; task_count++; /* * If we acquired the lock, then the user * space value of uaddr2 should be vpid. It * cannot be changed by the top waiter as it * is blocked on hb2 lock if it tries to do * so. If something fiddled with it behind our * back the pi state lookup might unearth * it. So we rather use the known value than * rereading and handing potential crap to * lookup_pi_state. */ ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL); } switch (ret) { case 0: break; case -EFAULT: double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); put_futex_key(&key2); put_futex_key(&key1); ret = fault_in_user_writeable(uaddr2); if (!ret) goto retry; goto out; case -EAGAIN: /* The owner was exiting, try again. */ double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); put_futex_key(&key2); put_futex_key(&key1); cond_resched(); goto retry; default: goto out_unlock; } } plist_for_each_entry_safe(this, next, &hb1->chain, list) { if (task_count - nr_wake >= nr_requeue) break; if (!match_futex(&this->key, &key1)) continue; /* * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always * be paired with each other and no other futex ops. * * We should never be requeueing a futex_q with a pi_state, * which is awaiting a futex_unlock_pi(). */ if ((requeue_pi && !this->rt_waiter) || (!requeue_pi && this->rt_waiter) || this->pi_state) { ret = -EINVAL; break; } /* * Wake nr_wake waiters. For requeue_pi, if we acquired the * lock, we already woke the top_waiter. If not, it will be * woken by futex_unlock_pi(). */ if (++task_count <= nr_wake && !requeue_pi) { wake_futex(this); continue; } /* Ensure we requeue to the expected futex for requeue_pi. */ if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) { ret = -EINVAL; break; } /* * Requeue nr_requeue waiters and possibly one more in the case * of requeue_pi if we couldn't acquire the lock atomically. */ if (requeue_pi) { /* Prepare the waiter to take the rt_mutex. */ atomic_inc(&pi_state->refcount); this->pi_state = pi_state; ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, this->rt_waiter, this->task, 1); if (ret == 1) { /* We got the lock. */ requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; } else if (ret) { /* -EDEADLK */ this->pi_state = NULL; free_pi_state(pi_state); goto out_unlock; } } requeue_futex(this, hb1, hb2, &key2); drop_count++; } out_unlock: double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); /* * drop_futex_key_refs() must be called outside the spinlocks. During * the requeue we moved futex_q's from the hash bucket at key1 to the * one at key2 and updated their key pointer. We no longer need to * hold the references to key1. */ while (--drop_count >= 0) drop_futex_key_refs(&key1); out_put_keys: put_futex_key(&key2); out_put_key1: put_futex_key(&key1); out: if (pi_state != NULL) free_pi_state(pi_state); return ret ? ret : task_count; } /* The key must be already stored in q->key. 
*/ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) __acquires(&hb->lock) { struct futex_hash_bucket *hb; hb = hash_futex(&q->key); /* * Increment the counter before taking the lock so that * a potential waker won't miss a to-be-slept task that is * waiting for the spinlock. This is safe as all queue_lock() * users end up calling queue_me(). Similarly, for housekeeping, * decrement the counter at queue_unlock() when some error has * occurred and we don't end up adding the task to the list. */ hb_waiters_inc(hb); q->lock_ptr = &hb->lock; spin_lock(&hb->lock); /* implies MB (A) */ return hb; } static inline void queue_unlock(struct futex_hash_bucket *hb) __releases(&hb->lock) { spin_unlock(&hb->lock); hb_waiters_dec(hb); } /** * queue_me() - Enqueue the futex_q on the futex_hash_bucket * @q: The futex_q to enqueue * @hb: The destination hash bucket * * The hb->lock must be held by the caller, and is released here. A call to * queue_me() is typically paired with exactly one call to unqueue_me(). The * exceptions involve the PI related operations, which may use unqueue_me_pi() * or nothing if the unqueue is done as part of the wake process and the unqueue * state is implicit in the state of woken task (see futex_wait_requeue_pi() for * an example). */ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) __releases(&hb->lock) { int prio; /* * The priority used to register this element is * - either the real thread-priority for the real-time threads * (i.e. threads with a priority lower than MAX_RT_PRIO) * - or MAX_RT_PRIO for non-RT threads. * Thus, all RT-threads are woken first in priority order, and * the others are woken last, in FIFO order. */ prio = min(current->normal_prio, MAX_RT_PRIO); plist_node_init(&q->list, prio); plist_add(&q->list, &hb->chain); q->task = current; spin_unlock(&hb->lock); } /** * unqueue_me() - Remove the futex_q from its futex_hash_bucket * @q: The futex_q to unqueue * * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must * be paired with exactly one earlier call to queue_me(). * * Return: * 1 - if the futex_q was still queued (and we removed unqueued it); * 0 - if the futex_q was already removed by the waking thread */ static int unqueue_me(struct futex_q *q) { spinlock_t *lock_ptr; int ret = 0; /* In the common case we don't take the spinlock, which is nice. */ retry: lock_ptr = q->lock_ptr; barrier(); if (lock_ptr != NULL) { spin_lock(lock_ptr); /* * q->lock_ptr can change between reading it and * spin_lock(), causing us to take the wrong lock. This * corrects the race condition. * * Reasoning goes like this: if we have the wrong lock, * q->lock_ptr must have changed (maybe several times) * between reading it and the spin_lock(). It can * change again after the spin_lock() but only if it was * already changed before the spin_lock(). It cannot, * however, change back to the original value. Therefore * we can detect whether we acquired the correct lock. */ if (unlikely(lock_ptr != q->lock_ptr)) { spin_unlock(lock_ptr); goto retry; } __unqueue_futex(q); BUG_ON(q->pi_state); spin_unlock(lock_ptr); ret = 1; } drop_futex_key_refs(&q->key); return ret; } /* * PI futexes can not be requeued and must remove themself from the * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry * and dropped here. 
*/ static void unqueue_me_pi(struct futex_q *q) __releases(q->lock_ptr) { __unqueue_futex(q); BUG_ON(!q->pi_state); free_pi_state(q->pi_state); q->pi_state = NULL; spin_unlock(q->lock_ptr); } /* * Fixup the pi_state owner with the new owner. * * Must be called with hash bucket lock held and mm->sem held for non * private futexes. */ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, struct task_struct *newowner) { u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; struct futex_pi_state *pi_state = q->pi_state; struct task_struct *oldowner = pi_state->owner; u32 uval, uninitialized_var(curval), newval; int ret; /* Owner died? */ if (!pi_state->owner) newtid |= FUTEX_OWNER_DIED; /* * We are here either because we stole the rtmutex from the * previous highest priority waiter or we are the highest priority * waiter but failed to get the rtmutex the first time. * We have to replace the newowner TID in the user space variable. * This must be atomic as we have to preserve the owner died bit here. * * Note: We write the user space value _before_ changing the pi_state * because we can fault here. Imagine swapped out pages or a fork * that marked all the anonymous memory readonly for cow. * * Modifying pi_state _before_ the user space value would * leave the pi_state in an inconsistent state when we fault * here, because we need to drop the hash bucket lock to * handle the fault. This might be observed in the PID check * in lookup_pi_state. */ retry: if (get_futex_value_locked(&uval, uaddr)) goto handle_fault; while (1) { newval = (uval & FUTEX_OWNER_DIED) | newtid; if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) goto handle_fault; if (curval == uval) break; uval = curval; } /* * We fixed up user space. Now we need to fix the pi_state * itself. */ if (pi_state->owner != NULL) { raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); raw_spin_unlock_irq(&pi_state->owner->pi_lock); } pi_state->owner = newowner; raw_spin_lock_irq(&newowner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &newowner->pi_state_list); raw_spin_unlock_irq(&newowner->pi_lock); return 0; /* * To handle the page fault we need to drop the hash bucket * lock here. That gives the other task (either the highest priority * waiter itself or the task which stole the rtmutex) the * chance to try the fixup of the pi_state. So once we are * back from handling the fault we need to check the pi_state * after reacquiring the hash bucket lock and before trying to * do another fixup. When the fixup has been done already we * simply return. */ handle_fault: spin_unlock(q->lock_ptr); ret = fault_in_user_writeable(uaddr); spin_lock(q->lock_ptr); /* * Check if someone else fixed it for us: */ if (pi_state->owner != oldowner) return 0; if (ret) return ret; goto retry; } static long futex_wait_restart(struct restart_block *restart); /** * fixup_owner() - Post lock pi_state and corner case management * @uaddr: user address of the futex * @q: futex_q (contains pi_state and access to the rt_mutex) * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) * * After attempting to lock an rt_mutex, this function is called to cleanup * the pi_state owner as well as handle race conditions that may allow us to * acquire the lock. Must be called with the hb lock held. 
* * Return: * 1 - success, lock taken; * 0 - success, lock not taken; * <0 - on error (-EFAULT) */ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) { struct task_struct *owner; int ret = 0; if (locked) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: */ if (q->pi_state->owner != current) ret = fixup_pi_state_owner(uaddr, q, current); goto out; } /* * Catch the rare case, where the lock was released when we were on the * way back before we locked the hash bucket. */ if (q->pi_state->owner == current) { /* * Try to get the rt_mutex now. This might fail as some other * task acquired the rt_mutex after we removed ourself from the * rt_mutex waiters list. */ if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { locked = 1; goto out; } /* * pi_state is incorrect, some other task did a lock steal and * we returned due to timeout or signal without taking the * rt_mutex. Too late. */ raw_spin_lock(&q->pi_state->pi_mutex.wait_lock); owner = rt_mutex_owner(&q->pi_state->pi_mutex); if (!owner) owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock); ret = fixup_pi_state_owner(uaddr, q, owner); goto out; } /* * Paranoia check. If we did not take the lock, then we should not be * the owner of the rt_mutex. */ if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " "pi-state %p\n", ret, q->pi_state->pi_mutex.owner, q->pi_state->owner); out: return ret ? ret : locked; } /** * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal * @hb: the futex hash bucket, must be locked by the caller * @q: the futex_q to queue up on * @timeout: the prepared hrtimer_sleeper, or null for no timeout */ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, struct hrtimer_sleeper *timeout) { /* * The task state is guaranteed to be set before another task can * wake it. set_current_state() is implemented using set_mb() and * queue_me() calls spin_unlock() upon completion, both serializing * access to the hash list and forcing another memory barrier. */ set_current_state(TASK_INTERRUPTIBLE); queue_me(q, hb); /* Arm the timer */ if (timeout) { hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); if (!hrtimer_active(&timeout->timer)) timeout->task = NULL; } /* * If we have been removed from the hash list, then another task * has tried to wake us, and we can skip the call to schedule(). */ if (likely(!plist_node_empty(&q->list))) { /* * If the timer has already expired, current will already be * flagged for rescheduling. Only call schedule if there * is no timeout, or if it has yet to expire. */ if (!timeout || timeout->task) freezable_schedule(); } __set_current_state(TASK_RUNNING); } /** * futex_wait_setup() - Prepare to wait on a futex * @uaddr: the futex userspace address * @val: the expected value * @flags: futex flags (FLAGS_SHARED, etc.) * @q: the associated futex_q * @hb: storage for hash_bucket pointer to be returned to caller * * Setup the futex_q and locate the hash_bucket. Get the futex value and * compare it with the expected value. Handle atomic faults internally. * Return with the hb lock held and a q.key reference on success, and unlocked * with no q.key reference on failure. 
* * Return: * 0 - uaddr contains val and hb has been locked; * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked */ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, struct futex_q *q, struct futex_hash_bucket **hb) { u32 uval; int ret; /* * Access the page AFTER the hash-bucket is locked. * Order is important: * * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); } * * The basic logical guarantee of a futex is that it blocks ONLY * if cond(var) is known to be true at the time of blocking, for * any cond. If we locked the hash-bucket after testing *uaddr, that * would open a race condition where we could block indefinitely with * cond(var) false, which would violate the guarantee. * * On the other hand, we insert q and release the hash-bucket only * after testing *uaddr. This guarantees that futex_wait() will NOT * absorb a wakeup if *uaddr does not match the desired values * while the syscall executes. */ retry: ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ); if (unlikely(ret != 0)) return ret; retry_private: *hb = queue_lock(q); ret = get_futex_value_locked(&uval, uaddr); if (ret) { queue_unlock(*hb); ret = get_user(uval, uaddr); if (ret) goto out; if (!(flags & FLAGS_SHARED)) goto retry_private; put_futex_key(&q->key); goto retry; } if (uval != val) { queue_unlock(*hb); ret = -EWOULDBLOCK; } out: if (ret) put_futex_key(&q->key); return ret; } static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset) { struct hrtimer_sleeper timeout, *to = NULL; struct restart_block *restart; struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; int ret; if (!bitset) return -EINVAL; q.bitset = bitset; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } retry: /* * Prepare to wait on uaddr. On success, holds hb lock and increments * q.key refs. */ ret = futex_wait_setup(uaddr, val, flags, &q, &hb); if (ret) goto out; /* queue_me and wait for wakeup, timeout, or a signal. */ futex_wait_queue_me(hb, &q, to); /* If we were woken (and unqueued), we succeeded, whatever. */ ret = 0; /* unqueue_me() drops q.key ref */ if (!unqueue_me(&q)) goto out; ret = -ETIMEDOUT; if (to && !to->task) goto out; /* * We expect signal_pending(current), but we might be the * victim of a spurious wakeup as well. */ if (!signal_pending(current)) goto retry; ret = -ERESTARTSYS; if (!abs_time) goto out; restart = &current_thread_info()->restart_block; restart->fn = futex_wait_restart; restart->futex.uaddr = uaddr; restart->futex.val = val; restart->futex.time = abs_time->tv64; restart->futex.bitset = bitset; restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; ret = -ERESTART_RESTARTBLOCK; out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; } static long futex_wait_restart(struct restart_block *restart) { u32 __user *uaddr = restart->futex.uaddr; ktime_t t, *tp = NULL; if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { t.tv64 = restart->futex.time; tp = &t; } restart->fn = do_no_restart_syscall; return (long)futex_wait(uaddr, restart->futex.flags, restart->futex.val, tp, restart->futex.bitset); } /* * Userspace tried a 0 -> TID atomic transition of the futex value * and failed. 
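 *
 * (Illustrative sketch of the userspace fast path that precedes this
 *  slowpath -- hypothetical helpers, argument details elided:
 *
 *	if (cmpxchg(&futex, 0, gettid()) != 0)
 *		syscall(SYS_futex, &futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 * )
 *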
The kernel side here does the whole locking operation: * if there are waiters then it will block, it does PI, etc. (Due to * races the kernel might see a 0 value of the futex too.) */ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to = NULL; struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; int res, ret; if (refill_pi_state_cache()) return -ENOMEM; if (time) { to = &timeout; hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires(&to->timer, *time); } retry: ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE); if (unlikely(ret != 0)) goto out; retry_private: hb = queue_lock(&q); ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); if (unlikely(ret)) { switch (ret) { case 1: /* We got the lock. */ ret = 0; goto out_unlock_put_key; case -EFAULT: goto uaddr_faulted; case -EAGAIN: /* * Task is exiting and we just wait for the * exit to complete. */ queue_unlock(hb); put_futex_key(&q.key); cond_resched(); goto retry; default: goto out_unlock_put_key; } } /* * Only actually queue now that the atomic ops are done: */ queue_me(&q, hb); WARN_ON(!q.pi_state); /* * Block on the PI mutex: */ if (!trylock) ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1); else { ret = rt_mutex_trylock(&q.pi_state->pi_mutex); /* Fixup the trylock return value: */ ret = ret ? 0 : -EWOULDBLOCK; } spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_owner(uaddr, &q, !ret); /* * If fixup_owner() returned an error, propagate that. If it acquired * the lock, clear our -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; /* * If fixup_owner() faulted and was unable to handle the fault, unlock * it and return the fault to userspace. */ if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) rt_mutex_unlock(&q.pi_state->pi_mutex); /* Unqueue and drop the lock */ unqueue_me_pi(&q); goto out_put_key; out_unlock_put_key: queue_unlock(hb); out_put_key: put_futex_key(&q.key); out: if (to) destroy_hrtimer_on_stack(&to->timer); return ret != -EINTR ? ret : -ERESTARTNOINTR; uaddr_faulted: queue_unlock(hb); ret = fault_in_user_writeable(uaddr); if (ret) goto out_put_key; if (!(flags & FLAGS_SHARED)) goto retry_private; put_futex_key(&q.key); goto retry; } /* * Userspace attempted a TID -> 0 atomic transition, and failed. * This is the in-kernel slowpath: we look up the PI state (if any), * and do the rt-mutex unlock. */ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) { struct futex_hash_bucket *hb; struct futex_q *this, *next; union futex_key key = FUTEX_KEY_INIT; u32 uval, vpid = task_pid_vnr(current); int ret; retry: if (get_user(uval, uaddr)) return -EFAULT; /* * We release only a lock we actually own: */ if ((uval & FUTEX_TID_MASK) != vpid) return -EPERM; ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE); if (unlikely(ret != 0)) goto out; hb = hash_futex(&key); spin_lock(&hb->lock); /* * To avoid races, try to do the TID -> 0 atomic transition * again.
If it succeeds then we can return without waking * anyone else up: */ if (!(uval & FUTEX_OWNER_DIED) && cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) goto pi_faulted; /* * Rare case: we managed to release the lock atomically, * no need to wake anyone else up: */ if (unlikely(uval == vpid)) goto out_unlock; /* * Ok, other tasks may need to be woken up - check waiters * and do the wakeup if necessary: */ plist_for_each_entry_safe(this, next, &hb->chain, list) { if (!match_futex(&this->key, &key)) continue; ret = wake_futex_pi(uaddr, uval, this); /* * The atomic access to the futex value * generated a pagefault, so retry the * user-access and the wakeup: */ if (ret == -EFAULT) goto pi_faulted; goto out_unlock; } /* * No waiters - kernel unlocks the futex: */ if (!(uval & FUTEX_OWNER_DIED)) { ret = unlock_futex_pi(uaddr, uval); if (ret == -EFAULT) goto pi_faulted; } out_unlock: spin_unlock(&hb->lock); put_futex_key(&key); out: return ret; pi_faulted: spin_unlock(&hb->lock); put_futex_key(&key); ret = fault_in_user_writeable(uaddr); if (!ret) goto retry; return ret; } /** * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex * @hb: the hash_bucket futex_q was originally enqueued on * @q: the futex_q woken while waiting to be requeued * @key2: the futex_key of the requeue target futex * @timeout: the timeout associated with the wait (NULL if none) * * Detect if the task was woken on the initial futex as opposed to the requeue * target futex. If so, determine if it was a timeout or a signal that caused * the wakeup and return the appropriate error code to the caller. Must be * called with the hb lock held. * * Return: * 0 = no early wakeup detected; * <0 = -ETIMEDOUT or -ERESTARTNOINTR */ static inline int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, struct futex_q *q, union futex_key *key2, struct hrtimer_sleeper *timeout) { int ret = 0; /* * With the hb lock held, we avoid races while we process the wakeup. * We only need to hold hb (and not hb2) to ensure atomicity as the * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. * It can't be requeued from uaddr2 to something else since we don't * support a PI aware source futex for requeue. */ if (!match_futex(&q->key, key2)) { WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); /* * We were woken prior to requeue by a timeout or a signal. * Unqueue the futex_q and determine which it was. */ plist_del(&q->list, &hb->chain); hb_waiters_dec(hb); /* Handle spurious wakeups gracefully */ ret = -EWOULDBLOCK; if (timeout && !timeout->task) ret = -ETIMEDOUT; else if (signal_pending(current)) ret = -ERESTARTNOINTR; } return ret; } /** * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 * @uaddr: the futex we initially wait on (non-pi) * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be * the same type, no requeueing from private to shared, etc. * @val: the expected value of uaddr * @abs_time: absolute timeout * @bitset: 32 bit wakeup bitset set by userspace, defaults to all * @uaddr2: the pi futex we will take prior to returning to user-space * * The caller will wait on uaddr and will be requeued by futex_requeue() to * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to * userspace. This ensures the rt_mutex maintains an owner when it has waiters; * without one, the pi logic would not know which task to boost/deboost, if * there was a need to.
* * We call schedule() in futex_wait_queue_me() when we enqueue and return there * via the following: * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() * 2) wakeup on uaddr2 after a requeue * 3) signal * 4) timeout * * If 3, cleanup and return -ERESTARTNOINTR. * * If 2, we may then block on trying to take the rt_mutex and return via: * 5) successful lock * 6) signal * 7) timeout * 8) other lock acquisition failure * * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). * * If 4 or 7, we clean up and return with -ETIMEDOUT. * * Return: * 0 - On success; * <0 - On error */ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; if (uaddr == uaddr2) return -EINVAL; if (!bitset) return -EINVAL; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } /* * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ debug_rt_mutex_init_waiter(&rt_waiter); RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); RB_CLEAR_NODE(&rt_waiter.tree_entry); rt_waiter.task = NULL; ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) goto out; q.bitset = bitset; q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; /* * Prepare to wait on uaddr. On success, increments q.key (key1) ref * count. */ ret = futex_wait_setup(uaddr, val, flags, &q, &hb); if (ret) goto out_key2; /* * The check above which compares uaddrs is not sufficient for * shared futexes. We need to compare the keys: */ if (match_futex(&q.key, &key2)) { ret = -EINVAL; goto out_put_keys; } /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); spin_lock(&hb->lock); ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); spin_unlock(&hb->lock); if (ret) goto out_put_keys; /* * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup * race with the atomic proxy lock acquisition by the requeue code. The * futex_requeue dropped our key1 reference and incremented our key2 * reference count. */ /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); spin_unlock(q.lock_ptr); } } else { /* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. futex_unlock_pi() will not destroy the lock_ptr nor * the pi_state. */ WARN_ON(!q.pi_state); pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_owner(uaddr2, &q, !ret); /* * If fixup_owner() returned an error, propagate that.
If it * acquired the lock, clear -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } /* * If fixup_pi_state_owner() faulted and was unable to handle the * fault, unlock the rt_mutex and return the fault to userspace. */ if (ret == -EFAULT) { if (pi_mutex && rt_mutex_owner(pi_mutex) == current) rt_mutex_unlock(pi_mutex); } else if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. We could restart this syscall, but * it would detect that the user space "val" changed and return * -EWOULDBLOCK. Save the overhead of the restart and return * -EWOULDBLOCK directly. */ ret = -EWOULDBLOCK; } out_put_keys: put_futex_key(&q.key); out_key2: put_futex_key(&key2); out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; } /* * Support for robust futexes: the kernel cleans up held futexes at * thread exit time. * * Implementation: user-space maintains a per-thread list of locks it * is holding. Upon do_exit(), the kernel carefully walks this list, * and marks all locks that are owned by this thread with the * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is * always manipulated with the lock held, so the list is private and * per-thread. Userspace also maintains a per-thread 'list_op_pending' * field, to allow the kernel to clean up if the thread dies after * acquiring the lock, but just before it could have added itself to * the list. There can only be one such pending lock. */ /** * sys_set_robust_list() - Set the robust-futex list head of a task * @head: pointer to the list-head * @len: length of the list-head, as userspace expects */ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, size_t, len) { if (!futex_cmpxchg_enabled) return -ENOSYS; /* * The kernel knows only one size for now: */ if (unlikely(len != sizeof(*head))) return -EINVAL; current->robust_list = head; return 0; } /** * sys_get_robust_list() - Get the robust-futex list head of a task * @pid: pid of the process [zero for current task] * @head_ptr: pointer to a list-head pointer, the kernel fills it in * @len_ptr: pointer to a length field, the kernel fills in the header size */ SYSCALL_DEFINE3(get_robust_list, int, pid, struct robust_list_head __user * __user *, head_ptr, size_t __user *, len_ptr) { struct robust_list_head __user *head; unsigned long ret; struct task_struct *p; if (!futex_cmpxchg_enabled) return -ENOSYS; rcu_read_lock(); ret = -ESRCH; if (!pid) p = current; else { p = find_task_by_vpid(pid); if (!p) goto err_unlock; } ret = -EPERM; if (!ptrace_may_access(p, PTRACE_MODE_READ)) goto err_unlock; head = p->robust_list; rcu_read_unlock(); if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(head, head_ptr); err_unlock: rcu_read_unlock(); return ret; } /* * Process a futex-list entry, check whether it's owned by the * dying task, and do notification if so: */ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) { u32 uval, uninitialized_var(nval), mval; retry: if (get_user(uval, uaddr)) return -1; if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { /* * Ok, this dying thread is truly holding a futex * of interest. Set the OWNER_DIED bit atomically * via cmpxchg, and if the value had FUTEX_WAITERS * set, wake up a waiter (if any). (We have to do a * futex_wake() even if OWNER_DIED is already set - * to handle the rare but possible case of recursive * thread-death.) 
The rest of the cleanup is done in * userspace. */ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; /* * We are not holding a lock here, but we want to have * the pagefault_disable/enable() protection because * we want to handle the fault gracefully. If the * access fails we try to fault in the futex with R/W * verification via get_user_pages. get_user() above * does not guarantee R/W access. If that fails we * give up and leave the futex locked. */ if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) { if (fault_in_user_writeable(uaddr)) return -1; goto retry; } if (nval != uval) goto retry; /* * Wake robust non-PI futexes here. The wakeup of * PI futexes happens in exit_pi_state(): */ if (!pi && (uval & FUTEX_WAITERS)) futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); } return 0; } /* * Fetch a robust-list pointer. Bit 0 signals PI futexes: */ static inline int fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, unsigned int *pi) { unsigned long uentry; if (get_user(uentry, (unsigned long __user *)head)) return -EFAULT; *entry = (void __user *)(uentry & ~1UL); *pi = uentry & 1; return 0; } /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of list-walking problem. */ void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned int uninitialized_var(next_pi); unsigned long futex_offset; int rc; if (!futex_cmpxchg_enabled) return; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ if (fetch_robust_entry(&entry, &head->list.next, &pi)) return; /* * Fetch the relative futex offset: */ if (get_user(futex_offset, &head->futex_offset)) return; /* * Fetch any possibly pending lock-add first, and handle it * if it exists: */ if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) return; next_entry = NULL; /* avoid warning with gcc */ while (entry != &head->list) { /* * Fetch the next entry in the list before calling * handle_futex_death: */ rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); /* * A pending lock might already be on the list, so * don't process it twice: */ if (entry != pending) if (handle_futex_death((void __user *)entry + futex_offset, curr, pi)) return; if (rc) return; entry = next_entry; pi = next_pi; /* * Avoid excessively long or circular lists: */ if (!--limit) break; cond_resched(); } if (pending) handle_futex_death((void __user *)pending + futex_offset, curr, pip); } long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { int cmd = op & FUTEX_CMD_MASK; unsigned int flags = 0; if (!(op & FUTEX_PRIVATE_FLAG)) flags |= FLAGS_SHARED; if (op & FUTEX_CLOCK_REALTIME) { flags |= FLAGS_CLOCKRT; if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) return -ENOSYS; } switch (cmd) { case FUTEX_LOCK_PI: case FUTEX_UNLOCK_PI: case FUTEX_TRYLOCK_PI: case FUTEX_WAIT_REQUEUE_PI: case FUTEX_CMP_REQUEUE_PI: if (!futex_cmpxchg_enabled) return -ENOSYS; } switch (cmd) { case FUTEX_WAIT: val3 = FUTEX_BITSET_MATCH_ANY; case FUTEX_WAIT_BITSET: return futex_wait(uaddr, flags, val, timeout, val3); case FUTEX_WAKE: val3 = FUTEX_BITSET_MATCH_ANY; case FUTEX_WAKE_BITSET: return futex_wake(uaddr, flags, val, val3); case FUTEX_REQUEUE: return futex_requeue(uaddr, 
flags, uaddr2, val, val2, NULL, 0); case FUTEX_CMP_REQUEUE: return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0); case FUTEX_WAKE_OP: return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); case FUTEX_LOCK_PI: return futex_lock_pi(uaddr, flags, val, timeout, 0); case FUTEX_UNLOCK_PI: return futex_unlock_pi(uaddr, flags); case FUTEX_TRYLOCK_PI: return futex_lock_pi(uaddr, flags, 0, timeout, 1); case FUTEX_WAIT_REQUEUE_PI: val3 = FUTEX_BITSET_MATCH_ANY; return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, uaddr2); case FUTEX_CMP_REQUEUE_PI: return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1); } return -ENOSYS; } SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, struct timespec __user *, utime, u32 __user *, uaddr2, u32, val3) { struct timespec ts; ktime_t t, *tp = NULL; u32 val2 = 0; int cmd = op & FUTEX_CMD_MASK; if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || cmd == FUTEX_WAIT_BITSET || cmd == FUTEX_WAIT_REQUEUE_PI)) { if (copy_from_user(&ts, utime, sizeof(ts)) != 0) return -EFAULT; if (!timespec_valid(&ts)) return -EINVAL; t = timespec_to_ktime(ts); if (cmd == FUTEX_WAIT) t = ktime_add_safe(ktime_get(), t); tp = &t; } /* * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*. * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. */ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) val2 = (u32) (unsigned long) utime; return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } static void __init futex_detect_cmpxchg(void) { #ifndef CONFIG_HAVE_FUTEX_CMPXCHG u32 curval; /* * This will fail and we want it. Some arch implementations do * runtime detection of the futex_atomic_cmpxchg_inatomic() * functionality. We want to know that before we call in any * of the complex code paths. Also we want to prevent * registration of robust lists in that case. NULL is * guaranteed to fault and we get -EFAULT on functional * implementation, the non-functional ones will return * -ENOSYS. */ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) futex_cmpxchg_enabled = 1; #endif } static int __init futex_init(void) { unsigned int futex_shift; unsigned long i; #if CONFIG_BASE_SMALL futex_hashsize = 16; #else futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus()); #endif futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues), futex_hashsize, 0, futex_hashsize < 256 ? HASH_SMALL : 0, &futex_shift, NULL, futex_hashsize, futex_hashsize); futex_hashsize = 1UL << futex_shift; futex_detect_cmpxchg(); for (i = 0; i < futex_hashsize; i++) { atomic_set(&futex_queues[i].waiters, 0); plist_head_init(&futex_queues[i].chain); spin_lock_init(&futex_queues[i].lock); } return 0; } __initcall(futex_init);
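/*
 * Editor's note: the sketch below is an illustrative addition, not part of
 * the original futex.c. It is a minimal userspace waiter/waker pair that
 * follows the ordering contract documented above in futex_wait_setup(): the
 * waiter reads the futex word first and passes the value it saw to
 * FUTEX_WAIT, so the kernel can re-test the word under the hash-bucket lock
 * and refuse to sleep (EAGAIN/EWOULDBLOCK) if a waker changed it in between.
 * The names futex_word, wait_on, wake_one are invented for the example;
 * build with gcc -pthread.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int futex_word = 0;

static long wait_on(atomic_int *addr, int expected_val)
{
	/* Returns -1/EAGAIN if *addr no longer equals expected_val. */
	return syscall(SYS_futex, addr, FUTEX_WAIT, expected_val, NULL, NULL, 0);
}

static long wake_one(atomic_int *addr)
{
	return syscall(SYS_futex, addr, FUTEX_WAKE, 1, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
	int val;

	/* Read first, then sleep only on the exact value we observed. */
	while ((val = atomic_load(&futex_word)) == 0)
		wait_on(&futex_word, val);
	printf("waiter: woken, futex_word=%d\n", atomic_load(&futex_word));
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);                     /* let the waiter block */
	atomic_store(&futex_word, 1); /* make cond(var) true first ... */
	wake_one(&futex_word);        /* ... then wake, per the comment above */
	pthread_join(t, NULL);
	return 0;
}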
./CrossVul/dataset_final_sorted/CWE-264/c/good_2126_0
crossvul-cpp_data_good_2123_0
/* * linux/mm/mlock.c * * (C) Copyright 1995 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig */ #include <linux/capability.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/mempolicy.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/rmap.h> #include <linux/mmzone.h> #include <linux/hugetlb.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include "internal.h" int can_do_mlock(void) { if (capable(CAP_IPC_LOCK)) return 1; if (rlimit(RLIMIT_MEMLOCK) != 0) return 1; return 0; } EXPORT_SYMBOL(can_do_mlock); /* * Mlocked pages are marked with PageMlocked() flag for efficient testing * in vmscan and, possibly, the fault path; and to support semi-accurate * statistics. * * An mlocked page [PageMlocked(page)] is unevictable. As such, it will * be placed on the LRU "unevictable" list, rather than the [in]active lists. * The unevictable list is an LRU sibling list to the [in]active lists. * PageUnevictable is set to indicate the unevictable state. * * When lazy mlocking via vmscan, it is important to ensure that the * vma's VM_LOCKED status is not concurrently being modified, otherwise we * may have mlocked a page that is being munlocked. So lazy mlock must take * the mmap_sem for read, and verify that the vma really is locked * (see mm/rmap.c). */ /* * LRU accounting for clear_page_mlock() */ void clear_page_mlock(struct page *page) { if (!TestClearPageMlocked(page)) return; mod_zone_page_state(page_zone(page), NR_MLOCK, -hpage_nr_pages(page)); count_vm_event(UNEVICTABLE_PGCLEARED); if (!isolate_lru_page(page)) { putback_lru_page(page); } else { /* * We lost the race. The page already moved to the evictable list. */ if (PageUnevictable(page)) count_vm_event(UNEVICTABLE_PGSTRANDED); } } /* * Mark page as mlocked if not already. * If page on LRU, isolate and putback to move to unevictable list. */ void mlock_vma_page(struct page *page) { /* Serialize with page migration */ BUG_ON(!PageLocked(page)); if (!TestSetPageMlocked(page)) { mod_zone_page_state(page_zone(page), NR_MLOCK, hpage_nr_pages(page)); count_vm_event(UNEVICTABLE_PGMLOCKED); if (!isolate_lru_page(page)) putback_lru_page(page); } } /* * Isolate a page from LRU with optional get_page() pin. * Assumes lru_lock already held and page already pinned. */ static bool __munlock_isolate_lru_page(struct page *page, bool getpage) { if (PageLRU(page)) { struct lruvec *lruvec; lruvec = mem_cgroup_page_lruvec(page, page_zone(page)); if (getpage) get_page(page); ClearPageLRU(page); del_page_from_lru_list(page, lruvec, page_lru(page)); return true; } return false; } /* * Finish munlock after successful page isolation * * Page must be locked. This is a wrapper for try_to_munlock() * and putback_lru_page() with munlock accounting. */ static void __munlock_isolated_page(struct page *page) { int ret = SWAP_AGAIN; /* * Optimization: if the page was mapped just once, that's our mapping * and we don't need to check all the other vmas. */ if (page_mapcount(page) > 1) ret = try_to_munlock(page); /* Did try_to_munlock() succeed or punt? */ if (ret != SWAP_MLOCK) count_vm_event(UNEVICTABLE_PGMUNLOCKED); putback_lru_page(page); } /* * Accounting for page isolation failure during munlock * * Performs accounting when page isolation fails in munlock. There is nothing * else to do because it means some other task has already removed the page * from the LRU.
putback_lru_page() will take care of removing the page from * the unevictable list, if necessary. vmscan [page_referenced()] will move * the page back to the unevictable list if some other vma has it mlocked. */ static void __munlock_isolation_failed(struct page *page) { if (PageUnevictable(page)) __count_vm_event(UNEVICTABLE_PGSTRANDED); else __count_vm_event(UNEVICTABLE_PGMUNLOCKED); } /** * munlock_vma_page - munlock a vma page * @page - page to be unlocked, either a normal page or THP page head * * Returns the size of the page as a page mask (0 for normal page, * HPAGE_PMD_NR - 1 for THP head page) * * Called from munlock()/munmap() path with page supposedly on the LRU. * When we munlock a page, because the vma where we found the page is being * munlock()ed or munmap()ed, we want to check whether other vmas hold the * page locked so that we can leave it on the unevictable lru list and not * bother vmscan with it. However, to walk the page's rmap list in * try_to_munlock() we must isolate the page from the LRU. If some other * task has removed the page from the LRU, we won't be able to do that. * So we clear PageMlocked as we might not get another chance. If we * can't isolate the page, we leave it for putback_lru_page() and vmscan * [page_referenced()/try_to_unmap()] to deal with. */ unsigned int munlock_vma_page(struct page *page) { unsigned int nr_pages; struct zone *zone = page_zone(page); /* For try_to_munlock() and to serialize with page migration */ BUG_ON(!PageLocked(page)); /* * Serialize with any parallel __split_huge_page_refcount() which * might otherwise copy PageMlocked to part of the tail pages before * we clear it in the head page. It also stabilizes hpage_nr_pages(). */ spin_lock_irq(&zone->lru_lock); nr_pages = hpage_nr_pages(page); if (!TestClearPageMlocked(page)) goto unlock_out; __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); if (__munlock_isolate_lru_page(page, true)) { spin_unlock_irq(&zone->lru_lock); __munlock_isolated_page(page); goto out; } __munlock_isolation_failed(page); unlock_out: spin_unlock_irq(&zone->lru_lock); out: return nr_pages - 1; } /** * __mlock_vma_pages_range() - mlock a range of pages in the vma. * @vma: target vma * @start: start address * @end: end address * @nonblocking: passed to __get_user_pages(); if non-NULL, *nonblocking is * cleared instead of blocking on disk IO or mmap_sem contention * * This takes care of making the pages present too. * * Returns 0 on success, negative error code on error. * * vma->vm_mm->mmap_sem must be held for at least read. */ long __mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON(start < vma->vm_start); VM_BUG_ON(end > vma->vm_end); VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); gup_flags = FOLL_TOUCH | FOLL_MLOCK; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; /* * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. */ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here.
*/ return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); } /* * Convert the get_user_pages() return value to a POSIX mlock() error */ static int __mlock_posix_error_return(long retval) { if (retval == -EFAULT) retval = -ENOMEM; else if (retval == -ENOMEM) retval = -EAGAIN; return retval; } /* * Prepare page for fast batched LRU putback via __putback_lru_fast() * * The fast path is available only for evictable pages with a single mapping. * Then we can bypass the per-cpu pvec and get better performance. * When mapcount > 1 we need try_to_munlock() which can fail. * When !page_evictable(), we need the full redo logic of putback_lru_page to * avoid leaving an evictable page on the unevictable list. * * In case of success, @page is added to @pvec and @pgrescued is incremented * if the page was previously unevictable. @page is also unlocked. */ static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, int *pgrescued) { VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); if (page_mapcount(page) <= 1 && page_evictable(page)) { pagevec_add(pvec, page); if (TestClearPageUnevictable(page)) (*pgrescued)++; unlock_page(page); return true; } return false; } /* * Putback multiple evictable pages to the LRU * * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of * the pages might have meanwhile become unevictable but that is OK. */ static void __putback_lru_fast(struct pagevec *pvec, int pgrescued) { count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec)); /* *__pagevec_lru_add() calls release_pages() so we don't call * put_page() explicitly */ __pagevec_lru_add(pvec); count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); } /* * Munlock a batch of pages from the same zone * * The work is split into two main phases. The first phase clears the Mlocked flag * and attempts to isolate the pages, all under a single zone lru lock. * The second phase finishes the munlock only for pages where isolation * succeeded. * * Note that the pagevec may be modified during the process. */ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(&zone->lru_lock); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } delta_munlocked = -nr + pagevec_count(&pvec_putback); __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(&zone->lru_lock); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path.
We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return the pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); } /* * Fill up pagevec for __munlock_pagevec using pte walk * * The function expects that the struct page corresponding to @start address is * a non-THP page already pinned and in the @pvec, and that it belongs to @zone. * * The rest of @pvec is filled by subsequent pages within the same pmd and same * zone, as long as the ptes are present and vm_normal_page() succeeds. These * pages also get pinned. * * Returns the address of the next page that should be scanned. This equals * @start + PAGE_SIZE when no page could be added by the pte walk. */ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end) { pte_t *pte; spinlock_t *ptl; /* * Initialize pte walk starting at the already pinned page where we * are sure that there is a pte, as it was pinned under the same * mmap_sem write op. */ pte = get_locked_pte(vma->vm_mm, start, &ptl); /* Make sure we do not cross the page table boundary */ end = pgd_addr_end(start, end); end = pud_addr_end(start, end); end = pmd_addr_end(start, end); /* The page next to the pinned page is the first we will try to get */ start += PAGE_SIZE; while (start < end) { struct page *page = NULL; pte++; if (pte_present(*pte)) page = vm_normal_page(vma, start, *pte); /* * Break if page could not be obtained or the page's node+zone does not * match */ if (!page || page_zone_id(page) != zoneid) break; get_page(page); /* * Increase the address that will be returned *before* the * eventual break due to pvec becoming full by adding the page */ start += PAGE_SIZE; if (pagevec_add(pvec, page) == 0) break; } pte_unmap_unlock(pte, ptl); return start; } /* * munlock_vma_pages_range() - munlock all pages in the vma range. * @vma - vma containing range to be munlock()ed. * @start - start address in @vma of the range * @end - end of range in @vma. * * For mremap(), munmap() and exit(). * * Called with @vma VM_LOCKED. * * Returns with VM_LOCKED cleared. Callers must be prepared to * deal with this. * * We don't save and restore VM_LOCKED here because pages are * still on lru. In unmap path, pages might be scanned by reclaim * and re-mlocked by try_to_{munlock|unmap} before we unmap and * free them. This will result in freeing mlocked pages. */ void munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { vma->vm_flags &= ~VM_LOCKED; while (start < end) { struct page *page = NULL; unsigned int page_mask; unsigned long page_increm; struct pagevec pvec; struct zone *zone; int zoneid; pagevec_init(&pvec, 0); /* * Although FOLL_DUMP is intended for get_dump_page(), * it just so happens that its special treatment of the * ZERO_PAGE (returning an error instead of doing get_page) * suits munlock very well (and if somehow an abnormal page * has sneaked into the range, we won't oops here: great).
*/ page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, &page_mask); if (page && !IS_ERR(page)) { if (PageTransHuge(page)) { lock_page(page); /* * Any THP page found by follow_page_mask() may * have gotten split before reaching * munlock_vma_page(), so we need to recompute * the page_mask here. */ page_mask = munlock_vma_page(page); unlock_page(page); put_page(page); /* follow_page_mask() */ } else { /* * Non-huge pages are handled in batches via * pagevec. The pin from follow_page_mask() * prevents them from being collapsed into a THP. */ pagevec_add(&pvec, page); zone = page_zone(page); zoneid = page_zone_id(page); /* * Try to fill the rest of the pagevec using a fast * pte walk. This will also update start to * the next page to process. Then munlock the * pagevec. */ start = __munlock_pagevec_fill(&pvec, vma, zoneid, start, end); __munlock_pagevec(&pvec, zone); goto next; } } /* It's a bug to munlock in the middle of a THP page */ VM_BUG_ON((start >> PAGE_SHIFT) & page_mask); page_increm = 1 + page_mask; start += page_increm * PAGE_SIZE; next: cond_resched(); } } /* * mlock_fixup - handle mlock[all]/munlock[all] requests. * * Filters out "special" vmas -- VM_LOCKED never gets set for these, and * munlock is a no-op. However, for some special vmas, we go ahead and * populate the ptes. * * For vmas that pass the filters, merge/split as appropriate. */ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; pgoff_t pgoff; int nr_pages; int ret = 0; int lock = !!(newflags & VM_LOCKED); if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) goto out; /* don't set VM_LOCKED, don't count */ pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); if (*prev) { vma = *prev; goto success; } if (start != vma->vm_start) { ret = split_vma(mm, vma, start, 1); if (ret) goto out; } if (end != vma->vm_end) { ret = split_vma(mm, vma, end, 0); if (ret) goto out; } success: /* * Keep track of the amount of locked VM. */ nr_pages = (end - start) >> PAGE_SHIFT; if (!lock) nr_pages = -nr_pages; mm->locked_vm += nr_pages; /* * vm_flags is protected by the mmap_sem held in write mode. * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, __mlock_vma_pages_range will bring it back. */ if (lock) vma->vm_flags = newflags; else munlock_vma_pages_range(vma, start, end); out: *prev = vma; return ret; } static int do_mlock(unsigned long start, size_t len, int on) { unsigned long nstart, end, tmp; struct vm_area_struct * vma, * prev; int error; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; vma = find_vma(current->mm, start); if (!vma || vma->vm_start > start) return -ENOMEM; prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; for (nstart = start ; ; ) { vm_flags_t newflags; /* Here we know that vma->vm_start <= nstart < vma->vm_end.
*/ newflags = vma->vm_flags & ~VM_LOCKED; if (on) newflags |= VM_LOCKED; tmp = vma->vm_end; if (tmp > end) tmp = end; error = mlock_fixup(vma, &prev, nstart, tmp, newflags); if (error) break; nstart = tmp; if (nstart < prev->vm_end) nstart = prev->vm_end; if (nstart >= end) break; vma = prev->vm_next; if (!vma || vma->vm_start != nstart) { error = -ENOMEM; break; } } return error; } /* * __mm_populate - populate and/or mlock pages within a range of address space. * * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap * flags. VMAs must be already marked with the desired vm_flags, and * mmap_sem must not be held. */ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; down_read(&mm->mmap_sem); vma = find_vma(mm, nstart); } else if (nstart >= vma->vm_end) vma = vma->vm_next; if (!vma || vma->vm_start >= end) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. __mlock_vma_pages_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = __mlock_vma_pages_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } ret = __mlock_posix_error_return(ret); break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) up_read(&mm->mmap_sem); return ret; /* 0 or negative error code */ } SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { unsigned long locked; unsigned long lock_limit; int error = -ENOMEM; if (!can_do_mlock()) return -EPERM; lru_add_drain_all(); /* flush pagevec */ len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; locked = len >> PAGE_SHIFT; down_write(&current->mm->mmap_sem); locked += current->mm->locked_vm; /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); up_write(&current->mm->mmap_sem); if (!error) error = __mm_populate(start, len, 0); return error; } SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; down_write(&current->mm->mmap_sem); ret = do_mlock(start, len, 0); up_write(&current->mm->mmap_sem); return ret; } static int do_mlockall(int flags) { struct vm_area_struct * vma, * prev = NULL; if (flags & MCL_FUTURE) current->mm->def_flags |= VM_LOCKED; else current->mm->def_flags &= ~VM_LOCKED; if (flags == MCL_FUTURE) goto out; for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { vm_flags_t newflags; newflags = vma->vm_flags & ~VM_LOCKED; if (flags & MCL_CURRENT) newflags |= VM_LOCKED; /* Ignore errors */ mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); cond_resched(); } out: return 0; } SYSCALL_DEFINE1(mlockall, int, flags) { unsigned long lock_limit; int ret = -EINVAL; if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE))) 
goto out; ret = -EPERM; if (!can_do_mlock()) goto out; if (flags & MCL_CURRENT) lru_add_drain_all(); /* flush pagevec */ lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; down_write(&current->mm->mmap_sem); if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); up_write(&current->mm->mmap_sem); if (!ret && (flags & MCL_CURRENT)) mm_populate(0, TASK_SIZE); out: return ret; } SYSCALL_DEFINE0(munlockall) { int ret; down_write(&current->mm->mmap_sem); ret = do_mlockall(0); up_write(&current->mm->mmap_sem); return ret; } /* * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB * shm segments) get accounted against the user_struct instead. */ static DEFINE_SPINLOCK(shmlock_user_lock); int user_shm_lock(size_t size, struct user_struct *user) { unsigned long lock_limit, locked; int allowed = 0; locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); if (lock_limit == RLIM_INFINITY) allowed = 1; lock_limit >>= PAGE_SHIFT; spin_lock(&shmlock_user_lock); if (!allowed && locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK)) goto out; get_uid(user); user->locked_shm += locked; allowed = 1; out: spin_unlock(&shmlock_user_lock); return allowed; } void user_shm_unlock(size_t size, struct user_struct *user) { spin_lock(&shmlock_user_lock); user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT; spin_unlock(&shmlock_user_lock); free_uid(user); }
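/*
 * Editor's note: the sketch below is an illustrative addition, not part of
 * the original mlock.c. It exercises the mlock()/munlock() entry points
 * defined above from userspace and mirrors what SYS_mlock does on entry:
 * the kernel extends the range to whole pages (start is masked, len is
 * PAGE_ALIGNed after folding in the sub-page offset) and checks the grown
 * locked_vm against RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK.
 * The buffer size and the 100-byte offset are arbitrary demo choices.
 */
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct rlimit rl;
	char *buf;

	getrlimit(RLIMIT_MEMLOCK, &rl);
	if (rl.rlim_cur == RLIM_INFINITY)
		printf("RLIMIT_MEMLOCK: unlimited\n");
	else
		printf("RLIMIT_MEMLOCK: %llu bytes (%llu pages)\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)(rl.rlim_cur / page));

	/* Not page aligned on purpose: the kernel rounds the range for us. */
	buf = malloc(3 * page);
	if (!buf)
		return 1;

	if (mlock(buf + 100, page) != 0) {  /* spans two pages once aligned */
		perror("mlock");            /* EPERM/ENOMEM on rlimit */
		free(buf);
		return 1;
	}
	/* ... touch buf here knowing it cannot be paged out ... */
	munlock(buf + 100, page);
	free(buf);
	return 0;
}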
./CrossVul/dataset_final_sorted/CWE-264/c/good_2123_0
crossvul-cpp_data_bad_3524_9
//------------------------------------------------------------------------------ // Copyright (c) 2004-2010 Atheros Communications Inc. // All rights reserved. // // // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // // // // Author(s): ="Atheros" //------------------------------------------------------------------------------ /* * This driver is a pseudo Ethernet driver to access the Atheros AR6000 * WLAN Device */ #include "ar6000_drv.h" #include "cfg80211.h" #include "htc.h" #include "wmi_filter_linux.h" #include "epping_test.h" #include "wlan_config.h" #include "ar3kconfig.h" #include "ar6k_pal.h" #include "AR6002/addrs.h" /* LINUX_HACK_FUDGE_FACTOR -- this provides a workaround for Linux behavior. When * the metadata was added to the header, it was found that Linux did not provide * enough headroom. However, when more headroom was requested than was truly needed, * Linux granted it. Therefore, to get the necessary headroom from Linux, the driver * requests LINUX_HACK_FUDGE_FACTOR bytes more than it actually needs. */ #define LINUX_HACK_FUDGE_FACTOR 16 #define BDATA_BDADDR_OFFSET 28 u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; u8 null_mac[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; #ifdef DEBUG #define ATH_DEBUG_DBG_LOG ATH_DEBUG_MAKE_MODULE_MASK(0) #define ATH_DEBUG_WLAN_CONNECT ATH_DEBUG_MAKE_MODULE_MASK(1) #define ATH_DEBUG_WLAN_SCAN ATH_DEBUG_MAKE_MODULE_MASK(2) #define ATH_DEBUG_WLAN_TX ATH_DEBUG_MAKE_MODULE_MASK(3) #define ATH_DEBUG_WLAN_RX ATH_DEBUG_MAKE_MODULE_MASK(4) #define ATH_DEBUG_HTC_RAW ATH_DEBUG_MAKE_MODULE_MASK(5) #define ATH_DEBUG_HCI_BRIDGE ATH_DEBUG_MAKE_MODULE_MASK(6) static struct ath_debug_mask_description driver_debug_desc[] = { { ATH_DEBUG_DBG_LOG , "Target Debug Logs"}, { ATH_DEBUG_WLAN_CONNECT , "WLAN connect"}, { ATH_DEBUG_WLAN_SCAN , "WLAN scan"}, { ATH_DEBUG_WLAN_TX , "WLAN Tx"}, { ATH_DEBUG_WLAN_RX , "WLAN Rx"}, { ATH_DEBUG_HTC_RAW , "HTC Raw IF tracing"}, { ATH_DEBUG_HCI_BRIDGE , "HCI Bridge Setup"}, { ATH_DEBUG_HCI_RECV , "HCI Recv tracing"}, { ATH_DEBUG_HCI_DUMP , "HCI Packet dumps"}, }; ATH_DEBUG_INSTANTIATE_MODULE_VAR(driver, "driver", "Linux Driver Interface", ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_WLAN_SCAN | ATH_DEBUG_HCI_BRIDGE, ATH_DEBUG_DESCRIPTION_COUNT(driver_debug_desc), driver_debug_desc); #endif #define IS_MAC_NULL(mac) (mac[0]==0 && mac[1]==0 && mac[2]==0 && mac[3]==0 && mac[4]==0 && mac[5]==0) #define IS_MAC_BCAST(mac) (*mac==0xff) #define DESCRIPTION "Driver to access the Atheros AR600x Device, version " __stringify(__VER_MAJOR_) "." __stringify(__VER_MINOR_) "." __stringify(__VER_PATCH_) "."
__stringify(__BUILD_NUMBER_) MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION(DESCRIPTION); MODULE_LICENSE("Dual BSD/GPL"); #ifndef REORG_APTC_HEURISTICS #undef ADAPTIVE_POWER_THROUGHPUT_CONTROL #endif /* REORG_APTC_HEURISTICS */ #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL #define APTC_TRAFFIC_SAMPLING_INTERVAL 100 /* msec */ #define APTC_UPPER_THROUGHPUT_THRESHOLD 3000 /* Kbps */ #define APTC_LOWER_THROUGHPUT_THRESHOLD 2000 /* Kbps */ typedef struct aptc_traffic_record { bool timerScheduled; struct timeval samplingTS; unsigned long bytesReceived; unsigned long bytesTransmitted; } APTC_TRAFFIC_RECORD; A_TIMER aptcTimer; APTC_TRAFFIC_RECORD aptcTR; #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ #ifdef EXPORT_HCI_BRIDGE_INTERFACE // callbacks registered by HCI transport driver struct hci_transport_callbacks ar6kHciTransCallbacks = { NULL }; #endif unsigned int processDot11Hdr = 0; char ifname[IFNAMSIZ] = {0,}; int wlaninitmode = WLAN_INIT_MODE_DEFAULT; static bool bypasswmi; unsigned int debuglevel = 0; int tspecCompliance = ATHEROS_COMPLIANCE; unsigned int busspeedlow = 0; unsigned int onebitmode = 0; unsigned int skipflash = 0; unsigned int wmitimeout = 2; unsigned int wlanNodeCaching = 1; unsigned int enableuartprint = ENABLEUARTPRINT_DEFAULT; unsigned int logWmiRawMsgs = 0; unsigned int enabletimerwar = 0; unsigned int num_device = 1; unsigned int regscanmode; unsigned int fwmode = 1; unsigned int mbox_yield_limit = 99; unsigned int enablerssicompensation = 0; int reduce_credit_dribble = 1 + HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_ONE_HALF; int allow_trace_signal = 0; #ifdef CONFIG_HOST_TCMD_SUPPORT unsigned int testmode =0; #endif unsigned int irqprocmode = HIF_DEVICE_IRQ_SYNC_ONLY;//HIF_DEVICE_IRQ_ASYNC_SYNC; unsigned int panic_on_assert = 1; unsigned int nohifscattersupport = NOHIFSCATTERSUPPORT_DEFAULT; unsigned int setuphci = SETUPHCI_DEFAULT; unsigned int loghci = 0; unsigned int setupbtdev = SETUPBTDEV_DEFAULT; #ifndef EXPORT_HCI_BRIDGE_INTERFACE unsigned int ar3khcibaud = AR3KHCIBAUD_DEFAULT; unsigned int hciuartscale = HCIUARTSCALE_DEFAULT; unsigned int hciuartstep = HCIUARTSTEP_DEFAULT; #endif unsigned int csumOffload=0; unsigned int csumOffloadTest=0; unsigned int eppingtest=0; unsigned int mac_addr_method; unsigned int firmware_bridge; module_param_string(ifname, ifname, sizeof(ifname), 0644); module_param(wlaninitmode, int, 0644); module_param(bypasswmi, bool, 0644); module_param(debuglevel, uint, 0644); module_param(tspecCompliance, int, 0644); module_param(onebitmode, uint, 0644); module_param(busspeedlow, uint, 0644); module_param(skipflash, uint, 0644); module_param(wmitimeout, uint, 0644); module_param(wlanNodeCaching, uint, 0644); module_param(logWmiRawMsgs, uint, 0644); module_param(enableuartprint, uint, 0644); module_param(enabletimerwar, uint, 0644); module_param(fwmode, uint, 0644); module_param(mbox_yield_limit, uint, 0644); module_param(reduce_credit_dribble, int, 0644); module_param(allow_trace_signal, int, 0644); module_param(enablerssicompensation, uint, 0644); module_param(processDot11Hdr, uint, 0644); module_param(csumOffload, uint, 0644); #ifdef CONFIG_HOST_TCMD_SUPPORT module_param(testmode, uint, 0644); #endif module_param(irqprocmode, uint, 0644); module_param(nohifscattersupport, uint, 0644); module_param(panic_on_assert, uint, 0644); module_param(setuphci, uint, 0644); module_param(loghci, uint, 0644); module_param(setupbtdev, uint, 0644); #ifndef EXPORT_HCI_BRIDGE_INTERFACE module_param(ar3khcibaud, uint, 0644); module_param(hciuartscale, 
uint, 0644); module_param(hciuartstep, uint, 0644); #endif module_param(eppingtest, uint, 0644); /* in 2.6.10 and later this is now a pointer to a uint */ unsigned int _mboxnum = HTC_MAILBOX_NUM_MAX; #define mboxnum &_mboxnum #ifdef DEBUG u32 g_dbg_flags = DBG_DEFAULTS; unsigned int debugflags = 0; int debugdriver = 0; unsigned int debughtc = 0; unsigned int debugbmi = 0; unsigned int debughif = 0; unsigned int txcreditsavailable[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int txcreditsconsumed[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int txcreditintrenable[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int txcreditintrenableaggregate[HTC_MAILBOX_NUM_MAX] = {0}; module_param(debugflags, uint, 0644); module_param(debugdriver, int, 0644); module_param(debughtc, uint, 0644); module_param(debugbmi, uint, 0644); module_param(debughif, uint, 0644); module_param_array(txcreditsavailable, uint, mboxnum, 0644); module_param_array(txcreditsconsumed, uint, mboxnum, 0644); module_param_array(txcreditintrenable, uint, mboxnum, 0644); module_param_array(txcreditintrenableaggregate, uint, mboxnum, 0644); #endif /* DEBUG */ unsigned int resetok = 1; unsigned int tx_attempt[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int tx_post[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int tx_complete[HTC_MAILBOX_NUM_MAX] = {0}; unsigned int hifBusRequestNumMax = 40; unsigned int war23838_disabled = 0; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL unsigned int enableAPTCHeuristics = 1; #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ module_param_array(tx_attempt, uint, mboxnum, 0644); module_param_array(tx_post, uint, mboxnum, 0644); module_param_array(tx_complete, uint, mboxnum, 0644); module_param(hifBusRequestNumMax, uint, 0644); module_param(war23838_disabled, uint, 0644); module_param(resetok, uint, 0644); #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL module_param(enableAPTCHeuristics, uint, 0644); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ #ifdef BLOCK_TX_PATH_FLAG int blocktx = 0; module_param(blocktx, int, 0644); #endif /* BLOCK_TX_PATH_FLAG */ typedef struct user_rssi_compensation_t { u16 customerID; union { u16 a_enable; u16 bg_enable; u16 enable; }; s16 bg_param_a; s16 bg_param_b; s16 a_param_a; s16 a_param_b; u32 reserved; } USER_RSSI_CPENSATION; static USER_RSSI_CPENSATION rssi_compensation_param; static s16 rssi_compensation_table[96]; int reconnect_flag = 0; static ar6k_pal_config_t ar6k_pal_config_g; /* Function declarations */ static int ar6000_init_module(void); static void ar6000_cleanup_module(void); int ar6000_init(struct net_device *dev); static int ar6000_open(struct net_device *dev); static int ar6000_close(struct net_device *dev); static void ar6000_init_control_info(struct ar6_softc *ar); static int ar6000_data_tx(struct sk_buff *skb, struct net_device *dev); void ar6000_destroy(struct net_device *dev, unsigned int unregister); static void ar6000_detect_error(unsigned long ptr); static void ar6000_set_multicast_list(struct net_device *dev); static struct net_device_stats *ar6000_get_stats(struct net_device *dev); static void disconnect_timer_handler(unsigned long ptr); void read_rssi_compensation_param(struct ar6_softc *ar); /* * HTC service connection handlers */ static int ar6000_avail_ev(void *context, void *hif_handle); static int ar6000_unavail_ev(void *context, void *hif_handle); int ar6000_configure_target(struct ar6_softc *ar); static void ar6000_target_failure(void *Instance, int Status); static void ar6000_rx(void *Context, struct htc_packet *pPacket); static void ar6000_rx_refill(void *Context,HTC_ENDPOINT_ID Endpoint); 
static void ar6000_tx_complete(void *Context, struct htc_packet_queue *pPackets); static HTC_SEND_FULL_ACTION ar6000_tx_queue_full(void *Context, struct htc_packet *pPacket); static void ar6000_alloc_netbufs(A_NETBUF_QUEUE_T *q, u16 num); static void ar6000_deliver_frames_to_nw_stack(void * dev, void *osbuf); //static void ar6000_deliver_frames_to_bt_stack(void * dev, void *osbuf); static struct htc_packet *ar6000_alloc_amsdu_rxbuf(void *Context, HTC_ENDPOINT_ID Endpoint, int Length); static void ar6000_refill_amsdu_rxbufs(struct ar6_softc *ar, int Count); static void ar6000_cleanup_amsdu_rxbufs(struct ar6_softc *ar); static ssize_t ar6000_sysfs_bmi_read(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count); static ssize_t ar6000_sysfs_bmi_write(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count); static int ar6000_sysfs_bmi_init(struct ar6_softc *ar); void ar6k_cleanup_hci_pal(struct ar6_softc *ar); static void ar6000_sysfs_bmi_deinit(struct ar6_softc *ar); int ar6000_sysfs_bmi_get_config(struct ar6_softc *ar, u32 mode); /* * Static variables */ struct net_device *ar6000_devices[MAX_AR6000]; static int is_netdev_registered; DECLARE_WAIT_QUEUE_HEAD(arEvent); static void ar6000_cookie_init(struct ar6_softc *ar); static void ar6000_cookie_cleanup(struct ar6_softc *ar); static void ar6000_free_cookie(struct ar6_softc *ar, struct ar_cookie * cookie); static struct ar_cookie *ar6000_alloc_cookie(struct ar6_softc *ar); static int ar6000_reinstall_keys(struct ar6_softc *ar,u8 key_op_ctrl); #ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT struct net_device *arApNetDev; #endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */ static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM]; #define HOST_INTEREST_ITEM_ADDRESS(ar, item) \ (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_HOST_INTEREST_ITEM_ADDRESS(item) : \ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0)) static struct net_device_ops ar6000_netdev_ops = { .ndo_init = NULL, .ndo_open = ar6000_open, .ndo_stop = ar6000_close, .ndo_get_stats = ar6000_get_stats, .ndo_start_xmit = ar6000_data_tx, .ndo_set_multicast_list = ar6000_set_multicast_list, }; /* Debug log support */ /* * Flag to govern whether the debug logs should be parsed in the kernel * or reported to the application. */ #define REPORT_DEBUG_LOGS_TO_APP int ar6000_set_host_app_area(struct ar6_softc *ar) { u32 address, data; struct host_app_area_s host_app_area; /* Fetch the address of the host_app_area_s instance in the host interest area */ address = TARG_VTOP(ar->arTargetType, HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest)); if (ar6000_ReadRegDiag(ar->arHifDevice, &address, &data) != 0) { return A_ERROR; } address = TARG_VTOP(ar->arTargetType, data); host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION; if (ar6000_WriteDataDiag(ar->arHifDevice, address, (u8 *)&host_app_area, sizeof(struct host_app_area_s)) != 0) { return A_ERROR; } return 0; } u32 dbglog_get_debug_hdr_ptr(struct ar6_softc *ar) { u32 param; u32 address; int status; address = TARG_VTOP(ar->arTargetType, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dbglog_hdr)); if ((status = ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)&param, 4)) != 0) { param = 0; } return param; } /* * The dbglog module has been initialized. It's OK to access the relevant * data structures over the diagnostic window.
*/ void ar6000_dbglog_init_done(struct ar6_softc *ar) { ar->dbglog_init_done = true; } u32 dbglog_get_debug_fragment(s8 *datap, u32 len, u32 limit) { s32 *buffer; u32 count; u32 numargs; u32 length; u32 fraglen; count = fraglen = 0; buffer = (s32 *)datap; length = (limit >> 2); if (len <= limit) { fraglen = len; } else { while (count < length) { numargs = DBGLOG_GET_NUMARGS(buffer[count]); fraglen = (count << 2); count += numargs + 1; } } return fraglen; } void dbglog_parse_debug_logs(s8 *datap, u32 len) { s32 *buffer; u32 count; u32 timestamp; u32 debugid; u32 moduleid; u32 numargs; u32 length; count = 0; buffer = (s32 *)datap; length = (len >> 2); while (count < length) { debugid = DBGLOG_GET_DBGID(buffer[count]); moduleid = DBGLOG_GET_MODULEID(buffer[count]); numargs = DBGLOG_GET_NUMARGS(buffer[count]); timestamp = DBGLOG_GET_TIMESTAMP(buffer[count]); switch (numargs) { case 0: AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d)\n", moduleid, debugid, timestamp)); break; case 1: AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d): 0x%x\n", moduleid, debugid, timestamp, buffer[count+1])); break; case 2: AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d): 0x%x, 0x%x\n", moduleid, debugid, timestamp, buffer[count+1], buffer[count+2])); break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid args: %d\n", numargs)); } count += numargs + 1; } } int ar6000_dbglog_get_debug_logs(struct ar6_softc *ar) { u32 data[8]; /* Should be able to accommodate struct dbglog_buf_s */ u32 address; u32 length; u32 dropped; u32 firstbuf; u32 debug_hdr_ptr; if (!ar->dbglog_init_done) return A_ERROR; AR6000_SPIN_LOCK(&ar->arLock, 0); if (ar->dbgLogFetchInProgress) { AR6000_SPIN_UNLOCK(&ar->arLock, 0); return A_EBUSY; } /* block out others */ ar->dbgLogFetchInProgress = true; AR6000_SPIN_UNLOCK(&ar->arLock, 0); debug_hdr_ptr = dbglog_get_debug_hdr_ptr(ar); printk("debug_hdr_ptr: 0x%x\n", debug_hdr_ptr); /* Get the contents of the ring buffer */ if (debug_hdr_ptr) { address = TARG_VTOP(ar->arTargetType, debug_hdr_ptr); length = 4 /* sizeof(dbuf) */ + 4 /* sizeof(dropped) */; A_MEMZERO(data, sizeof(data)); ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)data, length); address = TARG_VTOP(ar->arTargetType, data[0] /* dbuf */); firstbuf = address; dropped = data[1]; /* dropped */ length = 4 /* sizeof(next) */ + 4 /* sizeof(buffer) */ + 4 /* sizeof(bufsize) */ + 4 /* sizeof(length) */ + 4 /* sizeof(count) */ + 4 /* sizeof(free) */; A_MEMZERO(data, sizeof(data)); ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)&data, length); do { address = TARG_VTOP(ar->arTargetType, data[1] /* buffer*/); length = data[3]; /* length */ if ((length) && (length <= data[2] /* bufsize*/)) { /* Rewind the index if it is about to overrun the buffer */ if (ar->log_cnt > (DBGLOG_HOST_LOG_BUFFER_SIZE - length)) { ar->log_cnt = 0; } if(0 != ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)&ar->log_buffer[ar->log_cnt], length)) { break; } ar6000_dbglog_event(ar, dropped, (s8 *)&ar->log_buffer[ar->log_cnt], length); ar->log_cnt += length; } else { AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("Length: %d (Total size: %d)\n", data[3], data[2])); } address = TARG_VTOP(ar->arTargetType, data[0] /* next */); length = 4 /* sizeof(next) */ + 4 /* sizeof(buffer) */ + 4 /* sizeof(bufsize) */ + 4 /* sizeof(length) */ + 4 /* sizeof(count) */ + 4 /* sizeof(free) */; A_MEMZERO(data, sizeof(data)); if(0 != ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)&data, length)) { break; } } while (address != firstbuf); } ar->dbgLogFetchInProgress = false; return 0; } void 
ar6000_dbglog_event(struct ar6_softc *ar, u32 dropped, s8 *buffer, u32 length) { #ifdef REPORT_DEBUG_LOGS_TO_APP #define MAX_WIRELESS_EVENT_SIZE 252 /* * Break it up into chunks of MAX_WIRELESS_EVENT_SIZE bytes of messages. * There seems to be a limitation on the length of message that could be * transmitted to the user app via this mechanism. */ u32 send, sent; sent = 0; send = dbglog_get_debug_fragment(&buffer[sent], length - sent, MAX_WIRELESS_EVENT_SIZE); while (send) { sent += send; send = dbglog_get_debug_fragment(&buffer[sent], length - sent, MAX_WIRELESS_EVENT_SIZE); } #else AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Dropped logs: 0x%x\nDebug info length: %d\n", dropped, length)); /* Interpret the debug logs */ dbglog_parse_debug_logs((s8 *)buffer, length); #endif /* REPORT_DEBUG_LOGS_TO_APP */ } static int __init ar6000_init_module(void) { static int probed = 0; int r; OSDRV_CALLBACKS osdrvCallbacks; a_module_debug_support_init(); #ifdef DEBUG /* check for debug mask overrides */ if (debughtc != 0) { ATH_DEBUG_SET_DEBUG_MASK(htc,debughtc); } if (debugbmi != 0) { ATH_DEBUG_SET_DEBUG_MASK(bmi,debugbmi); } if (debughif != 0) { ATH_DEBUG_SET_DEBUG_MASK(hif,debughif); } if (debugdriver != 0) { ATH_DEBUG_SET_DEBUG_MASK(driver,debugdriver); } #endif A_REGISTER_MODULE_DEBUG_INFO(driver); A_MEMZERO(&osdrvCallbacks,sizeof(osdrvCallbacks)); osdrvCallbacks.deviceInsertedHandler = ar6000_avail_ev; osdrvCallbacks.deviceRemovedHandler = ar6000_unavail_ev; #ifdef CONFIG_PM osdrvCallbacks.deviceSuspendHandler = ar6000_suspend_ev; osdrvCallbacks.deviceResumeHandler = ar6000_resume_ev; osdrvCallbacks.devicePowerChangeHandler = ar6000_power_change_ev; #endif #ifdef DEBUG /* Set the debug flags if specified at load time */ if(debugflags != 0) { g_dbg_flags = debugflags; } #endif if (probed) { return -ENODEV; } probed++; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL memset(&aptcTR, 0, sizeof(APTC_TRAFFIC_RECORD)); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ r = HIFInit(&osdrvCallbacks); if (r) return r; return 0; } static void __exit ar6000_cleanup_module(void) { int i = 0; struct net_device *ar6000_netdev; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL /* Delete the Adaptive Power Control timer */ if (timer_pending(&aptcTimer)) { del_timer_sync(&aptcTimer); } #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ for (i=0; i < MAX_AR6000; i++) { if (ar6000_devices[i] != NULL) { ar6000_netdev = ar6000_devices[i]; ar6000_devices[i] = NULL; ar6000_destroy(ar6000_netdev, 1); } } HIFShutDownDevice(NULL); a_module_debug_support_cleanup(); AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_cleanup: success\n")); } #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL void aptcTimerHandler(unsigned long arg) { u32 numbytes; u32 throughput; struct ar6_softc *ar; int status; ar = (struct ar6_softc *)arg; A_ASSERT(ar != NULL); A_ASSERT(!timer_pending(&aptcTimer)); AR6000_SPIN_LOCK(&ar->arLock, 0); /* Get the number of bytes transferred */ numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived; aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0; /* Calculate and decide based on throughput thresholds */ throughput = ((numbytes * 8)/APTC_TRAFFIC_SAMPLING_INTERVAL); /* Kbps */ if (throughput < APTC_LOWER_THROUGHPUT_THRESHOLD) { /* Enable Sleep and delete the timer */ A_ASSERT(ar->arWmiReady == true); AR6000_SPIN_UNLOCK(&ar->arLock, 0); status = wmi_powermode_cmd(ar->arWmi, REC_POWER); AR6000_SPIN_LOCK(&ar->arLock, 0); A_ASSERT(status == 0); aptcTR.timerScheduled = false; } else { A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0); } 
AR6000_SPIN_UNLOCK(&ar->arLock, 0); } #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ static void ar6000_alloc_netbufs(A_NETBUF_QUEUE_T *q, u16 num) { void * osbuf; while(num) { if((osbuf = A_NETBUF_ALLOC(AR6000_BUFFER_SIZE))) { A_NETBUF_ENQUEUE(q, osbuf); } else { break; } num--; } if(num) { A_PRINTF("%s(), allocation of netbuf failed", __func__); } } static struct bin_attribute bmi_attr = { .attr = {.name = "bmi", .mode = 0600}, .read = ar6000_sysfs_bmi_read, .write = ar6000_sysfs_bmi_write, }; static ssize_t ar6000_sysfs_bmi_read(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { int index; struct ar6_softc *ar; struct hif_device_os_device_info *osDevInfo; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Read %d bytes\n", (u32)count)); for (index=0; index < MAX_AR6000; index++) { ar = (struct ar6_softc *)ar6k_priv(ar6000_devices[index]); osDevInfo = &ar->osDevInfo; if (kobj == (&(((struct device *)osDevInfo->pOSDevice)->kobj))) { break; } } if (index == MAX_AR6000) return 0; if ((BMIRawRead(ar->arHifDevice, (u8*)buf, count, true)) != 0) { return 0; } return count; } static ssize_t ar6000_sysfs_bmi_write(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { int index; struct ar6_softc *ar; struct hif_device_os_device_info *osDevInfo; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Write %d bytes\n", (u32)count)); for (index=0; index < MAX_AR6000; index++) { ar = (struct ar6_softc *)ar6k_priv(ar6000_devices[index]); osDevInfo = &ar->osDevInfo; if (kobj == (&(((struct device *)osDevInfo->pOSDevice)->kobj))) { break; } } if (index == MAX_AR6000) return 0; if ((BMIRawWrite(ar->arHifDevice, (u8*)buf, count)) != 0) { return 0; } return count; } static int ar6000_sysfs_bmi_init(struct ar6_softc *ar) { int status; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Creating sysfs entry\n")); A_MEMZERO(&ar->osDevInfo, sizeof(struct hif_device_os_device_info)); /* Get the underlying OS device */ status = HIFConfigureDevice(ar->arHifDevice, HIF_DEVICE_GET_OS_DEVICE, &ar->osDevInfo, sizeof(struct hif_device_os_device_info)); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI: Failed to get OS device info from HIF\n")); return A_ERROR; } /* Create a bmi entry in the sysfs filesystem */ if ((sysfs_create_bin_file(&(((struct device *)ar->osDevInfo.pOSDevice)->kobj), &bmi_attr)) < 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMI: Failed to create entry for bmi in sysfs filesystem\n")); return A_ERROR; } return 0; } static void ar6000_sysfs_bmi_deinit(struct ar6_softc *ar) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Deleting sysfs entry\n")); sysfs_remove_bin_file(&(((struct device *)ar->osDevInfo.pOSDevice)->kobj), &bmi_attr); } #define bmifn(fn) do { \ if ((fn) < 0) { \ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); \ return A_ERROR; \ } \ } while(0) #ifdef SOFTMAC_FILE_USED #define AR6002_MAC_ADDRESS_OFFSET 0x0A #define AR6003_MAC_ADDRESS_OFFSET 0x16 static void calculate_crc(u32 TargetType, u8 *eeprom_data) { u16 *ptr_crc; u16 *ptr16_eeprom; u16 checksum; u32 i; u32 eeprom_size; if (TargetType == TARGET_TYPE_AR6001) { eeprom_size = 512; ptr_crc = (u16 *)eeprom_data; } else if (TargetType == TARGET_TYPE_AR6003) { eeprom_size = 1024; ptr_crc = (u16 *)((u8 *)eeprom_data + 0x04); } else { eeprom_size = 768; ptr_crc = (u16 *)((u8 *)eeprom_data + 0x04); } // Clear the crc *ptr_crc = 0; // Recalculate new CRC checksum = 0; ptr16_eeprom = (u16 *)eeprom_data; for (i = 0;i < eeprom_size; i += 2) { checksum = checksum 
^ (*ptr16_eeprom); ptr16_eeprom++; } checksum = 0xFFFF ^ checksum; *ptr_crc = checksum; } static void ar6000_softmac_update(struct ar6_softc *ar, u8 *eeprom_data, size_t size) { const char *source = "random generated"; const struct firmware *softmac_entry; u8 *ptr_mac; switch (ar->arTargetType) { case TARGET_TYPE_AR6002: ptr_mac = (u8 *)((u8 *)eeprom_data + AR6002_MAC_ADDRESS_OFFSET); break; case TARGET_TYPE_AR6003: ptr_mac = (u8 *)((u8 *)eeprom_data + AR6003_MAC_ADDRESS_OFFSET); break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Target Type\n")); return; } printk(KERN_DEBUG "MAC from EEPROM %pM\n", ptr_mac); /* create a random MAC in case we cannot read file from system */ ptr_mac[0] = 0; ptr_mac[1] = 0x03; ptr_mac[2] = 0x7F; ptr_mac[3] = random32() & 0xff; ptr_mac[4] = random32() & 0xff; ptr_mac[5] = random32() & 0xff; if ((A_REQUEST_FIRMWARE(&softmac_entry, "softmac", ((struct device *)ar->osDevInfo.pOSDevice))) == 0) { char *macbuf = A_MALLOC_NOWAIT(softmac_entry->size+1); if (macbuf) { unsigned int softmac[6]; memcpy(macbuf, softmac_entry->data, softmac_entry->size); macbuf[softmac_entry->size] = '\0'; if (sscanf(macbuf, "%02x:%02x:%02x:%02x:%02x:%02x", &softmac[0], &softmac[1], &softmac[2], &softmac[3], &softmac[4], &softmac[5])==6) { int i; for (i=0; i<6; ++i) { ptr_mac[i] = softmac[i] & 0xff; } source = "softmac file"; } kfree(macbuf); } A_RELEASE_FIRMWARE(softmac_entry); } printk(KERN_DEBUG "MAC from %s %pM\n", source, ptr_mac); calculate_crc(ar->arTargetType, eeprom_data); } #endif /* SOFTMAC_FILE_USED */ static int ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address, bool compressed) { int status; const char *filename; const struct firmware *fw_entry; u32 fw_entry_size; switch (file) { case AR6K_OTP_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_OTP_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_OTP_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_OTP_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; case AR6K_FIRMWARE_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } if (eppingtest) { bypasswmi = true; if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_EPPING_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_EPPING_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_EPPING_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("eppingtest : unsupported firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #ifdef CONFIG_HOST_TCMD_SUPPORT if(testmode) { if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_TCMD_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_TCMD_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_TCMD_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware 
revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #endif #ifdef HTC_RAW_INTERFACE if (!eppingtest && bypasswmi) { if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_ART_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_ART_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #endif break; case AR6K_PATCH_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_PATCH_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_PATCH_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_PATCH_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; case AR6K_BOARD_DATA_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_BOARD_DATA_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_BOARD_DATA_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_BOARD_DATA_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown file type: %d\n", file)); return A_ERROR; } if ((A_REQUEST_FIRMWARE(&fw_entry, filename, ((struct device *)ar->osDevInfo.pOSDevice))) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get %s\n", filename)); return A_ENOENT; } #ifdef SOFTMAC_FILE_USED if (file==AR6K_BOARD_DATA_FILE && fw_entry->data) { ar6000_softmac_update(ar, (u8 *)fw_entry->data, fw_entry->size); } #endif fw_entry_size = fw_entry->size; /* Load extended board data for AR6003 */ if ((file==AR6K_BOARD_DATA_FILE) && (fw_entry->data)) { u32 board_ext_address; u32 board_ext_data_size; u32 board_data_size; board_ext_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_EXT_DATA_SZ : \ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_BOARD_EXT_DATA_SZ : 0)); board_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_DATA_SZ : \ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? 
AR6003_BOARD_DATA_SZ : 0)); /* Determine where in Target RAM to write Board Data */ bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data), (u8 *)&board_ext_address, 4)); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board extended Data download address: 0x%x\n", board_ext_address)); /* check whether the target has allocated memory for extended board data and file contains extended board data */ if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { u32 param; status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (u8 *)(fw_entry->data + board_data_size), board_ext_data_size); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); A_RELEASE_FIRMWARE(fw_entry); return A_ERROR; } /* Record the fact that extended board Data IS initialized */ param = (board_ext_data_size << 16) | 1; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data_config), (unsigned char *)&param, 4)); } fw_entry_size = board_data_size; } if (compressed) { status = BMIFastDownload(ar->arHifDevice, address, (u8 *)fw_entry->data, fw_entry_size); } else { status = BMIWriteMemory(ar->arHifDevice, address, (u8 *)fw_entry->data, fw_entry_size); } if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); A_RELEASE_FIRMWARE(fw_entry); return A_ERROR; } A_RELEASE_FIRMWARE(fw_entry); return 0; } int ar6000_update_bdaddr(struct ar6_softc *ar) { if (setupbtdev != 0) { u32 address; if (BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data), (u8 *)&address, 4) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for hi_board_data failed\n")); return A_ERROR; } if (BMIReadMemory(ar->arHifDevice, address + BDATA_BDADDR_OFFSET, (u8 *)ar->bdaddr, 6) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for BD address failed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BDADDR 0x%x:0x%x:0x%x:0x%x:0x%x:0x%x\n", ar->bdaddr[0], ar->bdaddr[1], ar->bdaddr[2], ar->bdaddr[3], ar->bdaddr[4], ar->bdaddr[5])); } return 0; } int ar6000_sysfs_bmi_get_config(struct ar6_softc *ar, u32 mode) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Requesting device specific configuration\n")); if (mode == WLAN_INIT_MODE_UDEV) { char version[16]; const struct firmware *fw_entry; /* Get config using udev through a script in user space */ sprintf(version, "%2.2x", ar->arVersion.target_ver); if ((A_REQUEST_FIRMWARE(&fw_entry, version, ((struct device *)ar->osDevInfo.pOSDevice))) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI: Failure to get configuration for target version: %s\n", version)); return A_ERROR; } A_RELEASE_FIRMWARE(fw_entry); } else { /* The config is contained within the driver itself */ int status; u32 param, options, sleep, address; /* Temporarily disable system sleep */ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS; bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param)); options = param; param |= AR6K_OPTION_SLEEP_DISABLE; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS; bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param)); sleep = param; param |= WLAN_SYSTEM_SLEEP_DISABLE_SET(1); bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("old options: %d, old sleep: %d\n", options, sleep)); if (ar->arTargetType == TARGET_TYPE_AR6003) { /* Program analog PLL register */ bmifn(BMIWriteSOCRegister(ar->arHifDevice, ANALOG_INTF_BASE_ADDRESS + 0x284, 0xF9104001)); /* 
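AR6003 parts default to a faster CPU clock than AR6002: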
Run at 80/88MHz by default */ param = CPU_CLOCK_STANDARD_SET(1); } else { /* Run at 40/44MHz by default */ param = CPU_CLOCK_STANDARD_SET(0); } address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); param = 0; if (ar->arTargetType == TARGET_TYPE_AR6002) { bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_ext_clk_detected), (u8 *)&param, 4)); } /* LPO_CAL.ENABLE = 1 if no external clk is detected */ if (param != 1) { address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS; param = LPO_CAL_ENABLE_SET(1); bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); } /* Venus2.0: Lower SDIO pad drive strength, * temporary WAR to avoid SDIO CRC error */ if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6K: Temporary WAR to avoid SDIO CRC error\n")); param = 0x20; address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); } #ifdef FORCE_INTERNAL_CLOCK /* Ignore external clock, if any, and force use of internal clock */ if (ar->arTargetType == TARGET_TYPE_AR6003) { /* hi_ext_clk_detected = 0 */ param = 0; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_ext_clk_detected), (u8 *)&param, 4)); /* CLOCK_CONTROL &= ~LF_CLK32 */ address = RTC_BASE_ADDRESS + CLOCK_CONTROL_ADDRESS; bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param)); param &= (~CLOCK_CONTROL_LF_CLK32_SET(1)); bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); } #endif /* FORCE_INTERNAL_CLOCK */ /* Transfer Board Data from Target EEPROM to Target RAM */ if (ar->arTargetType == TARGET_TYPE_AR6003) { /* Determine where in Target RAM to write Board Data */ bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data), (u8 *)&address, 4)); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board Data download address: 0x%x\n", address)); /* Write EEPROM data to Target RAM */ if ((ar6000_transfer_bin_file(ar, AR6K_BOARD_DATA_FILE, address, false)) != 0) { return A_ERROR; } /* Record the fact that Board Data IS initialized */ param = 1; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data_initialized), (u8 *)&param, 4)); /* Transfer One time Programmable data */ AR6K_APP_LOAD_ADDRESS(address, ar->arVersion.target_ver); if (ar->arVersion.target_ver == AR6003_REV3_VERSION) address = 0x1234; status = ar6000_transfer_bin_file(ar, AR6K_OTP_FILE, address, true); if (status == 0) { /* Execute the OTP code */ param = 0; AR6K_APP_START_OVERRIDE_ADDRESS(address, ar->arVersion.target_ver); bmifn(BMIExecute(ar->arHifDevice, address, &param)); } else if (status != A_ENOENT) { return A_ERROR; } } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Programming of board data for chip %d not supported\n", ar->arTargetType)); return A_ERROR; } /* Download Target firmware */ AR6K_APP_LOAD_ADDRESS(address, ar->arVersion.target_ver); if (ar->arVersion.target_ver == AR6003_REV3_VERSION) address = 0x1234; if ((ar6000_transfer_bin_file(ar, AR6K_FIRMWARE_FILE, address, true)) != 0) { return A_ERROR; } /* Set starting address for firmware */ AR6K_APP_START_OVERRIDE_ADDRESS(address, ar->arVersion.target_ver); 
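            /*
             * BMISetAppStart records the firmware entry point that the target
             * jumps to once BMIDone() later ends the BMI phase (BMIDone() is
             * issued from ar6000_init() when the host has finished setup).
             */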
bmifn(BMISetAppStart(ar->arHifDevice, address)); if(ar->arTargetType == TARGET_TYPE_AR6003) { AR6K_DATASET_PATCH_ADDRESS(address, ar->arVersion.target_ver); if ((ar6000_transfer_bin_file(ar, AR6K_PATCH_FILE, address, false)) != 0) return A_ERROR; param = address; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dset_list_head), (unsigned char *)&param, 4)); } /* Restore system sleep */ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, sleep)); address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS; param = options | 0x20; bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); if (ar->arTargetType == TARGET_TYPE_AR6003) { /* Configure GPIO AR6003 UART */ #ifndef CONFIG_AR600x_DEBUG_UART_TX_PIN #define CONFIG_AR600x_DEBUG_UART_TX_PIN 8 #endif param = CONFIG_AR600x_DEBUG_UART_TX_PIN; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dbg_uart_txpin), (u8 *)&param, 4)); #if (CONFIG_AR600x_DEBUG_UART_TX_PIN == 23) { address = GPIO_BASE_ADDRESS + CLOCK_GPIO_ADDRESS; bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param)); param |= CLOCK_GPIO_BT_CLK_OUT_EN_SET(1); bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param)); } #endif /* Configure GPIO for BT Reset */ #ifdef ATH6KL_CONFIG_GPIO_BT_RESET #define CONFIG_AR600x_BT_RESET_PIN 0x16 param = CONFIG_AR600x_BT_RESET_PIN; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_hci_uart_support_pins), (u8 *)&param, 4)); #endif /* ATH6KL_CONFIG_GPIO_BT_RESET */ /* Configure UART flow control polarity */ #ifndef CONFIG_ATH6KL_BT_UART_FC_POLARITY #define CONFIG_ATH6KL_BT_UART_FC_POLARITY 0 #endif #if (CONFIG_ATH6KL_BT_UART_FC_POLARITY == 1) if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { param = ((CONFIG_ATH6KL_BT_UART_FC_POLARITY << 1) & 0x2); bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_hci_uart_pwr_mgmt_params), (u8 *)&param, 4)); } #endif /* CONFIG_ATH6KL_BT_UART_FC_POLARITY */ } #ifdef HTC_RAW_INTERFACE if (!eppingtest && bypasswmi) { /* Don't run BMIDone for ART mode and force resetok=0 */ resetok = 0; msleep(1000); } #endif /* HTC_RAW_INTERFACE */ } return 0; } int ar6000_configure_target(struct ar6_softc *ar) { u32 param; if (enableuartprint) { param = 1; if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_serial_enable), (u8 *)&param, 4)!= 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enableuartprint failed \n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Serial console prints enabled\n")); } /* Tell target which HTC version it is used*/ param = HTC_PROTOCOL_VERSION; if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest), (u8 *)&param, 4)!= 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for htc version failed \n")); return A_ERROR; } #ifdef CONFIG_HOST_TCMD_SUPPORT if(testmode) { ar->arTargetMode = AR6000_TCMD_MODE; }else { ar->arTargetMode = AR6000_WLAN_MODE; } #endif if (enabletimerwar) { u32 param; if (BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag), (u8 *)&param, 4)!= 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for enabletimerwar failed \n")); return A_ERROR; } param |= HI_OPTION_TIMER_WAR; if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag), (u8 *)&param, 4) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enabletimerwar failed \n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Timer WAR enabled\n")); } /* set the firmware 
mode to STA/IBSS/AP */ { u32 param; if (BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag), (u8 *)&param, 4)!= 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for setting fwmode failed \n")); return A_ERROR; } param |= (num_device << HI_OPTION_NUM_DEV_SHIFT); param |= (fwmode << HI_OPTION_FW_MODE_SHIFT); param |= (mac_addr_method << HI_OPTION_MAC_ADDR_METHOD_SHIFT); param |= (firmware_bridge << HI_OPTION_FW_BRIDGE_SHIFT); if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag), (u8 *)&param, 4) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for setting fwmode failed \n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n")); } #ifdef ATH6KL_DISABLE_TARGET_DBGLOGS { u32 param; if (BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag), (u8 *)&param, 4)!= 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for disabling debug logs failed\n")); return A_ERROR; } param |= HI_OPTION_DISABLE_DBGLOG; if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag), (u8 *)&param, 4) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for HI_OPTION_DISABLE_DBGLOG\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n")); } #endif /* ATH6KL_DISABLE_TARGET_DBGLOGS */ /* * Hardcode the address use for the extended board data * Ideally this should be pre-allocate by the OS at boot time * But since it is a new feature and board data is loaded * at init time, we have to workaround this from host. * It is difficult to patch the firmware boot code, * but possible in theory. */ if (ar->arTargetType == TARGET_TYPE_AR6003) { u32 ramReservedSz; if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { param = AR6003_REV2_BOARD_EXT_DATA_ADDRESS; ramReservedSz = AR6003_REV2_RAM_RESERVE_SIZE; } else { param = AR6003_REV3_BOARD_EXT_DATA_ADDRESS; ramReservedSz = AR6003_REV3_RAM_RESERVE_SIZE; } if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data), (u8 *)&param, 4) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIWriteMemory for " "hi_board_ext_data failed\n")); return A_ERROR; } if (BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_end_RAM_reserve_sz), (u8 *)&ramReservedSz, 4) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR , ("BMIWriteMemory for " "hi_end_RAM_reserve_sz failed\n")); return A_ERROR; } } /* since BMIInit is called in the driver layer, we have to set the block * size here for the target */ if (ar6000_set_htc_params(ar->arHifDevice, ar->arTargetType, mbox_yield_limit, 0)) { /* use default number of control buffers */ return A_ERROR; } if (setupbtdev != 0) { if (ar6000_set_hci_bridge_flags(ar->arHifDevice, ar->arTargetType, setupbtdev)) { return A_ERROR; } } return 0; } static void init_netdev(struct net_device *dev, char *name) { dev->netdev_ops = &ar6000_netdev_ops; dev->watchdog_timeo = AR6000_TX_TIMEOUT; /* * We need the OS to provide us with more headroom in order to * perform dix to 802.3, WMI header encap, and the HTC header */ if (processDot11Hdr) { dev->hard_header_len = sizeof(struct ieee80211_qosframe) + sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR) + HTC_HEADER_LEN + WMI_MAX_TX_META_SZ + LINUX_HACK_FUDGE_FACTOR; } else { dev->hard_header_len = ETH_HLEN + sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR) + HTC_HEADER_LEN + WMI_MAX_TX_META_SZ + LINUX_HACK_FUDGE_FACTOR; } if (name[0]) { strcpy(dev->name, name); } #ifdef CONFIG_CHECKSUM_OFFLOAD if(csumOffload){ dev->features |= NETIF_F_IP_CSUM; /*advertise kernel 
capability to do TCP/UDP CSUM offload for IPV4*/ } #endif return; } static int __ath6kl_init_netdev(struct net_device *dev) { int r; rtnl_lock(); r = ar6000_init(dev); rtnl_unlock(); if (r) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_init\n")); return r; } return 0; } #ifdef HTC_RAW_INTERFACE static int ath6kl_init_netdev_wmi(struct net_device *dev) { if (!eppingtest && bypasswmi) return 0; return __ath6kl_init_netdev(dev); } #else static int ath6kl_init_netdev_wmi(struct net_device *dev) { return __ath6kl_init_netdev(dev); } #endif static int ath6kl_init_netdev(struct ar6_softc *ar) { int r; r = ar6000_sysfs_bmi_get_config(ar, wlaninitmode); if (r) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ar6000_avail: " "ar6000_sysfs_bmi_get_config failed\n")); return r; } return ath6kl_init_netdev_wmi(ar->arNetDev); } /* * HTC Event handlers */ static int ar6000_avail_ev(void *context, void *hif_handle) { int i; struct net_device *dev; void *ar_netif; struct ar6_softc *ar; int device_index = 0; struct htc_init_info htcInfo; struct wireless_dev *wdev; int r = 0; struct hif_device_os_device_info osDevInfo; memset(&osDevInfo, 0, sizeof(osDevInfo)); if (HIFConfigureDevice(hif_handle, HIF_DEVICE_GET_OS_DEVICE, &osDevInfo, sizeof(osDevInfo))) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: Failed to get OS device instance\n", __func__)); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_available\n")); for (i=0; i < MAX_AR6000; i++) { if (ar6000_devices[i] == NULL) { break; } } if (i == MAX_AR6000) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_available: max devices reached\n")); return A_ERROR; } /* Save this. It gives a bit better readability especially since */ /* we use another local "i" variable below. */ device_index = i; wdev = ar6k_cfg80211_init(osDevInfo.pOSDevice); if (IS_ERR(wdev)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ar6k_cfg80211_init failed\n", __func__)); return A_ERROR; } ar_netif = wdev_priv(wdev); if (ar_netif == NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Can't allocate ar6k priv memory\n", __func__)); return A_ERROR; } A_MEMZERO(ar_netif, sizeof(struct ar6_softc)); ar = (struct ar6_softc *)ar_netif; ar->wdev = wdev; wdev->iftype = NL80211_IFTYPE_STATION; dev = alloc_netdev_mq(0, "wlan%d", ether_setup, 1); if (!dev) { printk(KERN_CRIT "AR6K: no memory for network device instance\n"); ar6k_cfg80211_deinit(ar); return A_ERROR; } dev->ieee80211_ptr = wdev; SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy)); wdev->netdev = dev; ar->arNetworkType = INFRA_NETWORK; ar->smeState = SME_DISCONNECTED; ar->arAutoAuthStage = AUTH_IDLE; init_netdev(dev, ifname); ar->arNetDev = dev; ar->arHifDevice = hif_handle; ar->arWlanState = WLAN_ENABLED; ar->arDeviceIndex = device_index; ar->arWlanPowerState = WLAN_POWER_STATE_ON; ar->arWlanOff = false; /* We are in ON state */ #ifdef CONFIG_PM ar->arWowState = WLAN_WOW_STATE_NONE; ar->arBTOff = true; /* BT chip assumed to be OFF */ ar->arBTSharing = WLAN_CONFIG_BT_SHARING; ar->arWlanOffConfig = WLAN_CONFIG_WLAN_OFF; ar->arSuspendConfig = WLAN_CONFIG_PM_SUSPEND; ar->arWow2Config = WLAN_CONFIG_PM_WOW2; #endif /* CONFIG_PM */ A_INIT_TIMER(&ar->arHBChallengeResp.timer, ar6000_detect_error, dev); ar->arHBChallengeResp.seqNum = 0; ar->arHBChallengeResp.outstanding = false; ar->arHBChallengeResp.missCnt = 0; ar->arHBChallengeResp.frequency = AR6000_HB_CHALLENGE_RESP_FREQ_DEFAULT; ar->arHBChallengeResp.missThres = AR6000_HB_CHALLENGE_RESP_MISS_THRES_DEFAULT; ar6000_init_control_info(ar); init_waitqueue_head(&arEvent); sema_init(&ar->arSem, 1); ar->bIsDestroyProgress = false; 
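    /*
     * Queue of pre-allocated HTC packets used to satisfy large A-MSDU
     * receive allocations; filled by ar6000_refill_amsdu_rxbufs() and
     * drained by ar6000_cleanup_amsdu_rxbufs().
     */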
INIT_HTC_PACKET_QUEUE(&ar->amsdu_rx_buffer_queue); #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL A_INIT_TIMER(&aptcTimer, aptcTimerHandler, ar); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ A_INIT_TIMER(&ar->disconnect_timer, disconnect_timer_handler, dev); BMIInit(); ar6000_sysfs_bmi_init(ar); { struct bmi_target_info targ_info; r = BMIGetTargetInfo(ar->arHifDevice, &targ_info); if (r) goto avail_ev_failed; ar->arVersion.target_ver = targ_info.target_ver; ar->arTargetType = targ_info.target_type; wdev->wiphy->hw_version = targ_info.target_ver; } r = ar6000_configure_target(ar); if (r) goto avail_ev_failed; A_MEMZERO(&htcInfo,sizeof(htcInfo)); htcInfo.pContext = ar; htcInfo.TargetFailure = ar6000_target_failure; ar->arHtcTarget = HTCCreate(ar->arHifDevice,&htcInfo); if (!ar->arHtcTarget) { r = -ENOMEM; goto avail_ev_failed; } spin_lock_init(&ar->arLock); #ifdef WAPI_ENABLE ar->arWapiEnable = 0; #endif if(csumOffload){ /*if external frame work is also needed, change and use an extended rxMetaVerion*/ ar->rxMetaVersion=WMI_META_VERSION_2; } ar->aggr_cntxt = aggr_init(ar6000_alloc_netbufs); if (!ar->aggr_cntxt) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize aggr.\n", __func__)); r = -ENOMEM; goto avail_ev_failed; } aggr_register_rx_dispatcher(ar->aggr_cntxt, (void *)dev, ar6000_deliver_frames_to_nw_stack); HIFClaimDevice(ar->arHifDevice, ar); /* We only register the device in the global list if we succeed. */ /* If the device is in the global list, it will be destroyed */ /* when the module is unloaded. */ ar6000_devices[device_index] = dev; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("BMI enabled: %d\n", wlaninitmode)); if ((wlaninitmode == WLAN_INIT_MODE_UDEV) || (wlaninitmode == WLAN_INIT_MODE_DRV)) { r = ath6kl_init_netdev(ar); if (r) goto avail_ev_failed; } /* This runs the init function if registered */ r = register_netdev(dev); if (r) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: register_netdev failed\n")); ar6000_destroy(dev, 0); return r; } is_netdev_registered = 1; #ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT arApNetDev = NULL; #endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_avail: name=%s hifdevice=0x%lx, dev=0x%lx (%d), ar=0x%lx\n", dev->name, (unsigned long)ar->arHifDevice, (unsigned long)dev, device_index, (unsigned long)ar)); avail_ev_failed : if (r) ar6000_sysfs_bmi_deinit(ar); return r; } static void ar6000_target_failure(void *Instance, int Status) { struct ar6_softc *ar = (struct ar6_softc *)Instance; WMI_TARGET_ERROR_REPORT_EVENT errEvent; static bool sip = false; if (Status != 0) { printk(KERN_ERR "ar6000_target_failure: target asserted \n"); if (timer_pending(&ar->arHBChallengeResp.timer)) { A_UNTIMEOUT(&ar->arHBChallengeResp.timer); } /* try dumping target assertion information (if any) */ ar6000_dump_target_assert_info(ar->arHifDevice,ar->arTargetType); /* * Fetch the logs from the target via the diagnostic * window. 
*/ ar6000_dbglog_get_debug_logs(ar); /* Report the error only once */ if (!sip) { sip = true; errEvent.errorVal = WMI_TARGET_COM_ERR | WMI_TARGET_FATAL_ERR; } } } static int ar6000_unavail_ev(void *context, void *hif_handle) { struct ar6_softc *ar = (struct ar6_softc *)context; /* NULL out it's entry in the global list */ ar6000_devices[ar->arDeviceIndex] = NULL; ar6000_destroy(ar->arNetDev, 1); return 0; } void ar6000_restart_endpoint(struct net_device *dev) { int status = 0; struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); BMIInit(); do { if ( (status=ar6000_configure_target(ar))!= 0) break; if ( (status=ar6000_sysfs_bmi_get_config(ar, wlaninitmode)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_sysfs_bmi_get_config failed\n")); break; } rtnl_lock(); status = (ar6000_init(dev)==0) ? 0 : A_ERROR; rtnl_unlock(); if (status) { break; } if (ar->arSsidLen && ar->arWlanState == WLAN_ENABLED) { ar6000_connect_to_ap(ar); } } while (0); if (status== 0) { return; } ar6000_devices[ar->arDeviceIndex] = NULL; ar6000_destroy(ar->arNetDev, 1); } void ar6000_stop_endpoint(struct net_device *dev, bool keepprofile, bool getdbglogs) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); /* Stop the transmit queues */ netif_stop_queue(dev); /* Disable the target and the interrupts associated with it */ if (ar->arWmiReady == true) { if (!bypasswmi) { bool disconnectIssued; disconnectIssued = (ar->arConnected) || (ar->arConnectPending); ar6000_disconnect(ar); if (!keepprofile) { ar6000_init_profile_info(ar); } A_UNTIMEOUT(&ar->disconnect_timer); if (getdbglogs) { ar6000_dbglog_get_debug_logs(ar); } ar->arWmiReady = false; wmi_shutdown(ar->arWmi); ar->arWmiEnabled = false; ar->arWmi = NULL; /* * After wmi_shudown all WMI events will be dropped. * We need to cleanup the buffers allocated in AP mode * and give disconnect notification to stack, which usually * happens in the disconnect_event. * Simulate the disconnect_event by calling the function directly. * Sometimes disconnect_event will be received when the debug logs * are collected. */ if (disconnectIssued) { if(ar->arNetworkType & AP_NETWORK) { ar6000_disconnect_event(ar, DISCONNECT_CMD, bcast_mac, 0, NULL, 0); } else { ar6000_disconnect_event(ar, DISCONNECT_CMD, ar->arBssid, 0, NULL, 0); } } ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT; ar->user_key_ctrl = 0; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): WMI stopped\n", __func__)); } else { AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): WMI not ready 0x%lx 0x%lx\n", __func__, (unsigned long) ar, (unsigned long) ar->arWmi)); /* Shut down WMI if we have started it */ if(ar->arWmiEnabled == true) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): Shut down WMI\n", __func__)); wmi_shutdown(ar->arWmi); ar->arWmiEnabled = false; ar->arWmi = NULL; } } if (ar->arHtcTarget != NULL) { #ifdef EXPORT_HCI_BRIDGE_INTERFACE if (NULL != ar6kHciTransCallbacks.cleanupTransport) { ar6kHciTransCallbacks.cleanupTransport(NULL); } #else // FIXME: workaround to reset BT's UART baud rate to default if (NULL != ar->exitCallback) { struct ar3k_config_info ar3kconfig; int status; A_MEMZERO(&ar3kconfig,sizeof(ar3kconfig)); ar6000_set_default_ar3kconfig(ar, (void *)&ar3kconfig); status = ar->exitCallback(&ar3kconfig); if (0 != status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to reset AR3K baud rate! \n")); } } // END workaround if (setuphci) ar6000_cleanup_hci(ar); #endif AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Shutting down HTC .... 
\n")); /* stop HTC */ HTCStop(ar->arHtcTarget); } if (resetok) { /* try to reset the device if we can * The driver may have been configure NOT to reset the target during * a debug session */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Attempting to reset target on instance destroy.... \n")); if (ar->arHifDevice != NULL) { bool coldReset = (ar->arTargetType == TARGET_TYPE_AR6003) ? true: false; ar6000_reset_device(ar->arHifDevice, ar->arTargetType, true, coldReset); } } else { AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Host does not want target reset. \n")); } /* Done with cookies */ ar6000_cookie_cleanup(ar); /* cleanup any allocated AMSDU buffers */ ar6000_cleanup_amsdu_rxbufs(ar); } /* * We need to differentiate between the surprise and planned removal of the * device because of the following consideration: * - In case of surprise removal, the hcd already frees up the pending * for the device and hence there is no need to unregister the function * driver inorder to get these requests. For planned removal, the function * driver has to explicitly unregister itself to have the hcd return all the * pending requests before the data structures for the devices are freed up. * Note that as per the current implementation, the function driver will * end up releasing all the devices since there is no API to selectively * release a particular device. * - Certain commands issued to the target can be skipped for surprise * removal since they will anyway not go through. */ void ar6000_destroy(struct net_device *dev, unsigned int unregister) { struct ar6_softc *ar; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("+ar6000_destroy \n")); if((dev == NULL) || ((ar = ar6k_priv(dev)) == NULL)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s(): Failed to get device structure.\n", __func__)); return; } ar->bIsDestroyProgress = true; if (down_interruptible(&ar->arSem)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s(): down_interruptible failed \n", __func__)); return; } if (ar->arWlanPowerState != WLAN_POWER_STATE_CUT_PWR) { /* only stop endpoint if we are not stop it in suspend_ev */ ar6000_stop_endpoint(dev, false, true); } ar->arWlanState = WLAN_DISABLED; if (ar->arHtcTarget != NULL) { /* destroy HTC */ HTCDestroy(ar->arHtcTarget); } if (ar->arHifDevice != NULL) { /*release the device so we do not get called back on remove incase we * we're explicity destroyed by module unload */ HIFReleaseDevice(ar->arHifDevice); HIFShutDownDevice(ar->arHifDevice); } aggr_module_destroy(ar->aggr_cntxt); /* Done with cookies */ ar6000_cookie_cleanup(ar); /* cleanup any allocated AMSDU buffers */ ar6000_cleanup_amsdu_rxbufs(ar); ar6000_sysfs_bmi_deinit(ar); /* Cleanup BMI */ BMICleanup(); /* Clear the tx counters */ memset(tx_attempt, 0, sizeof(tx_attempt)); memset(tx_post, 0, sizeof(tx_post)); memset(tx_complete, 0, sizeof(tx_complete)); #ifdef HTC_RAW_INTERFACE if (ar->arRawHtc) { kfree(ar->arRawHtc); ar->arRawHtc = NULL; } #endif /* Free up the device data structure */ if (unregister && is_netdev_registered) { unregister_netdev(dev); is_netdev_registered = 0; } free_netdev(dev); ar6k_cfg80211_deinit(ar); #ifdef CONFIG_AP_VIRTUL_ADAPTER_SUPPORT ar6000_remove_ap_interface(); #endif /*CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("-ar6000_destroy \n")); } static void disconnect_timer_handler(unsigned long ptr) { struct net_device *dev = (struct net_device *)ptr; struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); A_UNTIMEOUT(&ar->disconnect_timer); ar6000_init_profile_info(ar); ar6000_disconnect(ar); } static void ar6000_detect_error(unsigned long ptr) 
{ struct net_device *dev = (struct net_device *)ptr; struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); WMI_TARGET_ERROR_REPORT_EVENT errEvent; AR6000_SPIN_LOCK(&ar->arLock, 0); if (ar->arHBChallengeResp.outstanding) { ar->arHBChallengeResp.missCnt++; } else { ar->arHBChallengeResp.missCnt = 0; } if (ar->arHBChallengeResp.missCnt > ar->arHBChallengeResp.missThres) { /* Send Error Detect event to the application layer and do not reschedule the error detection module timer */ ar->arHBChallengeResp.missCnt = 0; ar->arHBChallengeResp.seqNum = 0; errEvent.errorVal = WMI_TARGET_COM_ERR | WMI_TARGET_FATAL_ERR; AR6000_SPIN_UNLOCK(&ar->arLock, 0); return; } /* Generate the sequence number for the next challenge */ ar->arHBChallengeResp.seqNum++; ar->arHBChallengeResp.outstanding = true; AR6000_SPIN_UNLOCK(&ar->arLock, 0); /* Send the challenge on the control channel */ if (wmi_get_challenge_resp_cmd(ar->arWmi, ar->arHBChallengeResp.seqNum, DRV_HB_CHALLENGE) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to send heart beat challenge\n")); } /* Reschedule the timer for the next challenge */ A_TIMEOUT_MS(&ar->arHBChallengeResp.timer, ar->arHBChallengeResp.frequency * 1000, 0); } void ar6000_init_profile_info(struct ar6_softc *ar) { ar->arSsidLen = 0; A_MEMZERO(ar->arSsid, sizeof(ar->arSsid)); switch(fwmode) { case HI_OPTION_FW_MODE_IBSS: ar->arNetworkType = ar->arNextMode = ADHOC_NETWORK; break; case HI_OPTION_FW_MODE_BSS_STA: ar->arNetworkType = ar->arNextMode = INFRA_NETWORK; break; case HI_OPTION_FW_MODE_AP: ar->arNetworkType = ar->arNextMode = AP_NETWORK; break; } ar->arDot11AuthMode = OPEN_AUTH; ar->arAuthMode = NONE_AUTH; ar->arPairwiseCrypto = NONE_CRYPT; ar->arPairwiseCryptoLen = 0; ar->arGroupCrypto = NONE_CRYPT; ar->arGroupCryptoLen = 0; A_MEMZERO(ar->arWepKeyList, sizeof(ar->arWepKeyList)); A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid)); A_MEMZERO(ar->arBssid, sizeof(ar->arBssid)); ar->arBssChannel = 0; } static void ar6000_init_control_info(struct ar6_softc *ar) { ar->arWmiEnabled = false; ar6000_init_profile_info(ar); ar->arDefTxKeyIndex = 0; A_MEMZERO(ar->arWepKeyList, sizeof(ar->arWepKeyList)); ar->arChannelHint = 0; ar->arListenIntervalT = A_DEFAULT_LISTEN_INTERVAL; ar->arListenIntervalB = 0; ar->arVersion.host_ver = AR6K_SW_VERSION; ar->arRssi = 0; ar->arTxPwr = 0; ar->arTxPwrSet = false; ar->arSkipScan = 0; ar->arBeaconInterval = 0; ar->arBitRate = 0; ar->arMaxRetries = 0; ar->arWmmEnabled = true; ar->intra_bss = 1; ar->scan_triggered = 0; A_MEMZERO(&ar->scParams, sizeof(ar->scParams)); ar->scParams.shortScanRatio = WMI_SHORTSCANRATIO_DEFAULT; ar->scParams.scanCtrlFlags = DEFAULT_SCAN_CTRL_FLAGS; /* Initialize the AP mode state info */ { u8 ctr; A_MEMZERO((u8 *)ar->sta_list, AP_MAX_NUM_STA * sizeof(sta_t)); /* init the Mutexes */ A_MUTEX_INIT(&ar->mcastpsqLock); /* Init the PS queues */ for (ctr=0; ctr < AP_MAX_NUM_STA ; ctr++) { A_MUTEX_INIT(&ar->sta_list[ctr].psqLock); A_NETBUF_QUEUE_INIT(&ar->sta_list[ctr].psq); } ar->ap_profile_flag = 0; A_NETBUF_QUEUE_INIT(&ar->mcastpsq); memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); ar->ap_wmode = DEF_AP_WMODE_G; ar->ap_dtim_period = DEF_AP_DTIM; ar->ap_beacon_interval = DEF_BEACON_INTERVAL; } } static int ar6000_open(struct net_device *dev) { unsigned long flags; struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); spin_lock_irqsave(&ar->arLock, flags); if(ar->arWlanState == WLAN_DISABLED) { ar->arWlanState = WLAN_ENABLED; } if( ar->arConnected || bypasswmi) { netif_carrier_on(dev); /* Wake up the queues */ 
        netif_wake_queue(dev);
    } else
        netif_carrier_off(dev);
    spin_unlock_irqrestore(&ar->arLock, flags);
    return 0;
}

static int ar6000_close(struct net_device *dev)
{
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);

    netif_stop_queue(dev);

    ar6000_disconnect(ar);

    if (ar->arWmiReady == true) {
        if (wmi_scanparams_cmd(ar->arWmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0) {
            return -EIO;
        }
        ar->arWlanState = WLAN_DISABLED;
    }

    ar6k_cfg80211_scanComplete_event(ar, A_ECANCELED);

    return 0;
}

/* connect to a service */
static int ar6000_connectservice(struct ar6_softc *ar,
                                 struct htc_service_connect_req *pConnect,
                                 char *pDesc)
{
    int status;
    struct htc_service_connect_resp response;

    do {
        A_MEMZERO(&response, sizeof(response));

        status = HTCConnectService(ar->arHtcTarget, pConnect, &response);
        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Failed to connect to %s service status:%d \n",
                            pDesc, status));
            break;
        }

        switch (pConnect->ServiceID) {
        case WMI_CONTROL_SVC:
            if (ar->arWmiEnabled) {
                /* set control endpoint for WMI use */
                wmi_set_control_ep(ar->arWmi, response.Endpoint);
            }
            /* save EP for fast lookup */
            ar->arControlEp = response.Endpoint;
            break;
        case WMI_DATA_BE_SVC:
            arSetAc2EndpointIDMap(ar, WMM_AC_BE, response.Endpoint);
            break;
        case WMI_DATA_BK_SVC:
            arSetAc2EndpointIDMap(ar, WMM_AC_BK, response.Endpoint);
            break;
        case WMI_DATA_VI_SVC:
            arSetAc2EndpointIDMap(ar, WMM_AC_VI, response.Endpoint);
            break;
        case WMI_DATA_VO_SVC:
            arSetAc2EndpointIDMap(ar, WMM_AC_VO, response.Endpoint);
            break;
        default:
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ServiceID not mapped %d\n", pConnect->ServiceID));
            status = A_EINVAL;
            break;
        }
    } while (false);

    return status;
}

void ar6000_TxDataCleanup(struct ar6_softc *ar)
{
    /* flush all the data (non-control) streams
     * we only flush packets that are tagged as data, we leave any control
     * packets that were in the TX queues alone */
    HTCFlushEndpoint(ar->arHtcTarget, arAc2EndpointID(ar, WMM_AC_BE), AR6K_DATA_PKT_TAG);
    HTCFlushEndpoint(ar->arHtcTarget, arAc2EndpointID(ar, WMM_AC_BK), AR6K_DATA_PKT_TAG);
    HTCFlushEndpoint(ar->arHtcTarget, arAc2EndpointID(ar, WMM_AC_VI), AR6K_DATA_PKT_TAG);
    HTCFlushEndpoint(ar->arHtcTarget, arAc2EndpointID(ar, WMM_AC_VO), AR6K_DATA_PKT_TAG);
}

HTC_ENDPOINT_ID ar6000_ac2_endpoint_id(void *devt, u8 ac)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;
    return arAc2EndpointID(ar, ac);
}

u8 ar6000_endpoint_id2_ac(void *devt, HTC_ENDPOINT_ID ep)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;
    return arEndpoint2Ac(ar, ep);
}

#if defined(CONFIG_ATH6KL_ENABLE_COEXISTENCE)
static int ath6kl_config_btcoex_params(struct ar6_softc *ar)
{
    int r;
    WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD sbcb_cmd;
    WMI_SET_BTCOEX_FE_ANT_CMD sbfa_cmd;

    /* Configure the type of BT collocated with WLAN */
    memset(&sbcb_cmd, 0, sizeof(WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD));
    sbcb_cmd.btcoexCoLocatedBTdev = ATH6KL_BT_DEV;

    r = wmi_set_btcoex_colocated_bt_dev_cmd(ar->arWmi, &sbcb_cmd);
    if (r) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to set collocated BT type\n"));
        return r;
    }

    /* Configure the front end antenna shared between BT and WLAN */
    memset(&sbfa_cmd, 0, sizeof(WMI_SET_BTCOEX_FE_ANT_CMD));
    sbfa_cmd.btcoexFeAntType = ATH6KL_BT_ANTENNA;

    r = wmi_set_btcoex_fe_ant_cmd(ar->arWmi, &sbfa_cmd);
    if (r) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to set front end antenna configuration\n"));
        return r;
    }

    return 0;
}
#else
static int ath6kl_config_btcoex_params(struct ar6_softc *ar)
{
    return 0;
}
#endif /* CONFIG_ATH6KL_ENABLE_COEXISTENCE */

/*
 * This function applies WLAN specific configuration defined in wlan_config.h
*/ int ar6000_target_config_wlan_params(struct ar6_softc *ar) { int status = 0; #ifdef CONFIG_HOST_TCMD_SUPPORT if (ar->arTargetMode != AR6000_WLAN_MODE) { return 0; } #endif /* CONFIG_HOST_TCMD_SUPPORT */ /* * configure the device for rx dot11 header rules 0,0 are the default values * therefore this command can be skipped if the inputs are 0,FALSE,FALSE.Required * if checksum offload is needed. Set RxMetaVersion to 2 */ if ((wmi_set_rx_frame_format_cmd(ar->arWmi,ar->rxMetaVersion, processDot11Hdr, processDot11Hdr)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set the rx frame format.\n")); status = A_ERROR; } status = ath6kl_config_btcoex_params(ar); if (status) return status; #if WLAN_CONFIG_IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN if ((wmi_pmparams_cmd(ar->arWmi, 0, 1, 0, 0, 1, IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set power save fail event policy\n")); status = A_ERROR; } #endif #if WLAN_CONFIG_DONOT_IGNORE_BARKER_IN_ERP if ((wmi_set_lpreamble_cmd(ar->arWmi, 0, WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set barker preamble policy\n")); status = A_ERROR; } #endif if ((wmi_set_keepalive_cmd(ar->arWmi, WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set keep alive interval\n")); status = A_ERROR; } #if WLAN_CONFIG_DISABLE_11N { WMI_SET_HT_CAP_CMD htCap; memset(&htCap, 0, sizeof(WMI_SET_HT_CAP_CMD)); htCap.band = 0; if ((wmi_set_ht_cap_cmd(ar->arWmi, &htCap)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set ht capabilities \n")); status = A_ERROR; } htCap.band = 1; if ((wmi_set_ht_cap_cmd(ar->arWmi, &htCap)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set ht capabilities \n")); status = A_ERROR; } } #endif /* WLAN_CONFIG_DISABLE_11N */ #ifdef ATH6K_CONFIG_OTA_MODE if ((wmi_powermode_cmd(ar->arWmi, MAX_PERF_POWER)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set power mode \n")); status = A_ERROR; } #endif if ((wmi_disctimeout_cmd(ar->arWmi, WLAN_CONFIG_DISCONNECT_TIMEOUT)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set disconnect timeout \n")); status = A_ERROR; } #if WLAN_CONFIG_DISABLE_TX_BURSTING if ((wmi_set_wmm_txop(ar->arWmi, WMI_TXOP_DISABLED)) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set txop bursting \n")); status = A_ERROR; } #endif return status; } /* This function does one time initialization for the lifetime of the device */ int ar6000_init(struct net_device *dev) { struct ar6_softc *ar; int status; s32 timeleft; s16 i; int ret = 0; if((ar = ar6k_priv(dev)) == NULL) { return -EIO; } if (wlaninitmode == WLAN_INIT_MODE_USR || wlaninitmode == WLAN_INIT_MODE_DRV) { ar6000_update_bdaddr(ar); if (enablerssicompensation) { ar6000_copy_cust_data_from_target(ar->arHifDevice, ar->arTargetType); read_rssi_compensation_param(ar); for (i=-95; i<=0; i++) { rssi_compensation_table[0-i] = rssi_compensation_calc(ar,i); } } } dev_hold(dev); rtnl_unlock(); /* Do we need to finish the BMI phase */ if ((wlaninitmode == WLAN_INIT_MODE_USR || wlaninitmode == WLAN_INIT_MODE_DRV) && (BMIDone(ar->arHifDevice) != 0)) { ret = -EIO; goto ar6000_init_done; } if (!bypasswmi) { #if 0 /* TBDXXX */ if (ar->arVersion.host_ver != ar->arVersion.target_ver) { A_PRINTF("WARNING: Host version 0x%x does not match Target " " version 0x%x!\n", ar->arVersion.host_ver, ar->arVersion.target_ver); } #endif /* Indicate that WMI is enabled (although not ready yet) */ ar->arWmiEnabled = true; if ((ar->arWmi = wmi_init((void *) ar)) == NULL) { 
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize WMI.\n", __func__)); ret = -EIO; goto ar6000_init_done; } AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Got WMI @ 0x%lx.\n", __func__, (unsigned long) ar->arWmi)); } do { struct htc_service_connect_req connect; /* the reason we have to wait for the target here is that the driver layer * has to init BMI in order to set the host block size, */ status = HTCWaitTarget(ar->arHtcTarget); if (status) { break; } A_MEMZERO(&connect,sizeof(connect)); /* meta data is unused for now */ connect.pMetaData = NULL; connect.MetaDataLength = 0; /* these fields are the same for all service endpoints */ connect.EpCallbacks.pContext = ar; connect.EpCallbacks.EpTxCompleteMultiple = ar6000_tx_complete; connect.EpCallbacks.EpRecv = ar6000_rx; connect.EpCallbacks.EpRecvRefill = ar6000_rx_refill; connect.EpCallbacks.EpSendFull = ar6000_tx_queue_full; /* set the max queue depth so that our ar6000_tx_queue_full handler gets called. * Linux has the peculiarity of not providing flow control between the * NIC and the network stack. There is no API to indicate that a TX packet * was sent which could provide some back pressure to the network stack. * Under linux you would have to wait till the network stack consumed all sk_buffs * before any back-flow kicked in. Which isn't very friendly. * So we have to manage this ourselves */ connect.MaxSendQueueDepth = MAX_DEFAULT_SEND_QUEUE_DEPTH; connect.EpCallbacks.RecvRefillWaterMark = AR6000_MAX_RX_BUFFERS / 4; /* set to 25 % */ if (0 == connect.EpCallbacks.RecvRefillWaterMark) { connect.EpCallbacks.RecvRefillWaterMark++; } /* connect to control service */ connect.ServiceID = WMI_CONTROL_SVC; status = ar6000_connectservice(ar, &connect, "WMI CONTROL"); if (status) { break; } connect.LocalConnectionFlags |= HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING; /* limit the HTC message size on the send path, although we can receive A-MSDU frames of * 4K, we will only send ethernet-sized (802.3) frames on the send path. */ connect.MaxSendMsgSize = WMI_MAX_TX_DATA_FRAME_LENGTH; /* to reduce the amount of committed memory for larger A_MSDU frames, use the recv-alloc threshold * mechanism for larger packets */ connect.EpCallbacks.RecvAllocThreshold = AR6000_BUFFER_SIZE; connect.EpCallbacks.EpRecvAllocThresh = ar6000_alloc_amsdu_rxbuf; /* for the remaining data services set the connection flag to reduce dribbling, * if configured to do so */ if (reduce_credit_dribble) { connect.ConnectionFlags |= HTC_CONNECT_FLAGS_REDUCE_CREDIT_DRIBBLE; /* the credit dribble trigger threshold is (reduce_credit_dribble - 1) for a value * of 0-3 */ connect.ConnectionFlags &= ~HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK; connect.ConnectionFlags |= ((u16)reduce_credit_dribble - 1) & HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK; } /* connect to best-effort service */ connect.ServiceID = WMI_DATA_BE_SVC; status = ar6000_connectservice(ar, &connect, "WMI DATA BE"); if (status) { break; } /* connect to back-ground * map this to WMI LOW_PRI */ connect.ServiceID = WMI_DATA_BK_SVC; status = ar6000_connectservice(ar, &connect, "WMI DATA BK"); if (status) { break; } /* connect to Video service, map this to * to HI PRI */ connect.ServiceID = WMI_DATA_VI_SVC; status = ar6000_connectservice(ar, &connect, "WMI DATA VI"); if (status) { break; } /* connect to VO service, this is currently not * mapped to a WMI priority stream due to historical reasons. 
 * WMI originally defined 3 priorities over 3 mailboxes.
 * We can change this when WMI is reworked so that priorities are not
 * dependent on mailboxes */
        connect.ServiceID = WMI_DATA_VO_SVC;
        status = ar6000_connectservice(ar, &connect, "WMI DATA VO");
        if (status) {
            break;
        }

        A_ASSERT(arAc2EndpointID(ar, WMM_AC_BE) != 0);
        A_ASSERT(arAc2EndpointID(ar, WMM_AC_BK) != 0);
        A_ASSERT(arAc2EndpointID(ar, WMM_AC_VI) != 0);
        A_ASSERT(arAc2EndpointID(ar, WMM_AC_VO) != 0);

        /* setup access class priority mappings */
        ar->arAcStreamPriMap[WMM_AC_BK] = 0; /* lowest  */
        ar->arAcStreamPriMap[WMM_AC_BE] = 1;
        ar->arAcStreamPriMap[WMM_AC_VI] = 2;
        ar->arAcStreamPriMap[WMM_AC_VO] = 3; /* highest */

#ifdef EXPORT_HCI_BRIDGE_INTERFACE
        if (setuphci && (NULL != ar6kHciTransCallbacks.setupTransport)) {
            struct hci_transport_misc_handles hciHandles;

            hciHandles.netDevice = ar->arNetDev;
            hciHandles.hifDevice = ar->arHifDevice;
            hciHandles.htcHandle = ar->arHtcTarget;
            status = (int)(ar6kHciTransCallbacks.setupTransport(&hciHandles));
        }
#else
        if (setuphci) {
            /* setup HCI */
            status = ar6000_setup_hci(ar);
        }
#endif
    } while (false);

    if (status) {
        ret = -EIO;
        goto ar6000_init_done;
    }

    if (regscanmode) {
        u32 param;

        if (BMIReadMemory(ar->arHifDevice,
                          HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
                          (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIReadMemory for setting "
                            "regscanmode failed\n"));
            return A_ERROR;
        }

        if (regscanmode == 1)
            param |= HI_OPTION_SKIP_REG_SCAN;
        else if (regscanmode == 2)
            param |= HI_OPTION_INIT_REG_SCAN;

        if (BMIWriteMemory(ar->arHifDevice,
                           HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
                           (u8 *)&param, 4) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIWriteMemory for setting "
                            "regscanmode failed\n"));
            return A_ERROR;
        }
        AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Regulatory scan mode set\n"));
    }

    /*
     * give our connected endpoints some buffers
     */
    ar6000_rx_refill(ar, ar->arControlEp);
    ar6000_rx_refill(ar, arAc2EndpointID(ar, WMM_AC_BE));

    /*
     * We will post the receive buffers only for SPE or endpoint ping testing
     * so we are making it conditional on the 'bypasswmi' flag.
     */
    if (bypasswmi) {
        ar6000_rx_refill(ar, arAc2EndpointID(ar, WMM_AC_BK));
        ar6000_rx_refill(ar, arAc2EndpointID(ar, WMM_AC_VI));
        ar6000_rx_refill(ar, arAc2EndpointID(ar, WMM_AC_VO));
    }

    /* allocate some buffers that handle larger AMSDU frames */
    ar6000_refill_amsdu_rxbufs(ar, AR6000_MAX_AMSDU_RX_BUFFERS);

    /* setup credit distribution */
    ar6000_setup_credit_dist(ar->arHtcTarget, &ar->arCreditStateInfo);

    /* Since cookies are used for HTC transports, they should be
     * initialized prior to enabling HTC.
     */
    ar6000_cookie_init(ar);

    /* start HTC */
    status = HTCStart(ar->arHtcTarget);
    if (status) {
        if (ar->arWmiEnabled == true) {
            wmi_shutdown(ar->arWmi);
            ar->arWmiEnabled = false;
            ar->arWmi = NULL;
        }
        ar6000_cookie_cleanup(ar);
        ret = -EIO;
        goto ar6000_init_done;
    }

    if (!bypasswmi) {
        /* Wait for the WMI-ready event */
        timeleft = wait_event_interruptible_timeout(arEvent,
                        (ar->arWmiReady == true), wmitimeout * HZ);

        if (ar->arVersion.abi_ver != AR6K_ABI_VERSION) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ABI Version mismatch: Host(0x%x), Target(0x%x)\n",
                            AR6K_ABI_VERSION, ar->arVersion.abi_ver));
#ifndef ATH6K_SKIP_ABI_VERSION_CHECK
            ret = -EIO;
            goto ar6000_init_done;
#endif /* ATH6K_SKIP_ABI_VERSION_CHECK */
        }

        if(!timeleft || signal_pending(current)) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI is not ready or wait was interrupted\n"));
            ret = -EIO;
            goto ar6000_init_done;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() WMI is ready\n", __func__));

        /* Communicate the WMI protocol version to the target */
        if ((ar6000_set_host_app_area(ar)) != 0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set the host app area\n"));
        }

        ar6000_target_config_wlan_params(ar);
    }

    ar->arNumDataEndPts = 1;

    if (bypasswmi) {
        /* for tests like endpoint ping, the MAC address needs to be non-zero otherwise
         * the data path through a raw socket is disabled */
        dev->dev_addr[0] = 0x00;
        dev->dev_addr[1] = 0x01;
        dev->dev_addr[2] = 0x02;
        dev->dev_addr[3] = 0xAA;
        dev->dev_addr[4] = 0xBB;
        dev->dev_addr[5] = 0xCC;
    }

ar6000_init_done:
    rtnl_lock();
    dev_put(dev);

    return ret;
}

void
ar6000_bitrate_rx(void *devt, s32 rateKbps)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    ar->arBitRate = rateKbps;
    wake_up(&arEvent);
}

void
ar6000_ratemask_rx(void *devt, u32 ratemask)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    ar->arRateMask = ratemask;
    wake_up(&arEvent);
}

void
ar6000_txPwr_rx(void *devt, u8 txPwr)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    ar->arTxPwr = txPwr;
    wake_up(&arEvent);
}

void
ar6000_channelList_rx(void *devt, s8 numChan, u16 *chanList)
{
    struct ar6_softc *ar = (struct ar6_softc *)devt;

    /* bounds-check the target-supplied count before copying, so a malformed
     * event cannot overflow the channel list (assumes arChannelList is a
     * fixed-size array in struct ar6_softc) */
    if (numChan < 0 || (size_t)numChan > ARRAY_SIZE(ar->arChannelList))
        return;

    memcpy(ar->arChannelList, chanList, numChan * sizeof (u16));
    ar->arNumChannels = numChan;

    wake_up(&arEvent);
}

u8 ar6000_ibss_map_epid(struct sk_buff *skb, struct net_device *dev, u32 *mapNo)
{
    struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
    u8 *datap;
    ATH_MAC_HDR *macHdr;
    u32 i, eptMap;

    (*mapNo) = 0;
    datap = A_NETBUF_DATA(skb);
    macHdr = (ATH_MAC_HDR *)(datap + sizeof(WMI_DATA_HDR));
    if (IEEE80211_IS_MULTICAST(macHdr->dstMac)) {
        return ENDPOINT_2;
    }

    eptMap = -1;
    for (i = 0; i < ar->arNodeNum; i ++) {
        if (IEEE80211_ADDR_EQ(macHdr->dstMac, ar->arNodeMap[i].macAddress)) {
            (*mapNo) = i + 1;
            ar->arNodeMap[i].txPending ++;
            return ar->arNodeMap[i].epId;
        }

        if ((eptMap == -1) && !ar->arNodeMap[i].txPending) {
            eptMap = i;
        }
    }

    if (eptMap == -1) {
        eptMap = ar->arNodeNum;
        ar->arNodeNum ++;
        A_ASSERT(ar->arNodeNum <= MAX_NODE_NUM);
    }

    memcpy(ar->arNodeMap[eptMap].macAddress, macHdr->dstMac, IEEE80211_ADDR_LEN);

    for (i = ENDPOINT_2; i <= ENDPOINT_5; i ++) {
        if (!ar->arTxPending[i]) {
            ar->arNodeMap[eptMap].epId = i;
            break;
        }
        // No free endpoint is available, start redistribution on the in-use endpoints.
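        // (Data traffic rides on ENDPOINT_2..ENDPOINT_5. Once every one of
        // them has tx pending, new IBSS peers are handed endpoints round-robin
        // via ar->arNexEpId, which wraps from ENDPOINT_5 back to ENDPOINT_2
        // in the block below.)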
if (i == ENDPOINT_5) { ar->arNodeMap[eptMap].epId = ar->arNexEpId; ar->arNexEpId ++; if (ar->arNexEpId > ENDPOINT_5) { ar->arNexEpId = ENDPOINT_2; } } } (*mapNo) = eptMap + 1; ar->arNodeMap[eptMap].txPending ++; return ar->arNodeMap[eptMap].epId; } #ifdef DEBUG static void ar6000_dump_skb(struct sk_buff *skb) { u_char *ch; for (ch = A_NETBUF_DATA(skb); (unsigned long)ch < ((unsigned long)A_NETBUF_DATA(skb) + A_NETBUF_LEN(skb)); ch++) { AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("%2.2x ", *ch)); } AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("\n")); } #endif #ifdef HTC_TEST_SEND_PKTS static void DoHTCSendPktsTest(struct ar6_softc *ar, int MapNo, HTC_ENDPOINT_ID eid, struct sk_buff *skb); #endif static int ar6000_data_tx(struct sk_buff *skb, struct net_device *dev) { #define AC_NOT_MAPPED 99 struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); u8 ac = AC_NOT_MAPPED; HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED; u32 mapNo = 0; int len; struct ar_cookie *cookie; bool checkAdHocPsMapping = false,bMoreData = false; HTC_TX_TAG htc_tag = AR6K_DATA_PKT_TAG; u8 dot11Hdr = processDot11Hdr; #ifdef CONFIG_PM if (ar->arWowState != WLAN_WOW_STATE_NONE) { A_NETBUF_FREE(skb); return 0; } #endif /* CONFIG_PM */ AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_data_tx start - skb=0x%lx, data=0x%lx, len=0x%x\n", (unsigned long)skb, (unsigned long)A_NETBUF_DATA(skb), A_NETBUF_LEN(skb))); /* If target is not associated */ if( (!ar->arConnected && !bypasswmi) #ifdef CONFIG_HOST_TCMD_SUPPORT /* TCMD doesn't support any data, free the buf and return */ || (ar->arTargetMode == AR6000_TCMD_MODE) #endif ) { A_NETBUF_FREE(skb); return 0; } do { if (ar->arWmiReady == false && bypasswmi == 0) { break; } #ifdef BLOCK_TX_PATH_FLAG if (blocktx) { break; } #endif /* BLOCK_TX_PATH_FLAG */ /* AP mode Power save processing */ /* If the dst STA is in sleep state, queue the pkt in its PS queue */ if (ar->arNetworkType == AP_NETWORK) { ATH_MAC_HDR *datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb); sta_t *conn = NULL; /* If the dstMac is a Multicast address & atleast one of the * associated STA is in PS mode, then queue the pkt to the * mcastq */ if (IEEE80211_IS_MULTICAST(datap->dstMac)) { u8 ctr=0; bool qMcast=false; for (ctr=0; ctr<AP_MAX_NUM_STA; ctr++) { if (STA_IS_PWR_SLEEP((&ar->sta_list[ctr]))) { qMcast = true; } } if(qMcast) { /* If this transmit is not because of a Dtim Expiry q it */ if (ar->DTIMExpired == false) { bool isMcastqEmpty = false; A_MUTEX_LOCK(&ar->mcastpsqLock); isMcastqEmpty = A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq); A_NETBUF_ENQUEUE(&ar->mcastpsq, skb); A_MUTEX_UNLOCK(&ar->mcastpsqLock); /* If this is the first Mcast pkt getting queued * indicate to the target to set the BitmapControl LSB * of the TIM IE. */ if (isMcastqEmpty) { wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 1); } return 0; } else { /* This transmit is because of Dtim expiry. Determine if * MoreData bit has to be set. 
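                         * (The MoreData bit tells a dozing STA that more
                         * buffered frames remain queued for it, so it keeps
                         * polling instead of going back to sleep.)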
*/ A_MUTEX_LOCK(&ar->mcastpsqLock); if(!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) { bMoreData = true; } A_MUTEX_UNLOCK(&ar->mcastpsqLock); } } } else { conn = ieee80211_find_conn(ar, datap->dstMac); if (conn) { if (STA_IS_PWR_SLEEP(conn)) { /* If this transmit is not because of a PsPoll q it*/ if (!STA_IS_PS_POLLED(conn)) { bool isPsqEmpty = false; /* Queue the frames if the STA is sleeping */ A_MUTEX_LOCK(&conn->psqLock); isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq); A_NETBUF_ENQUEUE(&conn->psq, skb); A_MUTEX_UNLOCK(&conn->psqLock); /* If this is the first pkt getting queued * for this STA, update the PVB for this STA */ if (isPsqEmpty) { wmi_set_pvb_cmd(ar->arWmi, conn->aid, 1); } return 0; } else { /* This tx is because of a PsPoll. Determine if * MoreData bit has to be set */ A_MUTEX_LOCK(&conn->psqLock); if (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) { bMoreData = true; } A_MUTEX_UNLOCK(&conn->psqLock); } } } else { /* non existent STA. drop the frame */ A_NETBUF_FREE(skb); return 0; } } } if (ar->arWmiEnabled) { u8 csumStart=0; u8 csumDest=0; u8 csum=skb->ip_summed; if(csumOffload && (csum==CHECKSUM_PARTIAL)){ csumStart = (skb->head + skb->csum_start - skb_network_header(skb) + sizeof(ATH_LLC_SNAP_HDR)); csumDest=skb->csum_offset+csumStart; } if (A_NETBUF_HEADROOM(skb) < dev->hard_header_len - LINUX_HACK_FUDGE_FACTOR) { struct sk_buff *newbuf; /* * We really should have gotten enough headroom but sometimes * we still get packets with not enough headroom. Copy the packet. */ len = A_NETBUF_LEN(skb); newbuf = A_NETBUF_ALLOC(len); if (newbuf == NULL) { break; } A_NETBUF_PUT(newbuf, len); memcpy(A_NETBUF_DATA(newbuf), A_NETBUF_DATA(skb), len); A_NETBUF_FREE(skb); skb = newbuf; /* fall through and assemble header */ } if (dot11Hdr) { if (wmi_dot11_hdr_add(ar->arWmi,skb,ar->arNetworkType) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx-wmi_dot11_hdr_add failed\n")); break; } } else { if (wmi_dix_2_dot3(ar->arWmi, skb) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_dix_2_dot3 failed\n")); break; } } if(csumOffload && (csum ==CHECKSUM_PARTIAL)){ WMI_TX_META_V2 metaV2; metaV2.csumStart =csumStart; metaV2.csumDest = csumDest; metaV2.csumFlags = 0x1;/*instruct target to calculate checksum*/ if (wmi_data_hdr_add(ar->arWmi, skb, DATA_MSGTYPE, bMoreData, dot11Hdr, WMI_META_VERSION_2,&metaV2) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_data_hdr_add failed\n")); break; } } else { if (wmi_data_hdr_add(ar->arWmi, skb, DATA_MSGTYPE, bMoreData, dot11Hdr,0,NULL) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_data_hdr_add failed\n")); break; } } if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable && ar->arConnected) { /* flag to check adhoc mapping once we take the lock below: */ checkAdHocPsMapping = true; } else { /* get the stream mapping */ ac = wmi_implicit_create_pstream(ar->arWmi, skb, 0, ar->arWmmEnabled); } } else { EPPING_HEADER *eppingHdr; eppingHdr = A_NETBUF_DATA(skb); if (IS_EPPING_PACKET(eppingHdr)) { /* the stream ID is mapped to an access class */ ac = eppingHdr->StreamNo_h; /* some EPPING packets cannot be dropped no matter what access class it was * sent on. 
We can change the packet tag to guarantee it will not get dropped */ if (IS_EPING_PACKET_NO_DROP(eppingHdr)) { htc_tag = AR6K_CONTROL_PKT_TAG; } if (ac == HCI_TRANSPORT_STREAM_NUM) { /* pass this to HCI */ #ifndef EXPORT_HCI_BRIDGE_INTERFACE if (!hci_test_send(ar,skb)) { return 0; } #endif /* set AC to discard this skb */ ac = AC_NOT_MAPPED; } else { /* a quirk of linux, the payload of the frame is 32-bit aligned and thus the addition * of the HTC header will mis-align the start of the HTC frame, so we add some * padding which will be stripped off in the target */ if (EPPING_ALIGNMENT_PAD > 0) { A_NETBUF_PUSH(skb, EPPING_ALIGNMENT_PAD); } } } else { /* not a ping packet, drop it */ ac = AC_NOT_MAPPED; } } } while (false); /* did we succeed ? */ if ((ac == AC_NOT_MAPPED) && !checkAdHocPsMapping) { /* cleanup and exit */ A_NETBUF_FREE(skb); AR6000_STAT_INC(ar, tx_dropped); AR6000_STAT_INC(ar, tx_aborted_errors); return 0; } cookie = NULL; /* take the lock to protect driver data */ AR6000_SPIN_LOCK(&ar->arLock, 0); do { if (checkAdHocPsMapping) { eid = ar6000_ibss_map_epid(skb, dev, &mapNo); }else { eid = arAc2EndpointID (ar, ac); } /* validate that the endpoint is connected */ if (eid == 0 || eid == ENDPOINT_UNUSED ) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" eid %d is NOT mapped!\n", eid)); break; } /* allocate resource for this packet */ cookie = ar6000_alloc_cookie(ar); if (cookie != NULL) { /* update counts while the lock is held */ ar->arTxPending[eid]++; ar->arTotalTxDataPending++; } } while (false); AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (cookie != NULL) { cookie->arc_bp[0] = (unsigned long)skb; cookie->arc_bp[1] = mapNo; SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, cookie, A_NETBUF_DATA(skb), A_NETBUF_LEN(skb), eid, htc_tag); #ifdef DEBUG if (debugdriver >= 3) { ar6000_dump_skb(skb); } #endif #ifdef HTC_TEST_SEND_PKTS DoHTCSendPktsTest(ar,mapNo,eid,skb); #endif /* HTC interface is asynchronous, if this fails, cleanup will happen in * the ar6000_tx_complete callback */ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt); } else { /* no packet to send, cleanup */ A_NETBUF_FREE(skb); AR6000_STAT_INC(ar, tx_dropped); AR6000_STAT_INC(ar, tx_aborted_errors); } return 0; } int ar6000_acl_data_tx(struct sk_buff *skb, struct net_device *dev) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); struct ar_cookie *cookie; HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED; cookie = NULL; AR6000_SPIN_LOCK(&ar->arLock, 0); /* For now we send ACL on BE endpoint: We can also have a dedicated EP */ eid = arAc2EndpointID (ar, 0); /* allocate resource for this packet */ cookie = ar6000_alloc_cookie(ar); if (cookie != NULL) { /* update counts while the lock is held */ ar->arTxPending[eid]++; ar->arTotalTxDataPending++; } AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (cookie != NULL) { cookie->arc_bp[0] = (unsigned long)skb; cookie->arc_bp[1] = 0; SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, cookie, A_NETBUF_DATA(skb), A_NETBUF_LEN(skb), eid, AR6K_DATA_PKT_TAG); /* HTC interface is asynchronous, if this fails, cleanup will happen in * the ar6000_tx_complete callback */ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt); } else { /* no packet to send, cleanup */ A_NETBUF_FREE(skb); AR6000_STAT_INC(ar, tx_dropped); AR6000_STAT_INC(ar, tx_aborted_errors); } return 0; } #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL static void tvsub(register struct timeval *out, register struct timeval *in) { if((out->tv_usec -= in->tv_usec) < 0) { out->tv_sec--; out->tv_usec += 1000000; } out->tv_sec -= in->tv_sec; } void applyAPTCHeuristics(struct ar6_softc *ar) { u32 
duration; u32 numbytes; u32 throughput; struct timeval ts; int status; AR6000_SPIN_LOCK(&ar->arLock, 0); if ((enableAPTCHeuristics) && (!aptcTR.timerScheduled)) { do_gettimeofday(&ts); tvsub(&ts, &aptcTR.samplingTS); duration = ts.tv_sec * 1000 + ts.tv_usec / 1000; /* ms */ numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived; if (duration > APTC_TRAFFIC_SAMPLING_INTERVAL) { /* Initialize the time stamp and byte count */ aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0; do_gettimeofday(&aptcTR.samplingTS); /* Calculate and decide based on throughput thresholds */ throughput = ((numbytes * 8) / duration); if (throughput > APTC_UPPER_THROUGHPUT_THRESHOLD) { /* Disable Sleep and schedule a timer */ A_ASSERT(ar->arWmiReady == true); AR6000_SPIN_UNLOCK(&ar->arLock, 0); status = wmi_powermode_cmd(ar->arWmi, MAX_PERF_POWER); AR6000_SPIN_LOCK(&ar->arLock, 0); A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0); aptcTR.timerScheduled = true; } } } AR6000_SPIN_UNLOCK(&ar->arLock, 0); } #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ static HTC_SEND_FULL_ACTION ar6000_tx_queue_full(void *Context, struct htc_packet *pPacket) { struct ar6_softc *ar = (struct ar6_softc *)Context; HTC_SEND_FULL_ACTION action = HTC_SEND_FULL_KEEP; bool stopNet = false; HTC_ENDPOINT_ID Endpoint = HTC_GET_ENDPOINT_FROM_PKT(pPacket); do { if (bypasswmi) { int accessClass; if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_CONTROL_PKT_TAG) { /* don't drop special control packets */ break; } accessClass = arEndpoint2Ac(ar,Endpoint); /* for endpoint ping testing drop Best Effort and Background */ if ((accessClass == WMM_AC_BE) || (accessClass == WMM_AC_BK)) { action = HTC_SEND_FULL_DROP; stopNet = false; } else { /* keep but stop the netqueues */ stopNet = true; } break; } if (Endpoint == ar->arControlEp) { /* under normal WMI if this is getting full, then something is running rampant * the host should not be exhausting the WMI queue with too many commands * the only exception to this is during testing using endpointping */ AR6000_SPIN_LOCK(&ar->arLock, 0); /* set flag to handle subsequent messages */ ar->arWMIControlEpFull = true; AR6000_SPIN_UNLOCK(&ar->arLock, 0); AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI Control Endpoint is FULL!!! 
\n")); /* no need to stop the network */ stopNet = false; break; } /* if we get here, we are dealing with data endpoints getting full */ if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_CONTROL_PKT_TAG) { /* don't drop control packets issued on ANY data endpoint */ break; } if (ar->arNetworkType == ADHOC_NETWORK) { /* in adhoc mode, we cannot differentiate traffic priorities so there is no need to * continue, however we should stop the network */ stopNet = true; break; } /* the last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for the highest * active stream */ if (ar->arAcStreamPriMap[arEndpoint2Ac(ar,Endpoint)] < ar->arHiAcStreamActivePri && ar->arCookieCount <= MAX_HI_COOKIE_NUM) { /* this stream's priority is less than the highest active priority, we * give preference to the highest priority stream by directing * HTC to drop the packet that overflowed */ action = HTC_SEND_FULL_DROP; /* since we are dropping packets, no need to stop the network */ stopNet = false; break; } } while (false); if (stopNet) { AR6000_SPIN_LOCK(&ar->arLock, 0); ar->arNetQueueStopped = true; AR6000_SPIN_UNLOCK(&ar->arLock, 0); /* one of the data endpoints queues is getting full..need to stop network stack * the queue will resume in ar6000_tx_complete() */ netif_stop_queue(ar->arNetDev); } return action; } static void ar6000_tx_complete(void *Context, struct htc_packet_queue *pPacketQueue) { struct ar6_softc *ar = (struct ar6_softc *)Context; u32 mapNo = 0; int status; struct ar_cookie * ar_cookie; HTC_ENDPOINT_ID eid; bool wakeEvent = false; struct sk_buff_head skb_queue; struct htc_packet *pPacket; struct sk_buff *pktSkb; bool flushing = false; skb_queue_head_init(&skb_queue); /* lock the driver as we update internal state */ AR6000_SPIN_LOCK(&ar->arLock, 0); /* reap completed packets */ while (!HTC_QUEUE_EMPTY(pPacketQueue)) { pPacket = HTC_PACKET_DEQUEUE(pPacketQueue); ar_cookie = (struct ar_cookie *)pPacket->pPktContext; A_ASSERT(ar_cookie); status = pPacket->Status; pktSkb = (struct sk_buff *)ar_cookie->arc_bp[0]; eid = pPacket->Endpoint; mapNo = ar_cookie->arc_bp[1]; A_ASSERT(pktSkb); A_ASSERT(pPacket->pBuffer == A_NETBUF_DATA(pktSkb)); /* add this to the list, use faster non-lock API */ __skb_queue_tail(&skb_queue,pktSkb); if (!status) { A_ASSERT(pPacket->ActualLength == A_NETBUF_LEN(pktSkb)); } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_tx_complete skb=0x%lx data=0x%lx len=0x%x eid=%d ", (unsigned long)pktSkb, (unsigned long)pPacket->pBuffer, pPacket->ActualLength, eid)); ar->arTxPending[eid]--; if ((eid != ar->arControlEp) || bypasswmi) { ar->arTotalTxDataPending--; } if (eid == ar->arControlEp) { if (ar->arWMIControlEpFull) { /* since this packet completed, the WMI EP is no longer full */ ar->arWMIControlEpFull = false; } if (ar->arTxPending[eid] == 0) { wakeEvent = true; } } if (status) { if (status == A_ECANCELED) { /* a packet was flushed */ flushing = true; } AR6000_STAT_INC(ar, tx_errors); if (status != A_NO_RESOURCE) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() -TX ERROR, status: 0x%x\n", __func__, status)); } } else { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("OK\n")); flushing = false; AR6000_STAT_INC(ar, tx_packets); ar->arNetStats.tx_bytes += A_NETBUF_LEN(pktSkb); #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL aptcTR.bytesTransmitted += a_netbuf_to_len(pktSkb); applyAPTCHeuristics(ar); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ } // TODO this needs to be looked at if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable && (eid != ar->arControlEp) && mapNo) { mapNo --; ar->arNodeMap[mapNo].txPending 
--; if (!ar->arNodeMap[mapNo].txPending && (mapNo == (ar->arNodeNum - 1))) { u32 i; for (i = ar->arNodeNum; i > 0; i --) { if (!ar->arNodeMap[i - 1].txPending) { A_MEMZERO(&ar->arNodeMap[i - 1], sizeof(struct ar_node_mapping)); ar->arNodeNum --; } else { break; } } } } ar6000_free_cookie(ar, ar_cookie); if (ar->arNetQueueStopped) { ar->arNetQueueStopped = false; } } AR6000_SPIN_UNLOCK(&ar->arLock, 0); /* lock is released, we can freely call other kernel APIs */ /* free all skbs in our local list */ while (!skb_queue_empty(&skb_queue)) { /* use non-lock version */ pktSkb = __skb_dequeue(&skb_queue); A_NETBUF_FREE(pktSkb); } if ((ar->arConnected == true) || bypasswmi) { if (!flushing) { /* don't wake the queue if we are flushing, other wise it will just * keep queueing packets, which will keep failing */ netif_wake_queue(ar->arNetDev); } } if (wakeEvent) { wake_up(&arEvent); } } sta_t * ieee80211_find_conn(struct ar6_softc *ar, u8 *node_addr) { sta_t *conn = NULL; u8 i, max_conn; switch(ar->arNetworkType) { case AP_NETWORK: max_conn = AP_MAX_NUM_STA; break; default: max_conn=0; break; } for (i = 0; i < max_conn; i++) { if (IEEE80211_ADDR_EQ(node_addr, ar->sta_list[i].mac)) { conn = &ar->sta_list[i]; break; } } return conn; } sta_t *ieee80211_find_conn_for_aid(struct ar6_softc *ar, u8 aid) { sta_t *conn = NULL; u8 ctr; for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { if (ar->sta_list[ctr].aid == aid) { conn = &ar->sta_list[ctr]; break; } } return conn; } /* * Receive event handler. This is called by HTC when a packet is received */ int pktcount; static void ar6000_rx(void *Context, struct htc_packet *pPacket) { struct ar6_softc *ar = (struct ar6_softc *)Context; struct sk_buff *skb = (struct sk_buff *)pPacket->pPktContext; int minHdrLen; u8 containsDot11Hdr = 0; int status = pPacket->Status; HTC_ENDPOINT_ID ept = pPacket->Endpoint; A_ASSERT((status) || (pPacket->pBuffer == (A_NETBUF_DATA(skb) + HTC_HEADER_LEN))); AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx ar=0x%lx eid=%d, skb=0x%lx, data=0x%lx, len=0x%x status:%d", (unsigned long)ar, ept, (unsigned long)skb, (unsigned long)pPacket->pBuffer, pPacket->ActualLength, status)); if (status) { if (status != A_ECANCELED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("RX ERR (%d) \n",status)); } } /* take lock to protect buffer counts * and adaptive power throughput state */ AR6000_SPIN_LOCK(&ar->arLock, 0); if (!status) { AR6000_STAT_INC(ar, rx_packets); ar->arNetStats.rx_bytes += pPacket->ActualLength; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL aptcTR.bytesReceived += a_netbuf_to_len(skb); applyAPTCHeuristics(ar); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ A_NETBUF_PUT(skb, pPacket->ActualLength + HTC_HEADER_LEN); A_NETBUF_PULL(skb, HTC_HEADER_LEN); #ifdef DEBUG if (debugdriver >= 2) { ar6000_dump_skb(skb); } #endif /* DEBUG */ } AR6000_SPIN_UNLOCK(&ar->arLock, 0); skb->dev = ar->arNetDev; if (status) { AR6000_STAT_INC(ar, rx_errors); A_NETBUF_FREE(skb); } else if (ar->arWmiEnabled == true) { if (ept == ar->arControlEp) { /* * this is a wmi control msg */ #ifdef CONFIG_PM ar6000_check_wow_status(ar, skb, true); #endif /* CONFIG_PM */ wmi_control_rx(ar->arWmi, skb); } else { WMI_DATA_HDR *dhdr = (WMI_DATA_HDR *)A_NETBUF_DATA(skb); bool is_amsdu; u8 tid; /* * This check can be removed if after a while we do not * see the warning. For now we leave it to ensure * we drop these frames accordingly in case the * target generates them for some reason. These * were used for an internal PAL but that's not * used or supported anymore. 
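             * (PAL here appears to refer to the Bluetooth 3.0 802.11
             * Protocol Adaptation Layer that this driver once bridged;
             * see register_pal_cb() further down in this file.)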
These frames should * not come up from the target. */ if (WARN_ON(WMI_DATA_HDR_GET_DATA_TYPE(dhdr) == WMI_DATA_HDR_DATA_TYPE_ACL)) { AR6000_STAT_INC(ar, rx_errors); A_NETBUF_FREE(skb); return; } #ifdef CONFIG_PM ar6000_check_wow_status(ar, NULL, false); #endif /* CONFIG_PM */ /* * this is a wmi data packet */ // NWF if (processDot11Hdr) { minHdrLen = sizeof(WMI_DATA_HDR) + sizeof(struct ieee80211_frame) + sizeof(ATH_LLC_SNAP_HDR); } else { minHdrLen = sizeof (WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR); } /* In the case of AP mode we may receive NULL data frames * that do not have LLC hdr. They are 16 bytes in size. * Allow these frames in the AP mode. * ACL data frames don't follow ethernet frame bounds for * min length */ if (ar->arNetworkType != AP_NETWORK && ((pPacket->ActualLength < minHdrLen) || (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE))) { /* * packet is too short or too long */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("TOO SHORT or TOO LONG\n")); AR6000_STAT_INC(ar, rx_errors); AR6000_STAT_INC(ar, rx_length_errors); A_NETBUF_FREE(skb); } else { u16 seq_no; u8 meta_type; #if 0 /* Access RSSI values here */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("RSSI %d\n", ((WMI_DATA_HDR *) A_NETBUF_DATA(skb))->rssi)); #endif /* Get the Power save state of the STA */ if (ar->arNetworkType == AP_NETWORK) { sta_t *conn = NULL; u8 psState=0,prevPsState; ATH_MAC_HDR *datap=NULL; u16 offset; meta_type = WMI_DATA_HDR_GET_META(dhdr); psState = (((WMI_DATA_HDR *)A_NETBUF_DATA(skb))->info >> WMI_DATA_HDR_PS_SHIFT) & WMI_DATA_HDR_PS_MASK; offset = sizeof(WMI_DATA_HDR); switch (meta_type) { case 0: break; case WMI_META_VERSION_1: offset += sizeof(WMI_RX_META_V1); break; case WMI_META_VERSION_2: offset += sizeof(WMI_RX_META_V2); break; default: break; } datap = (ATH_MAC_HDR *)(A_NETBUF_DATA(skb)+offset); conn = ieee80211_find_conn(ar, datap->srcMac); if (conn) { /* if there is a change in PS state of the STA, * take appropriate steps. * 1. If Sleep-->Awake, flush the psq for the STA * Clear the PVB for the STA. * 2. If Awake-->Sleep, Starting queueing frames * the STA. */ prevPsState = STA_IS_PWR_SLEEP(conn); if (psState) { STA_SET_PWR_SLEEP(conn); } else { STA_CLR_PWR_SLEEP(conn); } if (prevPsState ^ STA_IS_PWR_SLEEP(conn)) { if (!STA_IS_PWR_SLEEP(conn)) { A_MUTEX_LOCK(&conn->psqLock); while (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) { struct sk_buff *skb=NULL; skb = A_NETBUF_DEQUEUE(&conn->psq); A_MUTEX_UNLOCK(&conn->psqLock); ar6000_data_tx(skb,ar->arNetDev); A_MUTEX_LOCK(&conn->psqLock); } A_MUTEX_UNLOCK(&conn->psqLock); /* Clear the PVB for this STA */ wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0); } } } else { /* This frame is from a STA that is not associated*/ A_ASSERT(false); } /* Drop NULL data frames here */ if((pPacket->ActualLength < minHdrLen) || (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE)) { A_NETBUF_FREE(skb); goto rx_done; } } is_amsdu = WMI_DATA_HDR_IS_AMSDU(dhdr) ? 
true : false;
                tid = WMI_DATA_HDR_GET_UP(dhdr);
                seq_no = WMI_DATA_HDR_GET_SEQNO(dhdr);
                meta_type = WMI_DATA_HDR_GET_META(dhdr);
                containsDot11Hdr = WMI_DATA_HDR_GET_DOT11(dhdr);

                wmi_data_hdr_remove(ar->arWmi, skb);

                switch (meta_type) {
                case WMI_META_VERSION_1:
                    {
                        WMI_RX_META_V1 *pMeta = (WMI_RX_META_V1 *)A_NETBUF_DATA(skb);
                        A_PRINTF("META %d %d %d %d %x\n",
                                 pMeta->status, pMeta->rix, pMeta->rssi,
                                 pMeta->channel, pMeta->flags);
                        A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V1));
                        break;
                    }
                case WMI_META_VERSION_2:
                    {
                        WMI_RX_META_V2 *pMeta = (WMI_RX_META_V2 *)A_NETBUF_DATA(skb);
                        if(pMeta->csumFlags & 0x1){
                            skb->ip_summed=CHECKSUM_COMPLETE;
                            skb->csum=(pMeta->csum);
                        }
                        A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V2));
                        break;
                    }
                default:
                    break;
                }

                A_ASSERT(status == 0);

                /* NWF: strip the 802.11 header if the target left one on */
                if(containsDot11Hdr) {
                    status = wmi_dot11_hdr_remove(ar->arWmi,skb);
                } else if(!is_amsdu) {
                    status = wmi_dot3_2_dix(skb);
                }

                if (status) {
                    /* Drop frames that could not be processed (lack of memory, etc.) */
                    A_NETBUF_FREE(skb);
                    goto rx_done;
                }

                if ((ar->arNetDev->flags & IFF_UP) == IFF_UP) {
                    if (ar->arNetworkType == AP_NETWORK) {
                        struct sk_buff *skb1 = NULL;
                        ATH_MAC_HDR *datap;

                        datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb);
                        if (IEEE80211_IS_MULTICAST(datap->dstMac)) {
                            /* Bcast/Mcast frames should be sent to the OS
                             * stack as well as on the air.
                             */
                            skb1 = skb_copy(skb,GFP_ATOMIC);
                        } else {
                            /* Search for a connected STA with dstMac as
                             * the Mac address. If found send the frame to
                             * it on the air else send the frame up the
                             * stack
                             */
                            sta_t *conn = NULL;
                            conn = ieee80211_find_conn(ar, datap->dstMac);

                            if (conn && ar->intra_bss) {
                                skb1 = skb;
                                skb = NULL;
                            } else if(conn && !ar->intra_bss) {
                                A_NETBUF_FREE(skb);
                                skb = NULL;
                            }
                        }
                        if (skb1) {
                            ar6000_data_tx(skb1, ar->arNetDev);
                        }
                    }
                }

                aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, is_amsdu, (void **)&skb);
                ar6000_deliver_frames_to_nw_stack((void *) ar->arNetDev, (void *)skb);
            }
        }
    } else {
        if (EPPING_ALIGNMENT_PAD > 0) {
            A_NETBUF_PULL(skb, EPPING_ALIGNMENT_PAD);
        }
        ar6000_deliver_frames_to_nw_stack((void *)ar->arNetDev, (void *)skb);
    }

rx_done:
    return;
}

static void
ar6000_deliver_frames_to_nw_stack(void *dev, void *osbuf)
{
    struct sk_buff *skb = (struct sk_buff *)osbuf;

    if(skb) {
        skb->dev = dev;
        if ((skb->dev->flags & IFF_UP) == IFF_UP) {
#ifdef CONFIG_PM
            ar6000_check_wow_status((struct ar6_softc *)ar6k_priv(dev), skb, false);
#endif /* CONFIG_PM */
            skb->protocol = eth_type_trans(skb, skb->dev);
            /*
             * If this routine is called from an ISR (hard IRQ), DSR (soft IRQ)
             * or tasklet, use netif_rx to deliver the packet to the stack:
             * netif_rx queues the packet onto the receive queue and marks a
             * softirq action as pending, which the kernel schedules after
             * processing the DSR.
             *
             * If this routine is called in process context, use netif_rx_ni,
             * which will schedule the softirq kernel thread after queuing the packet.
*/ if (in_interrupt()) { netif_rx(skb); } else { netif_rx_ni(skb); } } else { A_NETBUF_FREE(skb); } } } #if 0 static void ar6000_deliver_frames_to_bt_stack(void *dev, void *osbuf) { struct sk_buff *skb = (struct sk_buff *)osbuf; if(skb) { skb->dev = dev; if ((skb->dev->flags & IFF_UP) == IFF_UP) { skb->protocol = htons(ETH_P_CONTROL); netif_rx(skb); } else { A_NETBUF_FREE(skb); } } } #endif static void ar6000_rx_refill(void *Context, HTC_ENDPOINT_ID Endpoint) { struct ar6_softc *ar = (struct ar6_softc *)Context; void *osBuf; int RxBuffers; int buffersToRefill; struct htc_packet *pPacket; struct htc_packet_queue queue; buffersToRefill = (int)AR6000_MAX_RX_BUFFERS - HTCGetNumRecvBuffers(ar->arHtcTarget, Endpoint); if (buffersToRefill <= 0) { /* fast return, nothing to fill */ return; } INIT_HTC_PACKET_QUEUE(&queue); AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx_refill: providing htc with %d buffers at eid=%d\n", buffersToRefill, Endpoint)); for (RxBuffers = 0; RxBuffers < buffersToRefill; RxBuffers++) { osBuf = A_NETBUF_ALLOC(AR6000_BUFFER_SIZE); if (NULL == osBuf) { break; } /* the HTC packet wrapper is at the head of the reserved area * in the skb */ pPacket = (struct htc_packet *)(A_NETBUF_HEAD(osBuf)); /* set re-fill info */ SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),AR6000_BUFFER_SIZE,Endpoint); /* add to queue */ HTC_PACKET_ENQUEUE(&queue,pPacket); } if (!HTC_QUEUE_EMPTY(&queue)) { /* add packets */ HTCAddReceivePktMultiple(ar->arHtcTarget, &queue); } } /* clean up our amsdu buffer list */ static void ar6000_cleanup_amsdu_rxbufs(struct ar6_softc *ar) { struct htc_packet *pPacket; void *osBuf; /* empty AMSDU buffer queue and free OS bufs */ while (true) { AR6000_SPIN_LOCK(&ar->arLock, 0); pPacket = HTC_PACKET_DEQUEUE(&ar->amsdu_rx_buffer_queue); AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (NULL == pPacket) { break; } osBuf = pPacket->pPktContext; if (NULL == osBuf) { A_ASSERT(false); break; } A_NETBUF_FREE(osBuf); } } /* refill the amsdu buffer list */ static void ar6000_refill_amsdu_rxbufs(struct ar6_softc *ar, int Count) { struct htc_packet *pPacket; void *osBuf; while (Count > 0) { osBuf = A_NETBUF_ALLOC(AR6000_AMSDU_BUFFER_SIZE); if (NULL == osBuf) { break; } /* the HTC packet wrapper is at the head of the reserved area * in the skb */ pPacket = (struct htc_packet *)(A_NETBUF_HEAD(osBuf)); /* set re-fill info */ SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),AR6000_AMSDU_BUFFER_SIZE,0); AR6000_SPIN_LOCK(&ar->arLock, 0); /* put it in the list */ HTC_PACKET_ENQUEUE(&ar->amsdu_rx_buffer_queue,pPacket); AR6000_SPIN_UNLOCK(&ar->arLock, 0); Count--; } } /* callback to allocate a large receive buffer for a pending packet. This function is called when * an HTC packet arrives whose length exceeds a threshold value * * We use a pre-allocated list of buffers of maximum AMSDU size (4K). Under linux it is more optimal to * keep the allocation size the same to optimize cached-slab allocations. 
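 * (All buffers being the one maximum AMSDU size means they are served from a
 * single kmalloc cache, so freed buffers are recycled directly instead of
 * churning several differently sized caches.)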
* * */ static struct htc_packet *ar6000_alloc_amsdu_rxbuf(void *Context, HTC_ENDPOINT_ID Endpoint, int Length) { struct htc_packet *pPacket = NULL; struct ar6_softc *ar = (struct ar6_softc *)Context; int refillCount = 0; AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_alloc_amsdu_rxbuf: eid=%d, Length:%d\n",Endpoint,Length)); do { if (Length <= AR6000_BUFFER_SIZE) { /* shouldn't be getting called on normal sized packets */ A_ASSERT(false); break; } if (Length > AR6000_AMSDU_BUFFER_SIZE) { A_ASSERT(false); break; } AR6000_SPIN_LOCK(&ar->arLock, 0); /* allocate a packet from the list */ pPacket = HTC_PACKET_DEQUEUE(&ar->amsdu_rx_buffer_queue); /* see if we need to refill again */ refillCount = AR6000_MAX_AMSDU_RX_BUFFERS - HTC_PACKET_QUEUE_DEPTH(&ar->amsdu_rx_buffer_queue); AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (NULL == pPacket) { break; } /* set actual endpoint ID */ pPacket->Endpoint = Endpoint; } while (false); if (refillCount >= AR6000_AMSDU_REFILL_THRESHOLD) { ar6000_refill_amsdu_rxbufs(ar,refillCount); } return pPacket; } static void ar6000_set_multicast_list(struct net_device *dev) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000: Multicast filter not supported\n")); } static struct net_device_stats * ar6000_get_stats(struct net_device *dev) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); return &ar->arNetStats; } void ar6000_ready_event(void *devt, u8 *datap, u8 phyCap, u32 sw_ver, u32 abi_ver) { struct ar6_softc *ar = (struct ar6_softc *)devt; struct net_device *dev = ar->arNetDev; memcpy(dev->dev_addr, datap, AR6000_ETH_ADDR_LEN); AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("mac address = %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5])); ar->arPhyCapability = phyCap; ar->arVersion.wlan_ver = sw_ver; ar->arVersion.abi_ver = abi_ver; /* Indicate to the waiting thread that the ready event was received */ ar->arWmiReady = true; wake_up(&arEvent); } void ar6000_install_static_wep_keys(struct ar6_softc *ar) { u8 index; u8 keyUsage; for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) { if (ar->arWepKeyList[index].arKeyLen) { keyUsage = GROUP_USAGE; if (index == ar->arDefTxKeyIndex) { keyUsage |= TX_USAGE; } wmi_addKey_cmd(ar->arWmi, index, WEP_CRYPT, keyUsage, ar->arWepKeyList[index].arKeyLen, NULL, ar->arWepKeyList[index].arKey, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); } } } void add_new_sta(struct ar6_softc *ar, u8 *mac, u16 aid, u8 *wpaie, u8 ielen, u8 keymgmt, u8 ucipher, u8 auth) { u8 free_slot=aid-1; memcpy(ar->sta_list[free_slot].mac, mac, ATH_MAC_LEN); memcpy(ar->sta_list[free_slot].wpa_ie, wpaie, ielen); ar->sta_list[free_slot].aid = aid; ar->sta_list[free_slot].keymgmt = keymgmt; ar->sta_list[free_slot].ucipher = ucipher; ar->sta_list[free_slot].auth = auth; ar->sta_list_index = ar->sta_list_index | (1 << free_slot); ar->arAPStats.sta[free_slot].aid = aid; } void ar6000_connect_event(struct ar6_softc *ar, u16 channel, u8 *bssid, u16 listenInterval, u16 beaconInterval, NETWORK_TYPE networkType, u8 beaconIeLen, u8 assocReqLen, u8 assocRespLen, u8 *assocInfo) { union iwreq_data wrqu; int i, beacon_ie_pos, assoc_resp_ie_pos, assoc_req_ie_pos; static const char *tag1 = "ASSOCINFO(ReqIEs="; static const char *tag2 = "ASSOCRESPIE="; static const char *beaconIetag = "BEACONIE="; char buf[WMI_CONTROL_MSG_MAX_LEN * 2 + strlen(tag1) + 1]; char *pos; u8 key_op_ctrl; unsigned long flags; struct ieee80211req_key *ik; CRYPTO_TYPE keyType = NONE_CRYPT; if(ar->arNetworkType & AP_NETWORK) { 
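        /*
         * In AP mode the firmware overloads this "connect" event: when the
         * reported bssid is our own MAC it signals that the AP has started
         * (channel is the operating channel); otherwise it reports a newly
         * associated station, reusing the scalar parameters as shown in the
         * compiled-out sketch below. The decode mirrors the add_new_sta()
         * call further down; the local names here are illustrative only.
         */
#if 0
        u16 sta_aid     = channel;                      /* association ID  */
        u8  sta_keymgmt = listenInterval & 0xFF;        /* key mgmt type   */
        u8  sta_auth    = (listenInterval >> 8) & 0xFF; /* auth algorithm  */
        u8  sta_cipher  = (u8)beaconInterval;           /* pairwise cipher */
#endif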
struct net_device *dev = ar->arNetDev; if(memcmp(dev->dev_addr, bssid, ATH_MAC_LEN)==0) { ar->arACS = channel; ik = &ar->ap_mode_bkey; switch(ar->arAuthMode) { case NONE_AUTH: if(ar->arPairwiseCrypto == WEP_CRYPT) { ar6000_install_static_wep_keys(ar); } #ifdef WAPI_ENABLE else if(ar->arPairwiseCrypto == WAPI_CRYPT) { ap_set_wapi_key(ar, ik); } #endif break; case WPA_PSK_AUTH: case WPA2_PSK_AUTH: case (WPA_PSK_AUTH|WPA2_PSK_AUTH): switch (ik->ik_type) { case IEEE80211_CIPHER_TKIP: keyType = TKIP_CRYPT; break; case IEEE80211_CIPHER_AES_CCM: keyType = AES_CRYPT; break; default: goto skip_key; } wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, keyType, GROUP_USAGE, ik->ik_keylen, (u8 *)&ik->ik_keyrsc, ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr, SYNC_BOTH_WMIFLAG); break; } skip_key: ar->arConnected = true; return; } A_PRINTF("NEW STA %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x \n " " AID=%d \n", bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5], channel); switch ((listenInterval>>8)&0xFF) { case OPEN_AUTH: A_PRINTF("AUTH: OPEN\n"); break; case SHARED_AUTH: A_PRINTF("AUTH: SHARED\n"); break; default: A_PRINTF("AUTH: Unknown\n"); break; } switch (listenInterval&0xFF) { case WPA_PSK_AUTH: A_PRINTF("KeyMgmt: WPA-PSK\n"); break; case WPA2_PSK_AUTH: A_PRINTF("KeyMgmt: WPA2-PSK\n"); break; default: A_PRINTF("KeyMgmt: NONE\n"); break; } switch (beaconInterval) { case AES_CRYPT: A_PRINTF("Cipher: AES\n"); break; case TKIP_CRYPT: A_PRINTF("Cipher: TKIP\n"); break; case WEP_CRYPT: A_PRINTF("Cipher: WEP\n"); break; #ifdef WAPI_ENABLE case WAPI_CRYPT: A_PRINTF("Cipher: WAPI\n"); break; #endif default: A_PRINTF("Cipher: NONE\n"); break; } add_new_sta(ar, bssid, channel /*aid*/, assocInfo /* WPA IE */, assocRespLen /* IE len */, listenInterval&0xFF /* Keymgmt */, beaconInterval /* cipher */, (listenInterval>>8)&0xFF /* auth alg */); /* Send event to application */ A_MEMZERO(&wrqu, sizeof(wrqu)); memcpy(wrqu.addr.sa_data, bssid, ATH_MAC_LEN); wireless_send_event(ar->arNetDev, IWEVREGISTERED, &wrqu, NULL); /* In case the queue is stopped when we switch modes, this will * wake it up */ netif_wake_queue(ar->arNetDev); return; } ar6k_cfg80211_connect_event(ar, channel, bssid, listenInterval, beaconInterval, networkType, beaconIeLen, assocReqLen, assocRespLen, assocInfo); memcpy(ar->arBssid, bssid, sizeof(ar->arBssid)); ar->arBssChannel = channel; A_PRINTF("AR6000 connected event on freq %d ", channel); A_PRINTF("with bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " " listenInterval=%d, beaconInterval = %d, beaconIeLen = %d assocReqLen=%d" " assocRespLen =%d\n", bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5], listenInterval, beaconInterval, beaconIeLen, assocReqLen, assocRespLen); if (networkType & ADHOC_NETWORK) { if (networkType & ADHOC_CREATOR) { A_PRINTF("Network: Adhoc (Creator)\n"); } else { A_PRINTF("Network: Adhoc (Joiner)\n"); } } else { A_PRINTF("Network: Infrastructure\n"); } if ((ar->arNetworkType == INFRA_NETWORK)) { wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB); } if (beaconIeLen && (sizeof(buf) > (9 + beaconIeLen * 2))) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nBeaconIEs= ")); beacon_ie_pos = 0; A_MEMZERO(buf, sizeof(buf)); sprintf(buf, "%s", beaconIetag); pos = buf + 9; for (i = beacon_ie_pos; i < beacon_ie_pos + beaconIeLen; i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i])); sprintf(pos, "%2.2x", assocInfo[i]); pos += 2; } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n")); A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.data.length = strlen(buf); 
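            /* hand the "BEACONIE=<hex>" string to user space as a custom
             * wireless event (e.g. for a supplicant that parses the IEs) */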
wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); } if (assocRespLen && (sizeof(buf) > (12 + (assocRespLen * 2)))) { assoc_resp_ie_pos = beaconIeLen + assocReqLen + sizeof(u16) + /* capinfo*/ sizeof(u16) + /* status Code */ sizeof(u16) ; /* associd */ A_MEMZERO(buf, sizeof(buf)); sprintf(buf, "%s", tag2); pos = buf + 12; AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nAssocRespIEs= ")); /* * The Association Response Frame w.o. the WLAN header is delivered to * the host, so skip over to the IEs */ for (i = assoc_resp_ie_pos; i < assoc_resp_ie_pos + assocRespLen - 6; i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i])); sprintf(pos, "%2.2x", assocInfo[i]); pos += 2; } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n")); A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.data.length = strlen(buf); wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); } if (assocReqLen && (sizeof(buf) > (17 + (assocReqLen * 2)))) { /* * assoc Request includes capability and listen interval. Skip these. */ assoc_req_ie_pos = beaconIeLen + sizeof(u16) + /* capinfo*/ sizeof(u16); /* listen interval */ A_MEMZERO(buf, sizeof(buf)); sprintf(buf, "%s", tag1); pos = buf + 17; AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("AssocReqIEs= ")); for (i = assoc_req_ie_pos; i < assoc_req_ie_pos + assocReqLen - 4; i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i])); sprintf(pos, "%2.2x", assocInfo[i]); pos += 2; } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n")); A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.data.length = strlen(buf); wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); } if (ar->user_savedkeys_stat == USER_SAVEDKEYS_STAT_RUN && ar->user_saved_keys.keyOk == true) { key_op_ctrl = KEY_OP_VALID_MASK & ~KEY_OP_INIT_TSC; if (ar->user_key_ctrl & AR6000_USER_SETKEYS_RSC_UNCHANGED) { key_op_ctrl &= ~KEY_OP_INIT_RSC; } else { key_op_ctrl |= KEY_OP_INIT_RSC; } ar6000_reinstall_keys(ar, key_op_ctrl); } netif_wake_queue(ar->arNetDev); /* Update connect & link status atomically */ spin_lock_irqsave(&ar->arLock, flags); ar->arConnected = true; ar->arConnectPending = false; netif_carrier_on(ar->arNetDev); spin_unlock_irqrestore(&ar->arLock, flags); /* reset the rx aggr state */ aggr_reset_state(ar->aggr_cntxt); reconnect_flag = 0; A_MEMZERO(&wrqu, sizeof(wrqu)); memcpy(wrqu.addr.sa_data, bssid, IEEE80211_ADDR_LEN); wrqu.addr.sa_family = ARPHRD_ETHER; wireless_send_event(ar->arNetDev, SIOCGIWAP, &wrqu, NULL); if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable) { A_MEMZERO(ar->arNodeMap, sizeof(ar->arNodeMap)); ar->arNodeNum = 0; ar->arNexEpId = ENDPOINT_2; } if (!ar->arUserBssFilter) { wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0); } } void ar6000_set_numdataendpts(struct ar6_softc *ar, u32 num) { A_ASSERT(num <= (HTC_MAILBOX_NUM_MAX - 1)); ar->arNumDataEndPts = num; } void sta_cleanup(struct ar6_softc *ar, u8 i) { struct sk_buff *skb; /* empty the queued pkts in the PS queue if any */ A_MUTEX_LOCK(&ar->sta_list[i].psqLock); while (!A_NETBUF_QUEUE_EMPTY(&ar->sta_list[i].psq)) { skb = A_NETBUF_DEQUEUE(&ar->sta_list[i].psq); A_NETBUF_FREE(skb); } A_MUTEX_UNLOCK(&ar->sta_list[i].psqLock); /* Zero out the state fields */ A_MEMZERO(&ar->arAPStats.sta[ar->sta_list[i].aid-1], sizeof(WMI_PER_STA_STAT)); A_MEMZERO(&ar->sta_list[i].mac, ATH_MAC_LEN); A_MEMZERO(&ar->sta_list[i].wpa_ie, IEEE80211_MAX_IE); ar->sta_list[i].aid = 0; ar->sta_list[i].flags = 0; ar->sta_list_index = ar->sta_list_index & ~(1 << i); } u8 remove_sta(struct ar6_softc *ar, u8 *mac, u16 reason) { u8 i, removed=0; if(IS_MAC_NULL(mac)) 
    {
        return removed;
    }

    if(IS_MAC_BCAST(mac)) {
        A_PRINTF("DEL ALL STA\n");
        for(i=0; i < AP_MAX_NUM_STA; i++) {
            if(!IS_MAC_NULL(ar->sta_list[i].mac)) {
                sta_cleanup(ar, i);
                removed = 1;
            }
        }
    } else {
        for(i=0; i < AP_MAX_NUM_STA; i++) {
            if(memcmp(ar->sta_list[i].mac, mac, ATH_MAC_LEN)==0) {
                A_PRINTF("DEL STA %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
                         " aid=%d REASON=%d\n", mac[0], mac[1], mac[2],
                         mac[3], mac[4], mac[5], ar->sta_list[i].aid, reason);

                sta_cleanup(ar, i);
                removed = 1;
                break;
            }
        }
    }
    return removed;
}

void ar6000_disconnect_event(struct ar6_softc *ar, u8 reason, u8 *bssid,
                             u8 assocRespLen, u8 *assocInfo, u16 protocolReasonStatus)
{
    u8 i;
    unsigned long flags;
    union iwreq_data wrqu;

    if(ar->arNetworkType & AP_NETWORK) {
        union iwreq_data wrqu;
        struct sk_buff *skb;

        if(!remove_sta(ar, bssid, protocolReasonStatus)) {
            return;
        }

        /* If there are no more associated STAs, empty the mcast PS q */
        if (ar->sta_list_index == 0) {
            A_MUTEX_LOCK(&ar->mcastpsqLock);
            while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
                skb = A_NETBUF_DEQUEUE(&ar->mcastpsq);
                A_NETBUF_FREE(skb);
            }
            A_MUTEX_UNLOCK(&ar->mcastpsqLock);

            /* Clear the LSB of the BitMapCtl field of the TIM IE */
            if (ar->arWmiReady) {
                wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 0);
            }
        }

        if(!IS_MAC_BCAST(bssid)) {
            /* Send event to application */
            A_MEMZERO(&wrqu, sizeof(wrqu));
            memcpy(wrqu.addr.sa_data, bssid, ATH_MAC_LEN);
            wireless_send_event(ar->arNetDev, IWEVEXPIRED, &wrqu, NULL);
        }

        ar->arConnected = false;
        return;
    }

    ar6k_cfg80211_disconnect_event(ar, reason, bssid,
                                   assocRespLen, assocInfo,
                                   protocolReasonStatus);

    /* Send disconnect event to supplicant */
    A_MEMZERO(&wrqu, sizeof(wrqu));
    wrqu.addr.sa_family = ARPHRD_ETHER;
    wireless_send_event(ar->arNetDev, SIOCGIWAP, &wrqu, NULL);

    /* it is necessary to clear the host-side rx aggregation state */
    aggr_reset_state(ar->aggr_cntxt);

    A_UNTIMEOUT(&ar->disconnect_timer);

    A_PRINTF("AR6000 disconnected");
    if (bssid[0] || bssid[1] || bssid[2] || bssid[3] || bssid[4] || bssid[5]) {
        A_PRINTF(" from %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",
                 bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
    }

    AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nDisconnect Reason is %d", reason));
    AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nProtocol Reason/Status Code is %d", protocolReasonStatus));
    AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nAssocResp Frame = %s",
                    assocRespLen ? " " : "NULL"));
    for (i = 0; i < assocRespLen; i++) {
        if (!(i % 0x10)) {
            AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
        }
        AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
    }
    AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));

    /*
     * If the event is due to a disconnect cmd from the host, only then will
     * the target stop trying to connect. Under any other condition, the
     * target will keep trying to connect.
* */ if( reason == DISCONNECT_CMD) { if ((!ar->arUserBssFilter) && (ar->arWmiReady)) { wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0); } } else { ar->arConnectPending = true; if (((reason == ASSOC_FAILED) && (protocolReasonStatus == 0x11)) || ((reason == ASSOC_FAILED) && (protocolReasonStatus == 0x0) && (reconnect_flag == 1))) { ar->arConnected = true; return; } } if ((reason == NO_NETWORK_AVAIL) && (ar->arWmiReady)) { bss_t *pWmiSsidnode = NULL; /* remove the current associated bssid node */ wmi_free_node (ar->arWmi, bssid); /* * In case any other same SSID nodes are present * remove it, since those nodes also not available now */ do { /* * Find the nodes based on SSID and remove it * NOTE :: This case will not work out for Hidden-SSID */ pWmiSsidnode = wmi_find_Ssidnode (ar->arWmi, ar->arSsid, ar->arSsidLen, false, true); if (pWmiSsidnode) { wmi_free_node (ar->arWmi, pWmiSsidnode->ni_macaddr); } } while (pWmiSsidnode); } /* Update connect & link status atomically */ spin_lock_irqsave(&ar->arLock, flags); ar->arConnected = false; netif_carrier_off(ar->arNetDev); spin_unlock_irqrestore(&ar->arLock, flags); if( (reason != CSERV_DISCONNECT) || (reconnect_flag != 1) ) { reconnect_flag = 0; } if (reason != CSERV_DISCONNECT) { ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT; ar->user_key_ctrl = 0; } netif_stop_queue(ar->arNetDev); A_MEMZERO(ar->arBssid, sizeof(ar->arBssid)); ar->arBssChannel = 0; ar->arBeaconInterval = 0; ar6000_TxDataCleanup(ar); } void ar6000_regDomain_event(struct ar6_softc *ar, u32 regCode) { A_PRINTF("AR6000 Reg Code = 0x%x\n", regCode); ar->arRegCode = regCode; } void ar6000_aggr_rcv_addba_req_evt(struct ar6_softc *ar, WMI_ADDBA_REQ_EVENT *evt) { if(evt->status == 0) { aggr_recv_addba_req_evt(ar->aggr_cntxt, evt->tid, evt->st_seq_no, evt->win_sz); } } void ar6000_aggr_rcv_addba_resp_evt(struct ar6_softc *ar, WMI_ADDBA_RESP_EVENT *evt) { A_PRINTF("ADDBA RESP. 
tid %d status %d, sz %d\n", evt->tid, evt->status, evt->amsdu_sz); if(evt->status == 0) { } } void ar6000_aggr_rcv_delba_req_evt(struct ar6_softc *ar, WMI_DELBA_EVENT *evt) { aggr_recv_delba_req_evt(ar->aggr_cntxt, evt->tid); } void register_pal_cb(ar6k_pal_config_t *palConfig_p) { ar6k_pal_config_g = *palConfig_p; } void ar6000_hci_event_rcv_evt(struct ar6_softc *ar, WMI_HCI_EVENT *cmd) { void *osbuf = NULL; s8 i; u8 size, *buf; int ret = 0; size = cmd->evt_buf_sz + 4; osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { ret = A_NO_MEMORY; A_PRINTF("Error in allocating netbuf \n"); return; } A_NETBUF_PUT(osbuf, size); buf = (u8 *)A_NETBUF_DATA(osbuf); /* First 2-bytes carry HCI event/ACL data type * the next 2 are free */ *((short *)buf) = WMI_HCI_EVENT_EVENTID; buf += sizeof(int); memcpy(buf, cmd->buf, cmd->evt_buf_sz); ar6000_deliver_frames_to_nw_stack(ar->arNetDev, osbuf); if(loghci) { A_PRINTF_LOG("HCI Event From PAL <-- \n"); for(i = 0; i < cmd->evt_buf_sz; i++) { A_PRINTF_LOG("0x%02x ", cmd->buf[i]); if((i % 10) == 0) { A_PRINTF_LOG("\n"); } } A_PRINTF_LOG("\n"); A_PRINTF_LOG("==================================\n"); } } void ar6000_neighborReport_event(struct ar6_softc *ar, int numAps, WMI_NEIGHBOR_INFO *info) { #if WIRELESS_EXT >= 18 struct iw_pmkid_cand *pmkcand; #else /* WIRELESS_EXT >= 18 */ static const char *tag = "PRE-AUTH"; char buf[128]; #endif /* WIRELESS_EXT >= 18 */ union iwreq_data wrqu; int i; AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("AR6000 Neighbor Report Event\n")); for (i=0; i < numAps; info++, i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ", info->bssid[0], info->bssid[1], info->bssid[2], info->bssid[3], info->bssid[4], info->bssid[5])); if (info->bssFlags & WMI_PREAUTH_CAPABLE_BSS) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("preauth-cap")); } if (info->bssFlags & WMI_PMKID_VALID_BSS) { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,(" pmkid-valid\n")); continue; /* we skip bss if the pmkid is already valid */ } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("\n")); A_MEMZERO(&wrqu, sizeof(wrqu)); #if WIRELESS_EXT >= 18 pmkcand = A_MALLOC_NOWAIT(sizeof(struct iw_pmkid_cand)); A_MEMZERO(pmkcand, sizeof(struct iw_pmkid_cand)); pmkcand->index = i; pmkcand->flags = info->bssFlags; memcpy(pmkcand->bssid.sa_data, info->bssid, ATH_MAC_LEN); wrqu.data.length = sizeof(struct iw_pmkid_cand); wireless_send_event(ar->arNetDev, IWEVPMKIDCAND, &wrqu, (char *)pmkcand); kfree(pmkcand); #else /* WIRELESS_EXT >= 18 */ snprintf(buf, sizeof(buf), "%s%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x", tag, info->bssid[0], info->bssid[1], info->bssid[2], info->bssid[3], info->bssid[4], info->bssid[5], i, info->bssFlags); wrqu.data.length = strlen(buf); wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); #endif /* WIRELESS_EXT >= 18 */ } } void ar6000_tkip_micerr_event(struct ar6_softc *ar, u8 keyid, bool ismcast) { static const char *tag = "MLME-MICHAELMICFAILURE.indication"; char buf[128]; union iwreq_data wrqu; /* * For AP case, keyid will have aid of STA which sent pkt with * MIC error. Use this aid to get MAC & send it to hostapd. 
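     * (Judging by the decode below, the event packs the AID in the upper
     * bits of 'keyid' and the real key index in the low two bits; hence
     * the 'keyid >> 2' and 'keyid & 0x3'. This is inferred from the code,
     * not from the WMI spec.)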
*/ if (ar->arNetworkType == AP_NETWORK) { sta_t *s = ieee80211_find_conn_for_aid(ar, (keyid >> 2)); if(!s){ A_PRINTF("AP TKIP MIC error received from Invalid aid / STA not found =%d\n", keyid); return; } A_PRINTF("AP TKIP MIC error received from aid=%d\n", keyid); snprintf(buf,sizeof(buf), "%s addr=%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", tag, s->mac[0],s->mac[1],s->mac[2],s->mac[3],s->mac[4],s->mac[5]); } else { ar6k_cfg80211_tkip_micerr_event(ar, keyid, ismcast); A_PRINTF("AR6000 TKIP MIC error received for keyid %d %scast\n", keyid & 0x3, ismcast ? "multi": "uni"); snprintf(buf, sizeof(buf), "%s(keyid=%d %sicast)", tag, keyid & 0x3, ismcast ? "mult" : "un"); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = strlen(buf); wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); } void ar6000_scanComplete_event(struct ar6_softc *ar, int status) { ar6k_cfg80211_scanComplete_event(ar, status); if (!ar->arUserBssFilter) { wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0); } if (ar->scan_triggered) { if (status== 0) { union iwreq_data wrqu; A_MEMZERO(&wrqu, sizeof(wrqu)); wireless_send_event(ar->arNetDev, SIOCGIWSCAN, &wrqu, NULL); } ar->scan_triggered = 0; } AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,( "AR6000 scan complete: %d\n", status)); } void ar6000_targetStats_event(struct ar6_softc *ar, u8 *ptr, u32 len) { u8 ac; if(ar->arNetworkType == AP_NETWORK) { WMI_AP_MODE_STAT *p = (WMI_AP_MODE_STAT *)ptr; WMI_AP_MODE_STAT *ap = &ar->arAPStats; if (len < sizeof(*p)) { return; } for(ac=0;ac<AP_MAX_NUM_STA;ac++) { ap->sta[ac].tx_bytes += p->sta[ac].tx_bytes; ap->sta[ac].tx_pkts += p->sta[ac].tx_pkts; ap->sta[ac].tx_error += p->sta[ac].tx_error; ap->sta[ac].tx_discard += p->sta[ac].tx_discard; ap->sta[ac].rx_bytes += p->sta[ac].rx_bytes; ap->sta[ac].rx_pkts += p->sta[ac].rx_pkts; ap->sta[ac].rx_error += p->sta[ac].rx_error; ap->sta[ac].rx_discard += p->sta[ac].rx_discard; } } else { WMI_TARGET_STATS *pTarget = (WMI_TARGET_STATS *)ptr; TARGET_STATS *pStats = &ar->arTargetStats; if (len < sizeof(*pTarget)) { return; } // Update the RSSI of the connected bss. 
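            // (The averaged beacon RSSI/SNR reported in the target stats are
            // copied into the cached bss node so subsequent node lookups see
            // current link quality.)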
if (ar->arConnected) { bss_t *pConnBss = NULL; pConnBss = wmi_find_node(ar->arWmi,ar->arBssid); if (pConnBss) { pConnBss->ni_rssi = pTarget->cservStats.cs_aveBeacon_rssi; pConnBss->ni_snr = pTarget->cservStats.cs_aveBeacon_snr; wmi_node_return(ar->arWmi, pConnBss); } } AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 updating target stats\n")); pStats->tx_packets += pTarget->txrxStats.tx_stats.tx_packets; pStats->tx_bytes += pTarget->txrxStats.tx_stats.tx_bytes; pStats->tx_unicast_pkts += pTarget->txrxStats.tx_stats.tx_unicast_pkts; pStats->tx_unicast_bytes += pTarget->txrxStats.tx_stats.tx_unicast_bytes; pStats->tx_multicast_pkts += pTarget->txrxStats.tx_stats.tx_multicast_pkts; pStats->tx_multicast_bytes += pTarget->txrxStats.tx_stats.tx_multicast_bytes; pStats->tx_broadcast_pkts += pTarget->txrxStats.tx_stats.tx_broadcast_pkts; pStats->tx_broadcast_bytes += pTarget->txrxStats.tx_stats.tx_broadcast_bytes; pStats->tx_rts_success_cnt += pTarget->txrxStats.tx_stats.tx_rts_success_cnt; for(ac = 0; ac < WMM_NUM_AC; ac++) pStats->tx_packet_per_ac[ac] += pTarget->txrxStats.tx_stats.tx_packet_per_ac[ac]; pStats->tx_errors += pTarget->txrxStats.tx_stats.tx_errors; pStats->tx_failed_cnt += pTarget->txrxStats.tx_stats.tx_failed_cnt; pStats->tx_retry_cnt += pTarget->txrxStats.tx_stats.tx_retry_cnt; pStats->tx_mult_retry_cnt += pTarget->txrxStats.tx_stats.tx_mult_retry_cnt; pStats->tx_rts_fail_cnt += pTarget->txrxStats.tx_stats.tx_rts_fail_cnt; pStats->tx_unicast_rate = wmi_get_rate(pTarget->txrxStats.tx_stats.tx_unicast_rate); pStats->rx_packets += pTarget->txrxStats.rx_stats.rx_packets; pStats->rx_bytes += pTarget->txrxStats.rx_stats.rx_bytes; pStats->rx_unicast_pkts += pTarget->txrxStats.rx_stats.rx_unicast_pkts; pStats->rx_unicast_bytes += pTarget->txrxStats.rx_stats.rx_unicast_bytes; pStats->rx_multicast_pkts += pTarget->txrxStats.rx_stats.rx_multicast_pkts; pStats->rx_multicast_bytes += pTarget->txrxStats.rx_stats.rx_multicast_bytes; pStats->rx_broadcast_pkts += pTarget->txrxStats.rx_stats.rx_broadcast_pkts; pStats->rx_broadcast_bytes += pTarget->txrxStats.rx_stats.rx_broadcast_bytes; pStats->rx_fragment_pkt += pTarget->txrxStats.rx_stats.rx_fragment_pkt; pStats->rx_errors += pTarget->txrxStats.rx_stats.rx_errors; pStats->rx_crcerr += pTarget->txrxStats.rx_stats.rx_crcerr; pStats->rx_key_cache_miss += pTarget->txrxStats.rx_stats.rx_key_cache_miss; pStats->rx_decrypt_err += pTarget->txrxStats.rx_stats.rx_decrypt_err; pStats->rx_duplicate_frames += pTarget->txrxStats.rx_stats.rx_duplicate_frames; pStats->rx_unicast_rate = wmi_get_rate(pTarget->txrxStats.rx_stats.rx_unicast_rate); pStats->tkip_local_mic_failure += pTarget->txrxStats.tkipCcmpStats.tkip_local_mic_failure; pStats->tkip_counter_measures_invoked += pTarget->txrxStats.tkipCcmpStats.tkip_counter_measures_invoked; pStats->tkip_replays += pTarget->txrxStats.tkipCcmpStats.tkip_replays; pStats->tkip_format_errors += pTarget->txrxStats.tkipCcmpStats.tkip_format_errors; pStats->ccmp_format_errors += pTarget->txrxStats.tkipCcmpStats.ccmp_format_errors; pStats->ccmp_replays += pTarget->txrxStats.tkipCcmpStats.ccmp_replays; pStats->power_save_failure_cnt += pTarget->pmStats.power_save_failure_cnt; pStats->noise_floor_calibation = pTarget->noise_floor_calibation; pStats->cs_bmiss_cnt += pTarget->cservStats.cs_bmiss_cnt; pStats->cs_lowRssi_cnt += pTarget->cservStats.cs_lowRssi_cnt; pStats->cs_connect_cnt += pTarget->cservStats.cs_connect_cnt; pStats->cs_disconnect_cnt += pTarget->cservStats.cs_disconnect_cnt; pStats->cs_aveBeacon_snr = 
pTarget->cservStats.cs_aveBeacon_snr;
        pStats->cs_aveBeacon_rssi = pTarget->cservStats.cs_aveBeacon_rssi;

        if (enablerssicompensation) {
            pStats->cs_aveBeacon_rssi =
                rssi_compensation_calc(ar, pStats->cs_aveBeacon_rssi);
        }
        pStats->cs_lastRoam_msec = pTarget->cservStats.cs_lastRoam_msec;
        pStats->cs_snr = pTarget->cservStats.cs_snr;
        pStats->cs_rssi = pTarget->cservStats.cs_rssi;

        pStats->lq_val = pTarget->lqVal;

        pStats->wow_num_pkts_dropped += pTarget->wowStats.wow_num_pkts_dropped;
        pStats->wow_num_host_pkt_wakeups += pTarget->wowStats.wow_num_host_pkt_wakeups;
        pStats->wow_num_host_event_wakeups += pTarget->wowStats.wow_num_host_event_wakeups;
        pStats->wow_num_events_discarded += pTarget->wowStats.wow_num_events_discarded;
        pStats->arp_received += pTarget->arpStats.arp_received;
        pStats->arp_matched += pTarget->arpStats.arp_matched;
        pStats->arp_replied += pTarget->arpStats.arp_replied;

        if (ar->statsUpdatePending) {
            ar->statsUpdatePending = false;
            wake_up(&arEvent);
        }
    }
}

void
ar6000_rssiThreshold_event(struct ar6_softc *ar, WMI_RSSI_THRESHOLD_VAL newThreshold, s16 rssi)
{
    USER_RSSI_THOLD userRssiThold;

    rssi = rssi + SIGNAL_QUALITY_NOISE_FLOOR;

    if (enablerssicompensation) {
        rssi = rssi_compensation_calc(ar, rssi);
    }

    /* Send an event to the app */
    userRssiThold.tag = ar->rssi_map[newThreshold].tag;
    userRssiThold.rssi = rssi;
    A_PRINTF("rssi Threshold range = %d tag = %d rssi = %d\n", newThreshold,
             userRssiThold.tag, userRssiThold.rssi);
}

void
ar6000_hbChallengeResp_event(struct ar6_softc *ar, u32 cookie, u32 source)
{
    if (source != APP_HB_CHALLENGE) {
        /* This ignores replies that come in after their due time */
        if (cookie == ar->arHBChallengeResp.seqNum) {
            ar->arHBChallengeResp.outstanding = false;
        }
    }
}

void
ar6000_reportError_event(struct ar6_softc *ar, WMI_TARGET_ERROR_VAL errorVal)
{
    static const char * const errString[] = {
        [WMI_TARGET_PM_ERR_FAIL]    = "WMI_TARGET_PM_ERR_FAIL",
        [WMI_TARGET_KEY_NOT_FOUND]  = "WMI_TARGET_KEY_NOT_FOUND",
        [WMI_TARGET_DECRYPTION_ERR] = "WMI_TARGET_DECRYPTION_ERR",
        [WMI_TARGET_BMISS]          = "WMI_TARGET_BMISS",
        [WMI_PSDISABLE_NODE_JOIN]   = "WMI_PSDISABLE_NODE_JOIN"
    };

    A_PRINTF("AR6000 Error on Target. Error = 0x%x\n", errorVal);

    /* One error is reported at a time, and errorVal is a bitmask */
    if(errorVal & (errorVal - 1))
        return;

    A_PRINTF("AR6000 Error type = ");
    switch(errorVal) {
        case WMI_TARGET_PM_ERR_FAIL:
        case WMI_TARGET_KEY_NOT_FOUND:
        case WMI_TARGET_DECRYPTION_ERR:
        case WMI_TARGET_BMISS:
        case WMI_PSDISABLE_NODE_JOIN:
            A_PRINTF("%s\n", errString[errorVal]);
            break;
        default:
            A_PRINTF("INVALID\n");
            break;
    }
}

void
ar6000_cac_event(struct ar6_softc *ar, u8 ac, u8 cacIndication,
                 u8 statusCode, u8 *tspecSuggestion)
{
    WMM_TSPEC_IE *tspecIe;

    /*
     * This is the TSPEC IE suggestion from AP.
     * Suggestion provided by AP under some error
     * cases, could be helpful for the host app.
     * Check documentation.
     */
    tspecIe = (WMM_TSPEC_IE *)tspecSuggestion;

    /*
     * What do we do if we get a TSPEC rejection? One thought
     * that comes to mind is to implicitly delete the pstream...
     */
    A_PRINTF("AR6000 CAC notification.
" "AC = %d, cacIndication = 0x%x, statusCode = 0x%x\n", ac, cacIndication, statusCode); } void ar6000_channel_change_event(struct ar6_softc *ar, u16 oldChannel, u16 newChannel) { A_PRINTF("Channel Change notification\nOld Channel: %d, New Channel: %d\n", oldChannel, newChannel); } #define AR6000_PRINT_BSSID(_pBss) do { \ A_PRINTF("%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",\ (_pBss)[0],(_pBss)[1],(_pBss)[2],(_pBss)[3],\ (_pBss)[4],(_pBss)[5]); \ } while(0) void ar6000_roam_tbl_event(struct ar6_softc *ar, WMI_TARGET_ROAM_TBL *pTbl) { u8 i; A_PRINTF("ROAM TABLE NO OF ENTRIES is %d ROAM MODE is %d\n", pTbl->numEntries, pTbl->roamMode); for (i= 0; i < pTbl->numEntries; i++) { A_PRINTF("[%d]bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ", i, pTbl->bssRoamInfo[i].bssid[0], pTbl->bssRoamInfo[i].bssid[1], pTbl->bssRoamInfo[i].bssid[2], pTbl->bssRoamInfo[i].bssid[3], pTbl->bssRoamInfo[i].bssid[4], pTbl->bssRoamInfo[i].bssid[5]); A_PRINTF("RSSI %d RSSIDT %d LAST RSSI %d UTIL %d ROAM_UTIL %d" " BIAS %d\n", pTbl->bssRoamInfo[i].rssi, pTbl->bssRoamInfo[i].rssidt, pTbl->bssRoamInfo[i].last_rssi, pTbl->bssRoamInfo[i].util, pTbl->bssRoamInfo[i].roam_util, pTbl->bssRoamInfo[i].bias); } } void ar6000_wow_list_event(struct ar6_softc *ar, u8 num_filters, WMI_GET_WOW_LIST_REPLY *wow_reply) { u8 i,j; /*Each event now contains exactly one filter, see bug 26613*/ A_PRINTF("WOW pattern %d of %d patterns\n", wow_reply->this_filter_num, wow_reply->num_filters); A_PRINTF("wow mode = %s host mode = %s\n", (wow_reply->wow_mode == 0? "disabled":"enabled"), (wow_reply->host_mode == 1 ? "awake":"asleep")); /*If there are no patterns, the reply will only contain generic WoW information. Pattern information will exist only if there are patterns present. Bug 26716*/ /* If this event contains pattern information, display it*/ if (wow_reply->this_filter_num) { i=0; A_PRINTF("id=%d size=%d offset=%d\n", wow_reply->wow_filters[i].wow_filter_id, wow_reply->wow_filters[i].wow_filter_size, wow_reply->wow_filters[i].wow_filter_offset); A_PRINTF("wow pattern = "); for (j=0; j< wow_reply->wow_filters[i].wow_filter_size; j++) { A_PRINTF("%2.2x",wow_reply->wow_filters[i].wow_filter_pattern[j]); } A_PRINTF("\nwow mask = "); for (j=0; j< wow_reply->wow_filters[i].wow_filter_size; j++) { A_PRINTF("%2.2x",wow_reply->wow_filters[i].wow_filter_mask[j]); } A_PRINTF("\n"); } } /* * Report the Roaming related data collected on the target */ void ar6000_display_roam_time(WMI_TARGET_ROAM_TIME *p) { A_PRINTF("Disconnect Data : BSSID: "); AR6000_PRINT_BSSID(p->disassoc_bssid); A_PRINTF(" RSSI %d DISASSOC Time %d NO_TXRX_TIME %d\n", p->disassoc_bss_rssi,p->disassoc_time, p->no_txrx_time); A_PRINTF("Connect Data: BSSID: "); AR6000_PRINT_BSSID(p->assoc_bssid); A_PRINTF(" RSSI %d ASSOC Time %d TXRX_TIME %d\n", p->assoc_bss_rssi,p->assoc_time, p->allow_txrx_time); } void ar6000_roam_data_event(struct ar6_softc *ar, WMI_TARGET_ROAM_DATA *p) { switch (p->roamDataType) { case ROAM_DATA_TIME: ar6000_display_roam_time(&p->u.roamTime); break; default: break; } } void ar6000_bssInfo_event_rx(struct ar6_softc *ar, u8 *datap, int len) { struct sk_buff *skb; WMI_BSS_INFO_HDR *bih = (WMI_BSS_INFO_HDR *)datap; if (!ar->arMgmtFilter) { return; } if (((ar->arMgmtFilter & IEEE80211_FILTER_TYPE_BEACON) && (bih->frameType != BEACON_FTYPE)) || ((ar->arMgmtFilter & IEEE80211_FILTER_TYPE_PROBE_RESP) && (bih->frameType != PROBERESP_FTYPE))) { return; } if ((skb = A_NETBUF_ALLOC_RAW(len)) != NULL) { A_NETBUF_PUT(skb, len); memcpy(A_NETBUF_DATA(skb), datap, len); skb->dev = 
ar->arNetDev; memcpy(skb_mac_header(skb), A_NETBUF_DATA(skb), 6); skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(0x0019); netif_rx(skb); } } u32 wmiSendCmdNum; int ar6000_control_tx(void *devt, void *osbuf, HTC_ENDPOINT_ID eid) { struct ar6_softc *ar = (struct ar6_softc *)devt; int status = 0; struct ar_cookie *cookie = NULL; int i; #ifdef CONFIG_PM if (ar->arWowState != WLAN_WOW_STATE_NONE) { A_NETBUF_FREE(osbuf); return A_EACCES; } #endif /* CONFIG_PM */ /* take lock to protect ar6000_alloc_cookie() */ AR6000_SPIN_LOCK(&ar->arLock, 0); do { AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_control_tx: skb=0x%lx, len=0x%x eid =%d\n", (unsigned long)osbuf, A_NETBUF_LEN(osbuf), eid)); if (ar->arWMIControlEpFull && (eid == ar->arControlEp)) { /* control endpoint is full, don't allocate resources, we * are just going to drop this packet */ cookie = NULL; AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" WMI Control EP full, dropping packet : 0x%lX, len:%d \n", (unsigned long)osbuf, A_NETBUF_LEN(osbuf))); } else { cookie = ar6000_alloc_cookie(ar); } if (cookie == NULL) { status = A_NO_MEMORY; break; } if(logWmiRawMsgs) { A_PRINTF("WMI cmd send, msgNo %d :", wmiSendCmdNum); for(i = 0; i < a_netbuf_to_len(osbuf); i++) A_PRINTF("%x ", ((u8 *)a_netbuf_to_data(osbuf))[i]); A_PRINTF("\n"); } wmiSendCmdNum++; } while (false); if (cookie != NULL) { /* got a structure to send it out on */ ar->arTxPending[eid]++; if (eid != ar->arControlEp) { ar->arTotalTxDataPending++; } } AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (cookie != NULL) { cookie->arc_bp[0] = (unsigned long)osbuf; cookie->arc_bp[1] = 0; SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, cookie, A_NETBUF_DATA(osbuf), A_NETBUF_LEN(osbuf), eid, AR6K_CONTROL_PKT_TAG); /* this interface is asynchronous, if there is an error, cleanup will happen in the * TX completion callback */ HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt); status = 0; } if (status) { A_NETBUF_FREE(osbuf); } return status; } /* indicate tx activity or inactivity on a WMI stream */ void ar6000_indicate_tx_activity(void *devt, u8 TrafficClass, bool Active) { struct ar6_softc *ar = (struct ar6_softc *)devt; HTC_ENDPOINT_ID eid; int i; if (ar->arWmiEnabled) { eid = arAc2EndpointID(ar, TrafficClass); AR6000_SPIN_LOCK(&ar->arLock, 0); ar->arAcStreamActive[TrafficClass] = Active; if (Active) { /* when a stream goes active, keep track of the active stream with the highest priority */ if (ar->arAcStreamPriMap[TrafficClass] > ar->arHiAcStreamActivePri) { /* set the new highest active priority */ ar->arHiAcStreamActivePri = ar->arAcStreamPriMap[TrafficClass]; } } else { /* when a stream goes inactive, we may have to search for the next active stream * that is the highest priority */ if (ar->arHiAcStreamActivePri == ar->arAcStreamPriMap[TrafficClass]) { /* the highest priority stream just went inactive */ /* reset and search for the "next" highest "active" priority stream */ ar->arHiAcStreamActivePri = 0; for (i = 0; i < WMM_NUM_AC; i++) { if (ar->arAcStreamActive[i]) { if (ar->arAcStreamPriMap[i] > ar->arHiAcStreamActivePri) { /* set the new highest active priority */ ar->arHiAcStreamActivePri = ar->arAcStreamPriMap[i]; } } } } } AR6000_SPIN_UNLOCK(&ar->arLock, 0); } else { /* for mbox ping testing, the traffic class is mapped directly as a stream ID, * see handling of AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE in ioctl.c * convert the stream ID to an endpoint */ eid = arAc2EndpointID(ar, TrafficClass); } /* notify HTC, this may cause credit distribution changes */ 
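/*
 * Editor's note (illustrative, not part of the original driver): the search
 * loop above restores the invariant
 *     arHiAcStreamActivePri == max of arAcStreamPriMap[i] over all active i
 * after the top-priority stream goes idle. A minimal standalone sketch of
 * the same recompute, with a hypothetical helper name, is kept disabled:
 */
#if 0
static u8 recompute_hi_active_pri(const bool active[WMM_NUM_AC],
                                  const u8 pri_map[WMM_NUM_AC])
{
	u8 hi = 0;	/* 0 == no active stream */
	int i;

	/* linear scan; WMM_NUM_AC is only 4, so this is cheap */
	for (i = 0; i < WMM_NUM_AC; i++)
		if (active[i] && pri_map[i] > hi)
			hi = pri_map[i];
	return hi;
}
#endif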
HTCIndicateActivityChange(ar->arHtcTarget, eid, Active); } void ar6000_btcoex_config_event(struct ar6_softc *ar, u8 *ptr, u32 len) { WMI_BTCOEX_CONFIG_EVENT *pBtcoexConfig = (WMI_BTCOEX_CONFIG_EVENT *)ptr; WMI_BTCOEX_CONFIG_EVENT *pArbtcoexConfig = &ar->arBtcoexConfig; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 BTCOEX CONFIG EVENT \n")); A_PRINTF("received config event\n"); pArbtcoexConfig->btProfileType = pBtcoexConfig->btProfileType; pArbtcoexConfig->linkId = pBtcoexConfig->linkId; switch (pBtcoexConfig->btProfileType) { case WMI_BTCOEX_BT_PROFILE_SCO: memcpy(&pArbtcoexConfig->info.scoConfigCmd, &pBtcoexConfig->info.scoConfigCmd, sizeof(WMI_SET_BTCOEX_SCO_CONFIG_CMD)); break; case WMI_BTCOEX_BT_PROFILE_A2DP: memcpy(&pArbtcoexConfig->info.a2dpConfigCmd, &pBtcoexConfig->info.a2dpConfigCmd, sizeof(WMI_SET_BTCOEX_A2DP_CONFIG_CMD)); break; case WMI_BTCOEX_BT_PROFILE_ACLCOEX: memcpy(&pArbtcoexConfig->info.aclcoexConfig, &pBtcoexConfig->info.aclcoexConfig, sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD)); break; case WMI_BTCOEX_BT_PROFILE_INQUIRY_PAGE: memcpy(&pArbtcoexConfig->info.btinquiryPageConfigCmd, &pBtcoexConfig->info.btinquiryPageConfigCmd, sizeof(pArbtcoexConfig->info.btinquiryPageConfigCmd)); break; } if (ar->statsUpdatePending) { ar->statsUpdatePending = false; wake_up(&arEvent); } } void ar6000_btcoex_stats_event(struct ar6_softc *ar, u8 *ptr, u32 len) { WMI_BTCOEX_STATS_EVENT *pBtcoexStats = (WMI_BTCOEX_STATS_EVENT *)ptr; AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 BTCOEX STATS EVENT \n")); memcpy(&ar->arBtcoexStats, pBtcoexStats, sizeof(WMI_BTCOEX_STATS_EVENT)); if (ar->statsUpdatePending) { ar->statsUpdatePending = false; wake_up(&arEvent); } } module_init(ar6000_init_module); module_exit(ar6000_cleanup_module); /* Init cookie queue */ static void ar6000_cookie_init(struct ar6_softc *ar) { u32 i; ar->arCookieList = NULL; ar->arCookieCount = 0; A_MEMZERO(s_ar_cookie_mem, sizeof(s_ar_cookie_mem)); for (i = 0; i < MAX_COOKIE_NUM; i++) { ar6000_free_cookie(ar, &s_ar_cookie_mem[i]); } } /* cleanup cookie queue */ static void ar6000_cookie_cleanup(struct ar6_softc *ar) { /* It is gone .... 
*/ ar->arCookieList = NULL; ar->arCookieCount = 0; } /* Return a cookie to the head of the free list */ static void ar6000_free_cookie(struct ar6_softc *ar, struct ar_cookie * cookie) { /* Insert first */ A_ASSERT(ar != NULL); A_ASSERT(cookie != NULL); cookie->arc_list_next = ar->arCookieList; ar->arCookieList = cookie; ar->arCookieCount++; } /* Take a cookie from the head of the free list */ static struct ar_cookie * ar6000_alloc_cookie(struct ar6_softc *ar) { struct ar_cookie *cookie; cookie = ar->arCookieList; if(cookie != NULL) { ar->arCookieList = cookie->arc_list_next; ar->arCookieCount--; } return cookie; } void ar6000_tx_retry_err_event(void *devt) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Tx retries reach maximum!\n")); } void ar6000_snrThresholdEvent_rx(void *devt, WMI_SNR_THRESHOLD_VAL newThreshold, u8 snr) { WMI_SNR_THRESHOLD_EVENT event; event.range = newThreshold; event.snr = snr; } void ar6000_lqThresholdEvent_rx(void *devt, WMI_LQ_THRESHOLD_VAL newThreshold, u8 lq) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("lq threshold range %d, lq %d\n", newThreshold, lq)); } u32 a_copy_to_user(void *to, const void *from, u32 n) { return(copy_to_user(to, from, n)); } u32 a_copy_from_user(void *to, const void *from, u32 n) { return(copy_from_user(to, from, n)); } int ar6000_get_driver_cfg(struct net_device *dev, u16 cfgParam, void *result) { int ret = 0; switch(cfgParam) { case AR6000_DRIVER_CFG_GET_WLANNODECACHING: *((u32 *)result) = wlanNodeCaching; break; case AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS: *((u32 *)result) = logWmiRawMsgs; break; default: ret = EINVAL; break; } return ret; } void ar6000_keepalive_rx(void *devt, u8 configured) { struct ar6_softc *ar = (struct ar6_softc *)devt; ar->arKeepaliveConfigured = configured; wake_up(&arEvent); } void ar6000_pmkid_list_event(void *devt, u8 numPMKID, WMI_PMKID *pmkidList, u8 *bssidList) { u8 i, j; A_PRINTF("Number of Cached PMKIDs is %d\n", numPMKID); for (i = 0; i < numPMKID; i++) { A_PRINTF("\nBSSID %d ", i); for (j = 0; j < ATH_MAC_LEN; j++) { A_PRINTF("%2.2x", bssidList[j]); } bssidList += (ATH_MAC_LEN + WMI_PMKID_LEN); A_PRINTF("\nPMKID %d ", i); for (j = 0; j < WMI_PMKID_LEN; j++) { A_PRINTF("%2.2x", pmkidList->pmkid[j]); } pmkidList = (WMI_PMKID *)((u8 *)pmkidList + ATH_MAC_LEN + WMI_PMKID_LEN); } } void ar6000_pspoll_event(struct ar6_softc *ar,u8 aid) { sta_t *conn=NULL; bool isPsqEmpty = false; conn = ieee80211_find_conn_for_aid(ar, aid); if (conn == NULL) { /* no STA associated with this AID; nothing to deliver */ return; } /* If the PS q for this STA is not empty, dequeue and send a pkt from * the head of the q. Also update the More data bit in the WMI_DATA_HDR * if there are more pkts for this STA in the PS q. If there are no more * pkts for this STA, update the PVB for this STA. */ A_MUTEX_LOCK(&conn->psqLock); isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq); A_MUTEX_UNLOCK(&conn->psqLock); if (isPsqEmpty) { /* TODO: No buffered pkts for this STA. Send out a NULL data frame */ } else { struct sk_buff *skb = NULL; A_MUTEX_LOCK(&conn->psqLock); skb = A_NETBUF_DEQUEUE(&conn->psq); A_MUTEX_UNLOCK(&conn->psqLock); /* Set the STA flag to PSPolled, so that the frame will go out */ STA_SET_PS_POLLED(conn); ar6000_data_tx(skb, ar->arNetDev); STA_CLR_PS_POLLED(conn); /* Clear the PVB for this STA if the queue has become empty */ A_MUTEX_LOCK(&conn->psqLock); isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq); A_MUTEX_UNLOCK(&conn->psqLock); if (isPsqEmpty) { wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0); } } } void ar6000_dtimexpiry_event(struct ar6_softc *ar) { bool isMcastQueued = false; struct sk_buff *skb = NULL; /* If there are no associated STAs, ignore the DTIM expiry event. 
* There can be potential race conditions where the last associated * STA may disconnect & before the host could clear the 'Indicate DTIM' * request to the firmware, the firmware would have just indicated a DTIM * expiry event. The race is between 'clear DTIM expiry cmd' going * from the host to the firmware & the DTIM expiry event happening from * the firmware to the host. */ if (ar->sta_list_index == 0) { return; } A_MUTEX_LOCK(&ar->mcastpsqLock); isMcastQueued = A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq); A_MUTEX_UNLOCK(&ar->mcastpsqLock); A_ASSERT(isMcastQueued == false); /* Flush the mcast psq to the target */ /* Set the STA flag to DTIMExpired, so that the frame will go out */ ar->DTIMExpired = true; A_MUTEX_LOCK(&ar->mcastpsqLock); while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) { skb = A_NETBUF_DEQUEUE(&ar->mcastpsq); A_MUTEX_UNLOCK(&ar->mcastpsqLock); ar6000_data_tx(skb, ar->arNetDev); A_MUTEX_LOCK(&ar->mcastpsqLock); } A_MUTEX_UNLOCK(&ar->mcastpsqLock); /* Reset the DTIMExpired flag back to 0 */ ar->DTIMExpired = false; /* Clear the LSB of the BitMapCtl field of the TIM IE */ wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 0); } void read_rssi_compensation_param(struct ar6_softc *ar) { u8 *cust_data_ptr; //#define RSSICOMPENSATION_PRINT #ifdef RSSICOMPENSATION_PRINT s16 i; cust_data_ptr = ar6000_get_cust_data_buffer(ar->arTargetType); for (i=0; i<16; i++) { A_PRINTF("cust_data_%d = %x \n", i, *(u8 *)cust_data_ptr); cust_data_ptr += 1; } #endif cust_data_ptr = ar6000_get_cust_data_buffer(ar->arTargetType); rssi_compensation_param.customerID = *(u16 *)cust_data_ptr & 0xffff; rssi_compensation_param.enable = *(u16 *)(cust_data_ptr+2) & 0xffff; rssi_compensation_param.bg_param_a = *(u16 *)(cust_data_ptr+4) & 0xffff; rssi_compensation_param.bg_param_b = *(u16 *)(cust_data_ptr+6) & 0xffff; rssi_compensation_param.a_param_a = *(u16 *)(cust_data_ptr+8) & 0xffff; rssi_compensation_param.a_param_b = *(u16 *)(cust_data_ptr+10) &0xffff; rssi_compensation_param.reserved = *(u32 *)(cust_data_ptr+12); #ifdef RSSICOMPENSATION_PRINT A_PRINTF("customerID = 0x%x \n", rssi_compensation_param.customerID); A_PRINTF("enable = 0x%x \n", rssi_compensation_param.enable); A_PRINTF("bg_param_a = 0x%x and %d \n", rssi_compensation_param.bg_param_a, rssi_compensation_param.bg_param_a); A_PRINTF("bg_param_b = 0x%x and %d \n", rssi_compensation_param.bg_param_b, rssi_compensation_param.bg_param_b); A_PRINTF("a_param_a = 0x%x and %d \n", rssi_compensation_param.a_param_a, rssi_compensation_param.a_param_a); A_PRINTF("a_param_b = 0x%x and %d \n", rssi_compensation_param.a_param_b, rssi_compensation_param.a_param_b); A_PRINTF("Last 4 bytes = 0x%x \n", rssi_compensation_param.reserved); #endif if (rssi_compensation_param.enable != 0x1) { rssi_compensation_param.enable = 0; } return; } s32 rssi_compensation_calc_tcmd(u32 freq, s32 rssi, u32 totalPkt) { if (freq > 5000) { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d, totalPkt = %d\n", rssi,totalPkt)); rssi = rssi * rssi_compensation_param.a_param_a + totalPkt * rssi_compensation_param.a_param_b; rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } else { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d, totalPkt = %d\n", rssi,totalPkt)); rssi = rssi * rssi_compensation_param.bg_param_a + totalPkt * rssi_compensation_param.bg_param_b; 
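/*
 * Editor's note (hedged interpretation, not from the original source): the
 * compensation parameters read in read_rssi_compensation_param() appear to
 * be fixed-point values scaled by 100, so the statement below divides the
 * accumulated value back down; subtracting 50 before the divide rounds the
 * (typically negative) result to the nearest integer, because C integer
 * division truncates toward zero. Worked example with made-up parameters
 * bg_param_a = 100, bg_param_b = 0 and rssi = -60, totalPkt = 1:
 *     -60 * 100 + 1 * 0 = -6000,  (-6000 - 50) / 100 = -6050 / 100 = -60
 * i.e. unity gain leaves the value unchanged; with bg_param_a = 98 the
 * same input gives (-5880 - 50) / 100 = -59, the nearest integer to -58.8.
 */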
rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } return rssi; } s16 rssi_compensation_calc(struct ar6_softc *ar, s16 rssi) { if (ar->arBssChannel > 5000) { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi)); rssi = rssi * rssi_compensation_param.a_param_a + rssi_compensation_param.a_param_b; rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } else { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi)); rssi = rssi * rssi_compensation_param.bg_param_a + rssi_compensation_param.bg_param_b; rssi = (rssi-50) /100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi)); } } return rssi; } s16 rssi_compensation_reverse_calc(struct ar6_softc *ar, s16 rssi, bool Above) { s16 i; if (ar->arBssChannel > 5000) { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before rev compensation = %d\n", rssi)); rssi = rssi * 100; rssi = (rssi - rssi_compensation_param.a_param_b) / rssi_compensation_param.a_param_a; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after rev compensation = %d\n", rssi)); } } else { if (rssi_compensation_param.enable) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n")); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before rev compensation = %d\n", rssi)); if (Above) { for (i=95; i>=0; i--) { if (rssi <= rssi_compensation_table[i]) { rssi = 0 - i; break; } } } else { for (i=0; i<=95; i++) { if (rssi >= rssi_compensation_table[i]) { rssi = 0 - i; break; } } } AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after rev compensation = %d\n", rssi)); } } return rssi; } #ifdef WAPI_ENABLE void ap_wapi_rekey_event(struct ar6_softc *ar, u8 type, u8 *mac) { union iwreq_data wrqu; char buf[20]; A_MEMZERO(buf, sizeof(buf)); strcpy(buf, "WAPI_REKEY"); buf[10] = type; memcpy(&buf[11], mac, ATH_MAC_LEN); A_MEMZERO(&wrqu, sizeof(wrqu)); wrqu.data.length = 10+1+ATH_MAC_LEN; wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf); A_PRINTF("WAPI REKEY - %d - %02x:%02x\n", type, mac[4], mac[5]); } #endif static int ar6000_reinstall_keys(struct ar6_softc *ar, u8 key_op_ctrl) { int status = 0; struct ieee80211req_key *uik = &ar->user_saved_keys.ucast_ik; struct ieee80211req_key *bik = &ar->user_saved_keys.bcast_ik; CRYPTO_TYPE keyType = ar->user_saved_keys.keyType; if (IEEE80211_CIPHER_CCKM_KRK != uik->ik_type) { if (NONE_CRYPT == keyType) { goto _reinstall_keys_out; } if (uik->ik_keylen) { status = wmi_addKey_cmd(ar->arWmi, uik->ik_keyix, ar->user_saved_keys.keyType, PAIRWISE_USAGE, uik->ik_keylen, (u8 *)&uik->ik_keyrsc, uik->ik_keydata, key_op_ctrl, uik->ik_macaddr, SYNC_BEFORE_WMIFLAG); } } else { status = wmi_add_krk_cmd(ar->arWmi, uik->ik_keydata); } if (IEEE80211_CIPHER_CCKM_KRK != bik->ik_type) { if (NONE_CRYPT == keyType) { goto _reinstall_keys_out; } if (bik->ik_keylen) { status = wmi_addKey_cmd(ar->arWmi, bik->ik_keyix, ar->user_saved_keys.keyType, GROUP_USAGE, bik->ik_keylen, (u8 *)&bik->ik_keyrsc, bik->ik_keydata, key_op_ctrl, bik->ik_macaddr, NO_SYNC_WMIFLAG); } } else { status = wmi_add_krk_cmd(ar->arWmi, bik->ik_keydata); } _reinstall_keys_out: ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT; ar->user_key_ctrl = 0; return status; } void ar6000_dset_open_req( void *context, u32 id, u32 targHandle, u32 
targReplyFn, u32 targReplyArg) { } void ar6000_dset_close( void *context, u32 access_cookie) { return; } void ar6000_dset_data_req( void *context, u32 accessCookie, u32 offset, u32 length, u32 targBuf, u32 targReplyFn, u32 targReplyArg) { } int ar6000_ap_mode_profile_commit(struct ar6_softc *ar) { WMI_CONNECT_CMD p; unsigned long flags; /* No change in AP's profile configuration */ if(ar->ap_profile_flag==0) { A_PRINTF("COMMIT: No change in profile!!!\n"); return -ENODATA; } if(!ar->arSsidLen) { A_PRINTF("SSID not set!!!\n"); return -ECHRNG; } switch(ar->arAuthMode) { case NONE_AUTH: if((ar->arPairwiseCrypto != NONE_CRYPT) && #ifdef WAPI_ENABLE (ar->arPairwiseCrypto != WAPI_CRYPT) && #endif (ar->arPairwiseCrypto != WEP_CRYPT)) { A_PRINTF("Cipher not supported in AP mode Open auth\n"); return -EOPNOTSUPP; } break; case WPA_PSK_AUTH: case WPA2_PSK_AUTH: case (WPA_PSK_AUTH|WPA2_PSK_AUTH): break; default: A_PRINTF("This key mgmt type not supported in AP mode\n"); return -EOPNOTSUPP; } /* Update the arNetworkType */ ar->arNetworkType = ar->arNextMode; A_MEMZERO(&p,sizeof(p)); p.ssidLength = ar->arSsidLen; memcpy(p.ssid,ar->arSsid,p.ssidLength); p.channel = ar->arChannelHint; p.networkType = ar->arNetworkType; p.dot11AuthMode = ar->arDot11AuthMode; p.authMode = ar->arAuthMode; p.pairwiseCryptoType = ar->arPairwiseCrypto; p.pairwiseCryptoLen = ar->arPairwiseCryptoLen; p.groupCryptoType = ar->arGroupCrypto; p.groupCryptoLen = ar->arGroupCryptoLen; p.ctrl_flags = ar->arConnectCtrlFlags; wmi_ap_profile_commit(ar->arWmi, &p); spin_lock_irqsave(&ar->arLock, flags); ar->arConnected = true; netif_carrier_on(ar->arNetDev); spin_unlock_irqrestore(&ar->arLock, flags); ar->ap_profile_flag = 0; return 0; } int ar6000_connect_to_ap(struct ar6_softc *ar) { /* The ssid length check prevents a second "essid off" from the user from being treated as a connect cmd. The second "essid off" is ignored. */ if((ar->arWmiReady == true) && (ar->arSsidLen > 0) && ar->arNetworkType!=AP_NETWORK) { int status; if((ADHOC_NETWORK != ar->arNetworkType) && (NONE_AUTH==ar->arAuthMode) && (WEP_CRYPT==ar->arPairwiseCrypto)) { ar6000_install_static_wep_keys(ar); } if (!ar->arUserBssFilter) { if (wmi_bssfilter_cmd(ar->arWmi, ALL_BSS_FILTER, 0) != 0) { return -EIO; } } #ifdef WAPI_ENABLE if (ar->arWapiEnable) { ar->arPairwiseCrypto = WAPI_CRYPT; ar->arPairwiseCryptoLen = 0; ar->arGroupCrypto = WAPI_CRYPT; ar->arGroupCryptoLen = 0; ar->arAuthMode = NONE_AUTH; ar->arConnectCtrlFlags |= CONNECT_IGNORE_WPAx_GROUP_CIPHER; } #endif AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("Connect called with authmode %d dot11 auth %d"\ " PW crypto %d PW crypto Len %d GRP crypto %d"\ " GRP crypto Len %d\n", ar->arAuthMode, ar->arDot11AuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto, ar->arGroupCryptoLen)); reconnect_flag = 0; /* Set the listen interval to 1000 TUs or more. This value will be indicated to the AP in the connect request;
later it is set back locally at the STA to 100/1000 TUs depending on the power mode */ if (ar->arNetworkType == INFRA_NETWORK) { wmi_listeninterval_cmd(ar->arWmi, max(ar->arListenIntervalT, (u16)A_MAX_WOW_LISTEN_INTERVAL), 0); } status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType, ar->arDot11AuthMode, ar->arAuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto,ar->arGroupCryptoLen, ar->arSsidLen, ar->arSsid, ar->arReqBssid, ar->arChannelHint, ar->arConnectCtrlFlags); if (status) { wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB); if (!ar->arUserBssFilter) { wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0); } return status; } if ((!(ar->arConnectCtrlFlags & CONNECT_DO_WPA_OFFLOAD)) && ((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode))) { A_TIMEOUT_MS(&ar->disconnect_timer, A_DISCONNECT_TIMER_INTERVAL, 0); } ar->arConnectCtrlFlags &= ~CONNECT_DO_WPA_OFFLOAD; ar->arConnectPending = true; return status; } return A_ERROR; } int ar6000_disconnect(struct ar6_softc *ar) { if ((ar->arConnected == true) || (ar->arConnectPending == true)) { wmi_disconnect_cmd(ar->arWmi); /* * Disconnect cmd is issued, clear connectPending. * arConnected will be cleared in the disconnect_event notification. */ ar->arConnectPending = false; } return 0; } int ar6000_ap_mode_get_wpa_ie(struct ar6_softc *ar, struct ieee80211req_wpaie *wpaie) { sta_t *conn = NULL; conn = ieee80211_find_conn(ar, wpaie->wpa_macaddr); A_MEMZERO(wpaie->wpa_ie, IEEE80211_MAX_IE); A_MEMZERO(wpaie->rsn_ie, IEEE80211_MAX_IE); if(conn) { memcpy(wpaie->wpa_ie, conn->wpa_ie, IEEE80211_MAX_IE); } return 0; } int is_iwioctl_allowed(u8 mode, u16 cmd) { if(cmd >= SIOCSIWCOMMIT && cmd <= SIOCGIWPOWER) { cmd -= SIOCSIWCOMMIT; if(sioctl_filter[cmd] == 0xFF) return 0; if(sioctl_filter[cmd] & mode) return 0; } else if(cmd >= SIOCIWFIRSTPRIV && cmd <= (SIOCIWFIRSTPRIV+30)) { cmd -= SIOCIWFIRSTPRIV; if(pioctl_filter[cmd] == 0xFF) return 0; if(pioctl_filter[cmd] & mode) return 0; } else { return A_ERROR; } return A_ENOTSUP; } int is_xioctl_allowed(u8 mode, int cmd) { if(sizeof(xioctl_filter)-1 < cmd) { A_PRINTF("Filter for this cmd=%d not defined\n",cmd); return 0; } if(xioctl_filter[cmd] == 0xFF) return 0; if(xioctl_filter[cmd] & mode) return 0; return A_ERROR; } #ifdef WAPI_ENABLE int ap_set_wapi_key(struct ar6_softc *ar, void *ikey) { struct ieee80211req_key *ik = (struct ieee80211req_key *)ikey; KEY_USAGE keyUsage = 0; int status; if (memcmp(ik->ik_macaddr, bcast_mac, IEEE80211_ADDR_LEN) == 0) { keyUsage = GROUP_USAGE; } else { keyUsage = PAIRWISE_USAGE; } A_PRINTF("WAPI_KEY: Type:%d ix:%d mac:%02x:%02x len:%d\n", keyUsage, ik->ik_keyix, ik->ik_macaddr[4], ik->ik_macaddr[5], ik->ik_keylen); status = wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, WAPI_CRYPT, keyUsage, ik->ik_keylen, (u8 *)&ik->ik_keyrsc, ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr, SYNC_BOTH_WMIFLAG); if (0 != status) { return -EIO; } return 0; } #endif void ar6000_peer_event( void *context, u8 eventCode, u8 *macAddr) { u8 pos; for (pos=0;pos<6;pos++) printk("%02x: ",*(macAddr+pos)); printk("\n"); } #ifdef HTC_TEST_SEND_PKTS #define HTC_TEST_DUPLICATE 8 static void DoHTCSendPktsTest(struct ar6_softc *ar, int MapNo, HTC_ENDPOINT_ID eid, struct sk_buff *dupskb) { struct ar_cookie *cookie; struct ar_cookie *cookieArray[HTC_TEST_DUPLICATE]; struct sk_buff *new_skb; int i; int pkts = 0; struct htc_packet_queue pktQueue; EPPING_HEADER *eppingHdr; eppingHdr = A_NETBUF_DATA(dupskb); if (eppingHdr->Cmd_h == EPPING_CMD_NO_ECHO) { /* skip 
test if this is already a tx perf test */ return; } for (i = 0; i < HTC_TEST_DUPLICATE; i++,pkts++) { AR6000_SPIN_LOCK(&ar->arLock, 0); cookie = ar6000_alloc_cookie(ar); if (cookie != NULL) { ar->arTxPending[eid]++; ar->arTotalTxDataPending++; } AR6000_SPIN_UNLOCK(&ar->arLock, 0); if (NULL == cookie) { break; } new_skb = A_NETBUF_ALLOC(A_NETBUF_LEN(dupskb)); if (new_skb == NULL) { AR6000_SPIN_LOCK(&ar->arLock, 0); ar6000_free_cookie(ar,cookie); AR6000_SPIN_UNLOCK(&ar->arLock, 0); break; } A_NETBUF_PUT_DATA(new_skb, A_NETBUF_DATA(dupskb), A_NETBUF_LEN(dupskb)); cookie->arc_bp[0] = (unsigned long)new_skb; cookie->arc_bp[1] = MapNo; SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, cookie, A_NETBUF_DATA(new_skb), A_NETBUF_LEN(new_skb), eid, AR6K_DATA_PKT_TAG); cookieArray[i] = cookie; { EPPING_HEADER *pHdr = (EPPING_HEADER *)A_NETBUF_DATA(new_skb); pHdr->Cmd_h = EPPING_CMD_NO_ECHO; /* do not echo the packet */ } } if (pkts == 0) { return; } INIT_HTC_PACKET_QUEUE(&pktQueue); for (i = 0; i < pkts; i++) { HTC_PACKET_ENQUEUE(&pktQueue,&cookieArray[i]->HtcPkt); } HTCSendPktsMultiple(ar->arHtcTarget, &pktQueue); } #endif #ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT /* * Add support for adding and removing a virtual adapter for soft AP. * Some OSes require different adapter names for station and soft AP mode. * To support this requirement, create and destroy a netdevice instance * when the AP mode is operational. Full-fledged support for a virtual * device is not implemented. Rather, a virtual interface is created and * linked with the existing physical device instance during the operation * of the AP mode. */ int ar6000_start_ap_interface(struct ar6_softc *ar) { struct ar_virtual_interface *arApDev; /* Change net_device to point to AP instance */ arApDev = (struct ar_virtual_interface *)ar->arApDev; ar->arNetDev = arApDev->arNetDev; return 0; } int ar6000_stop_ap_interface(struct ar6_softc *ar) { struct ar_virtual_interface *arApDev; /* Change net_device to point to sta instance */ arApDev = (struct ar_virtual_interface *)ar->arApDev; if (arApDev) { ar->arNetDev = arApDev->arStaNetDev; } return 0; } int ar6000_create_ap_interface(struct ar6_softc *ar, char *ap_ifname) { struct net_device *dev; struct ar_virtual_interface *arApDev; dev = alloc_etherdev(sizeof(struct ar_virtual_interface)); if (dev == NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: can't alloc etherdev\n")); return A_ERROR; } ether_setup(dev); init_netdev(dev, ap_ifname); if (register_netdev(dev)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: register_netdev failed\n")); free_netdev(dev); /* don't leak the allocated netdev on failure */ return A_ERROR; } arApDev = netdev_priv(dev); arApDev->arDev = ar; arApDev->arNetDev = dev; arApDev->arStaNetDev = ar->arNetDev; ar->arApDev = arApDev; arApNetDev = dev; /* Copy the MAC address */ memcpy(dev->dev_addr, ar->arNetDev->dev_addr, AR6000_ETH_ADDR_LEN); return 0; } int ar6000_add_ap_interface(struct ar6_softc *ar, char *ap_ifname) { /* Interface already added, need not proceed further */ if (ar->arApDev != NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_add_ap_interface: interface already present \n")); return 0; } if (ar6000_create_ap_interface(ar, ap_ifname) != 0) { return A_ERROR; } A_PRINTF("Add AP interface %s \n",ap_ifname); return ar6000_start_ap_interface(ar); } int ar6000_remove_ap_interface(struct ar6_softc *ar) { if (arApNetDev) { ar6000_stop_ap_interface(ar); unregister_netdev(arApNetDev); free_netdev(arApNetDev); A_PRINTF("Remove AP interface\n"); } ar->arApDev = NULL; arApNetDev = NULL; return 0; } #endif /* 
CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */ #ifdef EXPORT_HCI_BRIDGE_INTERFACE EXPORT_SYMBOL(setupbtdev); #endif
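/*
 * Editor's sketch (illustrative, not part of the original ar6000 driver):
 * ar6000_alloc_cookie()/ar6000_free_cookie() above implement a LIFO free
 * list of per-packet cookies, serialized by the caller's arLock spinlock.
 * The same pattern in isolation, with hypothetical pool_* names:
 */
#if 0
struct pkt_cookie {
	struct pkt_cookie *next;	/* free-list link */
	unsigned long buf;		/* driver-private payload */
};

struct cookie_pool {
	struct pkt_cookie *head;	/* LIFO free list */
	unsigned int count;
};

/* Return a cookie to the pool: push onto the list head, O(1).
 * The caller must hold whatever lock guards the pool. */
static void cookie_pool_put(struct cookie_pool *p, struct pkt_cookie *c)
{
	c->next = p->head;
	p->head = c;
	p->count++;
}

/* Take a cookie from the pool: pop the head; NULL when exhausted. */
static struct pkt_cookie *cookie_pool_get(struct cookie_pool *p)
{
	struct pkt_cookie *c = p->head;

	if (c) {
		p->head = c->next;
		p->count--;
	}
	return c;
}
#endif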
./CrossVul/dataset_final_sorted/CWE-264/c/bad_3524_9
crossvul-cpp_data_good_2287_2
/* * linux/fs/ext2/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext2 fs regular file handling primitives * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) */ #include <linux/time.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include "ext2.h" #include "xattr.h" #include "acl.h" /* * Called when filp is released. This happens when all file descriptors * for a single struct file are closed. Note that different open() calls * for the same file yield different struct file structures. */ static int ext2_release_file (struct inode * inode, struct file * filp) { if (filp->f_mode & FMODE_WRITE) { mutex_lock(&EXT2_I(inode)->truncate_mutex); ext2_discard_reservation(inode); mutex_unlock(&EXT2_I(inode)->truncate_mutex); } return 0; } int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync) { int ret; struct super_block *sb = file->f_mapping->host->i_sb; struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; ret = generic_file_fsync(file, start, end, datasync); if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { /* We don't really know where the IO error happened... */ ext2_error(sb, __func__, "detected IO error when writing metadata buffers"); ret = -EIO; } return ret; } /* * We have mostly NULL's here: the current defaults are ok for * the ext2 filesystem. */ const struct file_operations ext2_file_operations = { .llseek = generic_file_llseek, .read = new_sync_read, .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif .mmap = generic_file_mmap, .open = dquot_file_open, .release = ext2_release_file, .fsync = ext2_fsync, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, }; #ifdef CONFIG_EXT2_FS_XIP const struct file_operations ext2_xip_file_operations = { .llseek = generic_file_llseek, .read = xip_file_read, .write = xip_file_write, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif .mmap = xip_file_mmap, .open = dquot_file_open, .release = ext2_release_file, .fsync = ext2_fsync, }; #endif const struct inode_operations ext2_file_inode_operations = { #ifdef CONFIG_EXT2_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext2_listxattr, .removexattr = generic_removexattr, #endif .setattr = ext2_setattr, .get_acl = ext2_get_acl, .set_acl = ext2_set_acl, .fiemap = ext2_fiemap, };
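/*
 * Editor's sketch (illustrative only, not part of the original file):
 * ext2_fsync() above turns a recorded block-device writeback error (AS_EIO
 * on the bdev mapping) into -EIO, so user space learns about lost metadata
 * writes at fsync() time instead of seeing silent success. Minimal usage
 * from user space, assuming a file on an ext2 mount at /mnt/ext2:
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext2/file", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	(void)write(fd, "data", 4);
	if (fsync(fd) < 0)		/* -EIO propagates from ext2_fsync() */
		perror("fsync");	/* metadata writeback failed */
	close(fd);
	return 0;
}
#endif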
./CrossVul/dataset_final_sorted/CWE-264/c/good_2287_2
crossvul-cpp_data_good_3646_0
/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */ /* Copyright (c) 2001, 2002 by D-Link Corporation Written by Edward Peng.<edward_peng@dlink.com.tw> Created 03-May-2001, based on Linux' sundance.c. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. */ #define DRV_NAME "DL2000/TC902x-based linux driver" #define DRV_VERSION "v1.19" #define DRV_RELDATE "2007/08/12" #include "dl2k.h" #include <linux/dma-mapping.h> static char version[] __devinitdata = KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; #define MAX_UNITS 8 static int mtu[MAX_UNITS]; static int vlan[MAX_UNITS]; static int jumbo[MAX_UNITS]; static char *media[MAX_UNITS]; static int tx_flow=-1; static int rx_flow=-1; static int copy_thresh; static int rx_coalesce=10; /* Rx frame count each interrupt */ static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */ static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */ MODULE_AUTHOR ("Edward Peng"); MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter"); MODULE_LICENSE("GPL"); module_param_array(mtu, int, NULL, 0); module_param_array(media, charp, NULL, 0); module_param_array(vlan, int, NULL, 0); module_param_array(jumbo, int, NULL, 0); module_param(tx_flow, int, 0); module_param(rx_flow, int, 0); module_param(copy_thresh, int, 0); module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */ module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ /* Enable the default interrupts */ #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \ UpdateStats | LinkEvent) #define EnableInt() \ writew(DEFAULT_INTR, ioaddr + IntEnable) static const int max_intrloop = 50; static const int multicast_filter_limit = 0x40; static int rio_open (struct net_device *dev); static void rio_timer (unsigned long data); static void rio_tx_timeout (struct net_device *dev); static void alloc_list (struct net_device *dev); static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); static irqreturn_t rio_interrupt (int irq, void *dev_instance); static void rio_free_tx (struct net_device *dev, int irq); static void tx_error (struct net_device *dev, int tx_status); static int receive_packet (struct net_device *dev); static void rio_error (struct net_device *dev, int int_status); static int change_mtu (struct net_device *dev, int new_mtu); static void set_multicast (struct net_device *dev); static struct net_device_stats *get_stats (struct net_device *dev); static int clear_stats (struct net_device *dev); static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int rio_close (struct net_device *dev); static int find_miiphy (struct net_device *dev); static int parse_eeprom (struct net_device *dev); static int read_eeprom (long ioaddr, int eep_addr); static int mii_wait_link (struct net_device *dev, int wait); static int mii_set_media (struct net_device *dev); static int mii_get_media (struct net_device *dev); static int mii_set_media_pcs (struct net_device *dev); static int mii_get_media_pcs (struct net_device *dev); static int mii_read (struct net_device *dev, int phy_addr, int reg_num); static int mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data); static const struct ethtool_ops 
ethtool_ops; static const struct net_device_ops netdev_ops = { .ndo_open = rio_open, .ndo_start_xmit = start_xmit, .ndo_stop = rio_close, .ndo_get_stats = get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_set_rx_mode = set_multicast, .ndo_do_ioctl = rio_ioctl, .ndo_tx_timeout = rio_tx_timeout, .ndo_change_mtu = change_mtu, }; static int __devinit rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct netdev_private *np; static int card_idx; int chip_idx = ent->driver_data; int err, irq; long ioaddr; static int version_printed; void *ring_space; dma_addr_t ring_dma; if (!version_printed++) printk ("%s", version); err = pci_enable_device (pdev); if (err) return err; irq = pdev->irq; err = pci_request_regions (pdev, "dl2k"); if (err) goto err_out_disable; pci_set_master (pdev); dev = alloc_etherdev (sizeof (*np)); if (!dev) { err = -ENOMEM; goto err_out_res; } SET_NETDEV_DEV(dev, &pdev->dev); #ifdef MEM_MAPPING ioaddr = pci_resource_start (pdev, 1); ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE); if (!ioaddr) { err = -ENOMEM; goto err_out_dev; } #else ioaddr = pci_resource_start (pdev, 0); #endif dev->base_addr = ioaddr; dev->irq = irq; np = netdev_priv(dev); np->chip_id = chip_idx; np->pdev = pdev; spin_lock_init (&np->tx_lock); spin_lock_init (&np->rx_lock); /* Parse manual configuration */ np->an_enable = 1; np->tx_coalesce = 1; if (card_idx < MAX_UNITS) { if (media[card_idx] != NULL) { np->an_enable = 0; if (strcmp (media[card_idx], "auto") == 0 || strcmp (media[card_idx], "autosense") == 0 || strcmp (media[card_idx], "0") == 0 ) { np->an_enable = 2; } else if (strcmp (media[card_idx], "100mbps_fd") == 0 || strcmp (media[card_idx], "4") == 0) { np->speed = 100; np->full_duplex = 1; } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || strcmp (media[card_idx], "3") == 0) { np->speed = 100; np->full_duplex = 0; } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || strcmp (media[card_idx], "2") == 0) { np->speed = 10; np->full_duplex = 1; } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || strcmp (media[card_idx], "1") == 0) { np->speed = 10; np->full_duplex = 0; } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 || strcmp (media[card_idx], "6") == 0) { np->speed=1000; np->full_duplex=1; } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 || strcmp (media[card_idx], "5") == 0) { np->speed = 1000; np->full_duplex = 0; } else { np->an_enable = 1; } } if (jumbo[card_idx] != 0) { np->jumbo = 1; dev->mtu = MAX_JUMBO; } else { np->jumbo = 0; if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE) dev->mtu = mtu[card_idx]; } np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ? vlan[card_idx] : 0; if (rx_coalesce > 0 && rx_timeout > 0) { np->rx_coalesce = rx_coalesce; np->rx_timeout = rx_timeout; np->coalesce = 1; } np->tx_flow = (tx_flow == 0) ? 0 : 1; np->rx_flow = (rx_flow == 0) ? 
0 : 1; if (tx_coalesce < 1) tx_coalesce = 1; else if (tx_coalesce > TX_RING_SIZE-1) tx_coalesce = TX_RING_SIZE - 1; } dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(dev, &ethtool_ops); #if 0 dev->features = NETIF_F_IP_CSUM; #endif pci_set_drvdata (pdev, dev); ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_iounmap; np->tx_ring = ring_space; np->tx_ring_dma = ring_dma; ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_unmap_tx; np->rx_ring = ring_space; np->rx_ring_dma = ring_dma; /* Parse eeprom data */ parse_eeprom (dev); /* Find PHY address */ err = find_miiphy (dev); if (err) goto err_out_unmap_rx; /* Fiber device? */ np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0; np->link_status = 0; /* Set media and reset PHY */ if (np->phy_media) { /* default Auto-Negotiation for fiber devices */ if (np->an_enable == 2) { np->an_enable = 1; } mii_set_media_pcs (dev); } else { /* Auto-Negotiation is mandatory for 1000BASE-T, IEEE 802.3ab Annex 28D page 14 */ if (np->speed == 1000) np->an_enable = 1; mii_set_media (dev); } err = register_netdev (dev); if (err) goto err_out_unmap_rx; card_idx++; printk (KERN_INFO "%s: %s, %pM, IRQ %d\n", dev->name, np->name, dev->dev_addr, irq); if (tx_coalesce > 1) printk(KERN_INFO "tx_coalesce:\t%d packets\n", tx_coalesce); if (np->coalesce) printk(KERN_INFO "rx_coalesce:\t%d packets\n" "rx_timeout: \t%d ns\n", np->rx_coalesce, np->rx_timeout*640); if (np->vlan) printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); return 0; err_out_unmap_rx: pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); err_out_unmap_tx: pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_iounmap: #ifdef MEM_MAPPING iounmap ((void *) ioaddr); err_out_dev: #endif free_netdev (dev); err_out_res: pci_release_regions (pdev); err_out_disable: pci_disable_device (pdev); return err; } static int find_miiphy (struct net_device *dev) { int i, phy_found = 0; struct netdev_private *np; long ioaddr; np = netdev_priv(dev); ioaddr = dev->base_addr; np->phy_addr = 1; for (i = 31; i >= 0; i--) { int mii_status = mii_read (dev, i, 1); if (mii_status != 0xffff && mii_status != 0x0000) { np->phy_addr = i; phy_found++; } } if (!phy_found) { printk (KERN_ERR "%s: No MII PHY found!\n", dev->name); return -ENODEV; } return 0; } static int parse_eeprom (struct net_device *dev) { int i, j; long ioaddr = dev->base_addr; u8 sromdata[256]; u8 *psib; u32 crc; PSROM_t psrom = (PSROM_t) sromdata; struct netdev_private *np = netdev_priv(dev); int cid, next; #ifdef MEM_MAPPING ioaddr = pci_resource_start (np->pdev, 0); #endif /* Read eeprom */ for (i = 0; i < 128; i++) { ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i)); } #ifdef MEM_MAPPING ioaddr = dev->base_addr; #endif if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ /* Check CRC */ crc = ~ether_crc_le (256 - 4, sromdata); if (psrom->crc != cpu_to_le32(crc)) { printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name); return -1; } } /* Set MAC address */ for (i = 0; i < 6; i++) dev->dev_addr[i] = psrom->mac_addr[i]; if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { return 0; } /* Parse Software Information Block */ i = 0x30; psib = (u8 *) sromdata; do { cid = psib[i++]; next = psib[i++]; if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) { printk (KERN_ERR "Cell data error\n"); return -1; } switch (cid) { case 0: /* Format version */ break; case 
1: /* End of cell */ return 0; case 2: /* Duplex Polarity */ np->duplex_polarity = psib[i]; writeb (readb (ioaddr + PhyCtrl) | psib[i], ioaddr + PhyCtrl); break; case 3: /* Wake Polarity */ np->wake_polarity = psib[i]; break; case 9: /* Adapter description */ j = (next - i > 255) ? 255 : next - i; memcpy (np->name, &(psib[i]), j); break; case 4: case 5: case 6: case 7: case 8: /* Reserved */ break; default: /* Unknown cell */ return -1; } i = next; } while (1); return 0; } static int rio_open (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); long ioaddr = dev->base_addr; int i; u16 macctrl; i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev); if (i) return i; /* Reset all logic functions */ writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset, ioaddr + ASICCtrl + 2); mdelay(10); /* DebugCtrl bits 4, 5, 9 must be set */ writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl); /* Jumbo frame */ if (np->jumbo != 0) writew (MAX_JUMBO+14, ioaddr + MaxFrameSize); alloc_list (dev); /* Get station address */ for (i = 0; i < 6; i++) writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i); set_multicast (dev); if (np->coalesce) { writel (np->rx_coalesce | np->rx_timeout << 16, ioaddr + RxDMAIntCtrl); } /* Set RIO to poll every N*320nsec. */ writeb (0x20, ioaddr + RxDMAPollPeriod); writeb (0xff, ioaddr + TxDMAPollPeriod); writeb (0x30, ioaddr + RxDMABurstThresh); writeb (0x30, ioaddr + RxDMAUrgentThresh); writel (0x0007ffff, ioaddr + RmonStatMask); /* clear statistics */ clear_stats (dev); /* VLAN supported */ if (np->vlan) { /* priority field in RxDMAIntCtrl */ writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10, ioaddr + RxDMAIntCtrl); /* VLANId */ writew (np->vlan, ioaddr + VLANId); /* Length/Type should be 0x8100 */ writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag); /* Enable AutoVLANuntagging, but disable AutoVLANtagging. VLAN information tagged by TFC's VID, CFI fields. */ writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging, ioaddr + MACCtrl); } init_timer (&np->timer); np->timer.expires = jiffies + 1*HZ; np->timer.data = (unsigned long) dev; np->timer.function = rio_timer; add_timer (&np->timer); /* Start Tx/Rx */ writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl); macctrl = 0; macctrl |= (np->vlan) ? AutoVLANuntagging : 0; macctrl |= (np->full_duplex) ? DuplexSelect : 0; macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; macctrl |= (np->rx_flow) ? 
RxFlowControlEnable : 0; writew(macctrl, ioaddr + MACCtrl); netif_start_queue (dev); /* Enable default interrupts */ EnableInt (); return 0; } static void rio_timer (unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netdev_private *np = netdev_priv(dev); unsigned int entry; int next_tick = 1*HZ; unsigned long flags; spin_lock_irqsave(&np->rx_lock, flags); /* Recover rx ring exhausted error */ if (np->cur_rx - np->old_rx >= RX_RING_SIZE) { printk(KERN_INFO "Try to recover rx ring exhausted...\n"); /* Re-allocate skbuffs to fill the descriptor ring */ for (; np->cur_rx - np->old_rx > 0; np->old_rx++) { struct sk_buff *skb; entry = np->old_rx % RX_RING_SIZE; /* Dropped packets don't need to re-allocate */ if (np->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); if (skb == NULL) { np->rx_ring[entry].fraginfo = 0; printk (KERN_INFO "%s: Still unable to re-allocate Rx skbuff.#%d\n", dev->name, entry); break; } np->rx_skbuff[entry] = skb; np->rx_ring[entry].fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE)); } np->rx_ring[entry].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); np->rx_ring[entry].status = 0; } /* end for */ } /* end if */ spin_unlock_irqrestore (&np->rx_lock, flags); np->timer.expires = jiffies + next_tick; add_timer(&np->timer); } static void rio_tx_timeout (struct net_device *dev) { long ioaddr = dev->base_addr; printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", dev->name, readl (ioaddr + TxStatus)); rio_free_tx(dev, 0); dev->if_port = 0; dev->trans_start = jiffies; /* prevent tx timeout */ } /* allocate and initialize Tx and Rx descriptors */ static void alloc_list (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i; np->cur_rx = np->cur_tx = 0; np->old_rx = np->old_tx = 0; np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */ for (i = 0; i < TX_RING_SIZE; i++) { np->tx_skbuff[i] = NULL; np->tx_ring[i].status = cpu_to_le64 (TFDDone); np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma + ((i+1)%TX_RING_SIZE) * sizeof (struct netdev_desc)); } /* Initialize Rx descriptors */ for (i = 0; i < RX_RING_SIZE; i++) { np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma + ((i + 1) % RX_RING_SIZE) * sizeof (struct netdev_desc)); np->rx_ring[i].status = 0; np->rx_ring[i].fraginfo = 0; np->rx_skbuff[i] = NULL; } /* Allocate the rx buffers */ for (i = 0; i < RX_RING_SIZE; i++) { /* Allocated fixed size of skbuff */ struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); np->rx_skbuff[i] = skb; if (skb == NULL) { printk (KERN_ERR "%s: alloc_list: allocate Rx buffer error! ", dev->name); break; } /* Rubicon now supports 40 bits of addressing space. 
*/ np->rx_ring[i].fraginfo = cpu_to_le64 ( pci_map_single ( np->pdev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE)); np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); } /* Set RFDListPtr */ writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0); writel (0, dev->base_addr + RFDListPtr1); } static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); struct netdev_desc *txdesc; unsigned entry; u32 ioaddr; u64 tfc_vlan_tag = 0; if (np->link_status == 0) { /* Link Down */ dev_kfree_skb(skb); return NETDEV_TX_OK; } ioaddr = dev->base_addr; entry = np->cur_tx % TX_RING_SIZE; np->tx_skbuff[entry] = skb; txdesc = &np->tx_ring[entry]; #if 0 if (skb->ip_summed == CHECKSUM_PARTIAL) { txdesc->status |= cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable | IPChecksumEnable); } #endif if (np->vlan) { tfc_vlan_tag = VLANTagInsert | ((u64)np->vlan << 32) | ((u64)skb->priority << 45); } txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE)); txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48); /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode * Work around: Always use 1 descriptor in 10Mbps mode */ if (entry % np->tx_coalesce == 0 || np->speed == 10) txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | WordAlignDisable | TxDMAIndicate | (1 << FragCountShift)); else txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | WordAlignDisable | (1 << FragCountShift)); /* TxDMAPollNow */ writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl); /* Schedule ISR */ writel(10000, ioaddr + CountDown); np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE < TX_QUEUE_LEN - 1 && np->speed != 10) { /* do nothing */ } else if (!netif_queue_stopped(dev)) { netif_stop_queue (dev); } /* The first TFDListPtr */ if (readl (dev->base_addr + TFDListPtr0) == 0) { writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc), dev->base_addr + TFDListPtr0); writel (0, dev->base_addr + TFDListPtr1); } return NETDEV_TX_OK; } static irqreturn_t rio_interrupt (int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct netdev_private *np; unsigned int_status; long ioaddr; int cnt = max_intrloop; int handled = 0; ioaddr = dev->base_addr; np = netdev_priv(dev); while (1) { int_status = readw (ioaddr + IntStatus); writew (int_status, ioaddr + IntStatus); int_status &= DEFAULT_INTR; if (int_status == 0 || --cnt < 0) break; handled = 1; /* Processing received packets */ if (int_status & RxDMAComplete) receive_packet (dev); /* TxDMAComplete interrupt */ if ((int_status & (TxDMAComplete|IntRequested))) { int tx_status; tx_status = readl (ioaddr + TxStatus); if (tx_status & 0x01) tx_error (dev, tx_status); /* Free used tx skbuffs */ rio_free_tx (dev, 1); } /* Handle uncommon events */ if (int_status & (HostError | LinkEvent | UpdateStats)) rio_error (dev, int_status); } if (np->cur_tx != np->old_tx) writel (100, ioaddr + CountDown); return IRQ_RETVAL(handled); } static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) { return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); } static void rio_free_tx (struct net_device *dev, int irq) { struct netdev_private *np = netdev_priv(dev); int entry = np->old_tx % TX_RING_SIZE; int tx_use = 0; unsigned long flag = 0; if (irq) spin_lock(&np->tx_lock); else spin_lock_irqsave(&np->tx_lock, flag); /* Free used tx skbuffs */ while (entry != np->cur_tx) { struct sk_buff *skb; if 
(!(np->tx_ring[entry].status & cpu_to_le64(TFDDone))) break; skb = np->tx_skbuff[entry]; pci_unmap_single (np->pdev, desc_to_dma(&np->tx_ring[entry]), skb->len, PCI_DMA_TODEVICE); if (irq) dev_kfree_skb_irq (skb); else dev_kfree_skb (skb); np->tx_skbuff[entry] = NULL; entry = (entry + 1) % TX_RING_SIZE; tx_use++; } if (irq) spin_unlock(&np->tx_lock); else spin_unlock_irqrestore(&np->tx_lock, flag); np->old_tx = entry; /* If the ring is no longer full, clear tx_full and call netif_wake_queue() */ if (netif_queue_stopped(dev) && ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE < TX_QUEUE_LEN - 1 || np->speed == 10)) { netif_wake_queue (dev); } } static void tx_error (struct net_device *dev, int tx_status) { struct netdev_private *np; long ioaddr = dev->base_addr; int frame_id; int i; np = netdev_priv(dev); frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); np->stats.tx_errors++; /* Transmit Underrun */ if (tx_status & 0x10) { np->stats.tx_fifo_errors++; writew (readw (ioaddr + TxStartThresh) + 0x10, ioaddr + TxStartThresh); /* Transmit Underrun needs to set TxReset, DMAReset, FIFOReset */ writew (TxReset | DMAReset | FIFOReset | NetworkReset, ioaddr + ASICCtrl + 2); /* Wait for ResetBusy bit clear */ for (i = 50; i > 0; i--) { if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) break; mdelay (1); } rio_free_tx (dev, 1); /* Reset TFDListPtr */ writel (np->tx_ring_dma + np->old_tx * sizeof (struct netdev_desc), dev->base_addr + TFDListPtr0); writel (0, dev->base_addr + TFDListPtr1); /* Let TxStartThresh stay default value */ } /* Late Collision */ if (tx_status & 0x04) { np->stats.tx_fifo_errors++; /* TxReset and clear FIFO */ writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2); /* Wait reset done */ for (i = 50; i > 0; i--) { if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) break; mdelay (1); } /* Let TxStartThresh stay default value */ } /* Maximum Collisions */ #ifdef ETHER_STATS if (tx_status & 0x08) np->stats.collisions16++; #else if (tx_status & 0x08) np->stats.collisions++; #endif /* Restart the Tx */ writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl); } static int receive_packet (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int entry = np->cur_rx % RX_RING_SIZE; int cnt = 30; /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */ while (1) { struct netdev_desc *desc = &np->rx_ring[entry]; int pkt_len; u64 frame_status; if (!(desc->status & cpu_to_le64(RFDDone)) || !(desc->status & cpu_to_le64(FrameStart)) || !(desc->status & cpu_to_le64(FrameEnd))) break; /* Chip omits the CRC. */ frame_status = le64_to_cpu(desc->status); pkt_len = frame_status & 0xffff; if (--cnt < 0) break; /* Update rx error statistics, drop packet. 
*/ if (frame_status & RFS_Errors) { np->stats.rx_errors++; if (frame_status & (RxRuntFrame | RxLengthError)) np->stats.rx_length_errors++; if (frame_status & RxFCSError) np->stats.rx_crc_errors++; if (frame_status & RxAlignmentError && np->speed != 1000) np->stats.rx_frame_errors++; if (frame_status & RxFIFOOverrun) np->stats.rx_fifo_errors++; } else { struct sk_buff *skb = NULL; /* Small skbuffs for short packets */ if (pkt_len > copy_thresh) { pci_unmap_single (np->pdev, desc_to_dma(desc), np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_put (skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { pci_dma_sync_single_for_cpu(np->pdev, desc_to_dma(desc), np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_copy_to_linear_data (skb, np->rx_skbuff[entry]->data, pkt_len); skb_put (skb, pkt_len); pci_dma_sync_single_for_device(np->pdev, desc_to_dma(desc), np->rx_buf_sz, PCI_DMA_FROMDEVICE); } if (skb) { /* skb stays NULL if the copy allocation above failed; drop the frame */ skb->protocol = eth_type_trans (skb, dev); #if 0 /* Checksum done by hw, but csum value unavailable. */ if (np->pdev->pci_rev_id >= 0x0c && !(frame_status & (TCPError | UDPError | IPError))) { skb->ip_summed = CHECKSUM_UNNECESSARY; } #endif netif_rx (skb); } } entry = (entry + 1) % RX_RING_SIZE; } spin_lock(&np->rx_lock); np->cur_rx = entry; /* Re-allocate skbuffs to fill the descriptor ring */ entry = np->old_rx; while (entry != np->cur_rx) { struct sk_buff *skb; /* Dropped packets don't need to re-allocate */ if (np->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); if (skb == NULL) { np->rx_ring[entry].fraginfo = 0; printk (KERN_INFO "%s: receive_packet: " "Unable to re-allocate Rx skbuff.#%d\n", dev->name, entry); break; } np->rx_skbuff[entry] = skb; np->rx_ring[entry].fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE)); } np->rx_ring[entry].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); np->rx_ring[entry].status = 0; entry = (entry + 1) % RX_RING_SIZE; } np->old_rx = entry; spin_unlock(&np->rx_lock); return 0; } static void rio_error (struct net_device *dev, int int_status) { long ioaddr = dev->base_addr; struct netdev_private *np = netdev_priv(dev); u16 macctrl; /* Link change event */ if (int_status & LinkEvent) { if (mii_wait_link (dev, 10) == 0) { printk (KERN_INFO "%s: Link up\n", dev->name); if (np->phy_media) mii_get_media_pcs (dev); else mii_get_media (dev); if (np->speed == 1000) np->tx_coalesce = tx_coalesce; else np->tx_coalesce = 1; macctrl = 0; macctrl |= (np->vlan) ? AutoVLANuntagging : 0; macctrl |= (np->full_duplex) ? DuplexSelect : 0; macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0; writew(macctrl, ioaddr + MACCtrl); np->link_status = 1; netif_carrier_on(dev); } else { printk (KERN_INFO "%s: Link off\n", dev->name); np->link_status = 0; netif_carrier_off(dev); } } /* UpdateStats statistics registers */ if (int_status & UpdateStats) { get_stats (dev); } /* PCI Error, a catastrophic error related to the bus interface occurs; set GlobalReset and HostReset to reset. */ if (int_status & HostError) { printk (KERN_ERR "%s: HostError! 
IntStatus %4.4x.\n", dev->name, int_status); writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2); mdelay (500); } } static struct net_device_stats * get_stats (struct net_device *dev) { long ioaddr = dev->base_addr; struct netdev_private *np = netdev_priv(dev); #ifdef MEM_MAPPING int i; #endif unsigned int stat_reg; /* All statistics registers need to be acknowledged, otherwise a statistics overflow could cause problems */ np->stats.rx_packets += readl (ioaddr + FramesRcvOk); np->stats.tx_packets += readl (ioaddr + FramesXmtOk); np->stats.rx_bytes += readl (ioaddr + OctetRcvOk); np->stats.tx_bytes += readl (ioaddr + OctetXmtOk); np->stats.multicast = readl (ioaddr + McstFramesRcvdOk); np->stats.collisions += readl (ioaddr + SingleColFrames) + readl (ioaddr + MultiColFrames); /* detailed tx errors */ stat_reg = readw (ioaddr + FramesAbortXSColls); np->stats.tx_aborted_errors += stat_reg; np->stats.tx_errors += stat_reg; stat_reg = readw (ioaddr + CarrierSenseErrors); np->stats.tx_carrier_errors += stat_reg; np->stats.tx_errors += stat_reg; /* Clear all other statistics registers. */ readl (ioaddr + McstOctetXmtOk); readw (ioaddr + BcstFramesXmtdOk); readl (ioaddr + McstFramesXmtdOk); readw (ioaddr + BcstFramesRcvdOk); readw (ioaddr + MacControlFramesRcvd); readw (ioaddr + FrameTooLongErrors); readw (ioaddr + InRangeLengthErrors); readw (ioaddr + FramesCheckSeqErrors); readw (ioaddr + FramesLostRxErrors); readl (ioaddr + McstOctetXmtOk); readl (ioaddr + BcstOctetXmtOk); readl (ioaddr + McstFramesXmtdOk); readl (ioaddr + FramesWDeferredXmt); readl (ioaddr + LateCollisions); readw (ioaddr + BcstFramesXmtdOk); readw (ioaddr + MacControlFramesXmtd); readw (ioaddr + FramesWEXDeferal); #ifdef MEM_MAPPING for (i = 0x100; i <= 0x150; i += 4) readl (ioaddr + i); #endif readw (ioaddr + TxJumboFrames); readw (ioaddr + RxJumboFrames); readw (ioaddr + TCPCheckSumErrors); readw (ioaddr + UDPCheckSumErrors); readw (ioaddr + IPCheckSumErrors); return &np->stats; } static int clear_stats (struct net_device *dev) { long ioaddr = dev->base_addr; #ifdef MEM_MAPPING int i; #endif /* All statistics registers need to be acknowledged, otherwise a statistics overflow could cause problems */ readl (ioaddr + FramesRcvOk); readl (ioaddr + FramesXmtOk); readl (ioaddr + OctetRcvOk); readl (ioaddr + OctetXmtOk); readl (ioaddr + McstFramesRcvdOk); readl (ioaddr + SingleColFrames); readl (ioaddr + MultiColFrames); readl (ioaddr + LateCollisions); /* detailed rx errors */ readw (ioaddr + FrameTooLongErrors); readw (ioaddr + InRangeLengthErrors); readw (ioaddr + FramesCheckSeqErrors); readw (ioaddr + FramesLostRxErrors); /* detailed tx errors */ readw (ioaddr + FramesAbortXSColls); readw (ioaddr + CarrierSenseErrors); /* Clear all other statistics registers. 
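These counters are clear-on-read: reading each register acknowledges it and resets it to zero, so the return values are deliberately discarded. 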
*/ readl (ioaddr + McstOctetXmtOk); readw (ioaddr + BcstFramesXmtdOk); readl (ioaddr + McstFramesXmtdOk); readw (ioaddr + BcstFramesRcvdOk); readw (ioaddr + MacControlFramesRcvd); readl (ioaddr + McstOctetXmtOk); readl (ioaddr + BcstOctetXmtOk); readl (ioaddr + McstFramesXmtdOk); readl (ioaddr + FramesWDeferredXmt); readw (ioaddr + BcstFramesXmtdOk); readw (ioaddr + MacControlFramesXmtd); readw (ioaddr + FramesWEXDeferal); #ifdef MEM_MAPPING for (i = 0x100; i <= 0x150; i += 4) readl (ioaddr + i); #endif readw (ioaddr + TxJumboFrames); readw (ioaddr + RxJumboFrames); readw (ioaddr + TCPCheckSumErrors); readw (ioaddr + UDPCheckSumErrors); readw (ioaddr + IPCheckSumErrors); return 0; } static int change_mtu (struct net_device *dev, int new_mtu) { struct netdev_private *np = netdev_priv(dev); int max = (np->jumbo) ? MAX_JUMBO : 1536; if ((new_mtu < 68) || (new_mtu > max)) { return -EINVAL; } dev->mtu = new_mtu; return 0; } static void set_multicast (struct net_device *dev) { long ioaddr = dev->base_addr; u32 hash_table[2]; u16 rx_mode = 0; struct netdev_private *np = netdev_priv(dev); hash_table[0] = hash_table[1] = 0; /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */ hash_table[1] |= 0x02000000; if (dev->flags & IFF_PROMISC) { /* Receive all frames promiscuously. */ rx_mode = ReceiveAllFrames; } else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > multicast_filter_limit)) { /* Receive broadcast and multicast frames */ rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; /* Receive broadcast frames and multicast frames filtered by the hash table */ rx_mode = ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; netdev_for_each_mc_addr(ha, dev) { int bit, index = 0; int crc = ether_crc_le(ETH_ALEN, ha->addr); /* The inverted most significant 6 bits of the CRC are used as an index into the hash table */ for (bit = 0; bit < 6; bit++) if (crc & (1 << (31 - bit))) index |= (1 << bit); hash_table[index / 32] |= (1 << (index % 32)); } } else { rx_mode = ReceiveBroadcast | ReceiveUnicast; } if (np->vlan) { /* ReceiveVLANMatch field in ReceiveMode */ rx_mode |= ReceiveVLANMatch; } writel (hash_table[0], ioaddr + HashTable0); writel (hash_table[1], ioaddr + HashTable1); writew (rx_mode, ioaddr + ReceiveMode); } static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); strcpy(info->driver, "dl2k"); strcpy(info->version, DRV_VERSION); strcpy(info->bus_info, pci_name(np->pdev)); } static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct netdev_private *np = netdev_priv(dev); if (np->phy_media) { /* fiber device */ cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE; cmd->port = PORT_FIBRE; cmd->transceiver = XCVR_INTERNAL; } else { /* copper device */ cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII; cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full| ADVERTISED_Autoneg | ADVERTISED_MII; cmd->port = PORT_MII; cmd->transceiver = XCVR_INTERNAL; } if ( np->link_status ) { ethtool_cmd_speed_set(cmd, np->speed); cmd->duplex = np->full_duplex ? 
DUPLEX_FULL : DUPLEX_HALF; } else { ethtool_cmd_speed_set(cmd, -1); cmd->duplex = -1; } if ( np->an_enable) cmd->autoneg = AUTONEG_ENABLE; else cmd->autoneg = AUTONEG_DISABLE; cmd->phy_address = np->phy_addr; return 0; } static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct netdev_private *np = netdev_priv(dev); netif_carrier_off(dev); if (cmd->autoneg == AUTONEG_ENABLE) { if (np->an_enable) return 0; else { np->an_enable = 1; mii_set_media(dev); return 0; } } else { np->an_enable = 0; if (np->speed == 1000) { ethtool_cmd_speed_set(cmd, SPEED_100); cmd->duplex = DUPLEX_FULL; printk("Warning: cannot disable autonegotiation at 1000 Mbps; falling back to manual 100 Mbps, full duplex.\n"); } switch (ethtool_cmd_speed(cmd)) { case SPEED_10: np->speed = 10; np->full_duplex = (cmd->duplex == DUPLEX_FULL); break; case SPEED_100: np->speed = 100; np->full_duplex = (cmd->duplex == DUPLEX_FULL); break; case SPEED_1000: /* not supported */ default: return -EINVAL; } mii_set_media(dev); } return 0; } static u32 rio_get_link(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return np->link_status; } static const struct ethtool_ops ethtool_ops = { .get_drvinfo = rio_get_drvinfo, .get_settings = rio_get_settings, .set_settings = rio_set_settings, .get_link = rio_get_link, }; static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) { int phy_addr; struct netdev_private *np = netdev_priv(dev); struct mii_ioctl_data *miidata = if_mii(rq); phy_addr = np->phy_addr; switch (cmd) { case SIOCGMIIPHY: miidata->phy_id = phy_addr; break; case SIOCGMIIREG: miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num); break; case SIOCSMIIREG: if (!capable(CAP_NET_ADMIN)) return -EPERM; mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in); break; default: return -EOPNOTSUPP; } return 0; } #define EEP_READ 0x0200 #define EEP_BUSY 0x8000 /* Read the EEPROM word */ /* We use I/O instructions to read/write the EEPROM to avoid failures on some machines */ static int read_eeprom (long ioaddr, int eep_addr) { int i = 1000; outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl); while (i-- > 0) { if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) { return inw (ioaddr + EepromData); } } return 0; } enum phy_ctrl_bits { MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04, MII_DUPLEX = 0x08, }; #define mii_delay() readb(ioaddr) static void mii_sendbit (struct net_device *dev, u32 data) { long ioaddr = dev->base_addr + PhyCtrl; data = (data) ? 
MII_DATA1 : 0; data |= MII_WRITE; data |= (readb (ioaddr) & 0xf8) | MII_WRITE; writeb (data, ioaddr); mii_delay (); writeb (data | MII_CLK, ioaddr); mii_delay (); } static int mii_getbit (struct net_device *dev) { long ioaddr = dev->base_addr + PhyCtrl; u8 data; data = (readb (ioaddr) & 0xf8) | MII_READ; writeb (data, ioaddr); mii_delay (); writeb (data | MII_CLK, ioaddr); mii_delay (); return ((readb (ioaddr) >> 1) & 1); } static void mii_send_bits (struct net_device *dev, u32 data, int len) { int i; for (i = len - 1; i >= 0; i--) { mii_sendbit (dev, data & (1 << i)); } } static int mii_read (struct net_device *dev, int phy_addr, int reg_num) { u32 cmd; int i; u32 retval = 0; /* Preamble */ mii_send_bits (dev, 0xffffffff, 32); /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ /* ST,OP = 0110'b for read operation */ cmd = (0x06 << 10 | phy_addr << 5 | reg_num); mii_send_bits (dev, cmd, 14); /* Turnaround */ if (mii_getbit (dev)) goto err_out; /* Read data */ for (i = 0; i < 16; i++) { retval |= mii_getbit (dev); retval <<= 1; } /* End cycle */ mii_getbit (dev); return (retval >> 1) & 0xffff; err_out: return 0; } static int mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data) { u32 cmd; /* Preamble */ mii_send_bits (dev, 0xffffffff, 32); /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */ cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data; mii_send_bits (dev, cmd, 32); /* End cycle */ mii_getbit (dev); return 0; } static int mii_wait_link (struct net_device *dev, int wait) { __u16 bmsr; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; do { bmsr = mii_read (dev, phy_addr, MII_BMSR); if (bmsr & BMSR_LSTATUS) return 0; mdelay (1); } while (--wait > 0); return -1; } static int mii_get_media (struct net_device *dev) { __u16 negotiate; __u16 bmsr; __u16 mscr; __u16 mssr; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; bmsr = mii_read (dev, phy_addr, MII_BMSR); if (np->an_enable) { if (!(bmsr & BMSR_ANEGCOMPLETE)) { /* Auto-Negotiation not completed */ return -1; } negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) & mii_read (dev, phy_addr, MII_LPA); mscr = mii_read (dev, phy_addr, MII_CTRL1000); mssr = mii_read (dev, phy_addr, MII_STAT1000); if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) { np->speed = 1000; np->full_duplex = 1; printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); } else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) { np->speed = 1000; np->full_duplex = 0; printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n"); } else if (negotiate & ADVERTISE_100FULL) { np->speed = 100; np->full_duplex = 1; printk (KERN_INFO "Auto 100 Mbps, Full duplex\n"); } else if (negotiate & ADVERTISE_100HALF) { np->speed = 100; np->full_duplex = 0; printk (KERN_INFO "Auto 100 Mbps, Half duplex\n"); } else if (negotiate & ADVERTISE_10FULL) { np->speed = 10; np->full_duplex = 1; printk (KERN_INFO "Auto 10 Mbps, Full duplex\n"); } else if (negotiate & ADVERTISE_10HALF) { np->speed = 10; np->full_duplex = 0; printk (KERN_INFO "Auto 10 Mbps, Half duplex\n"); } if (negotiate & ADVERTISE_PAUSE_CAP) { np->tx_flow &= 1; np->rx_flow &= 1; } else if (negotiate & ADVERTISE_PAUSE_ASYM) { np->tx_flow = 0; np->rx_flow &= 1; } /* else tx_flow, rx_flow = user select */ } else { __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR); switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) { case BMCR_SPEED1000: 
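/* autoneg is off, so decode the forced speed straight from the BMCR speed bits; the all-zero pattern handled by 'case 0' below means 10 Mbps */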
printk (KERN_INFO "Operating at 1000 Mbps, "); break; case BMCR_SPEED100: printk (KERN_INFO "Operating at 100 Mbps, "); break; case 0: printk (KERN_INFO "Operating at 10 Mbps, "); } if (bmcr & BMCR_FULLDPLX) { printk (KERN_CONT "Full duplex\n"); } else { printk (KERN_CONT "Half duplex\n"); } } if (np->tx_flow) printk(KERN_INFO "Enable Tx Flow Control\n"); else printk(KERN_INFO "Disable Tx Flow Control\n"); if (np->rx_flow) printk(KERN_INFO "Enable Rx Flow Control\n"); else printk(KERN_INFO "Disable Rx Flow Control\n"); return 0; } static int mii_set_media (struct net_device *dev) { __u16 pscr; __u16 bmcr; __u16 bmsr; __u16 anar; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; /* Does user set speed? */ if (np->an_enable) { /* Advertise capabilities */ bmsr = mii_read (dev, phy_addr, MII_BMSR); anar = mii_read (dev, phy_addr, MII_ADVERTISE) & ~(ADVERTISE_100FULL | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_10HALF | ADVERTISE_100BASE4); if (bmsr & BMSR_100FULL) anar |= ADVERTISE_100FULL; if (bmsr & BMSR_100HALF) anar |= ADVERTISE_100HALF; if (bmsr & BMSR_100BASE4) anar |= ADVERTISE_100BASE4; if (bmsr & BMSR_10FULL) anar |= ADVERTISE_10FULL; if (bmsr & BMSR_10HALF) anar |= ADVERTISE_10HALF; anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; mii_write (dev, phy_addr, MII_ADVERTISE, anar); /* Enable Auto crossover */ pscr = mii_read (dev, phy_addr, MII_PHY_SCR); pscr |= 3 << 5; /* 11'b */ mii_write (dev, phy_addr, MII_PHY_SCR, pscr); /* Soft reset PHY */ mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET); bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET; mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(1); } else { /* Force speed setting */ /* 1) Disable Auto crossover */ pscr = mii_read (dev, phy_addr, MII_PHY_SCR); pscr &= ~(3 << 5); mii_write (dev, phy_addr, MII_PHY_SCR, pscr); /* 2) PHY Reset */ bmcr = mii_read (dev, phy_addr, MII_BMCR); bmcr |= BMCR_RESET; mii_write (dev, phy_addr, MII_BMCR, bmcr); /* 3) Power Down */ bmcr = 0x1940; /* must be 0x1940 */ mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay (100); /* wait a certain time */ /* 4) Advertise nothing */ mii_write (dev, phy_addr, MII_ADVERTISE, 0); /* 5) Set media and Power Up */ bmcr = BMCR_PDOWN; if (np->speed == 100) { bmcr |= BMCR_SPEED100; printk (KERN_INFO "Manual 100 Mbps, "); } else if (np->speed == 10) { printk (KERN_INFO "Manual 10 Mbps, "); } if (np->full_duplex) { bmcr |= BMCR_FULLDPLX; printk (KERN_CONT "Full duplex\n"); } else { printk (KERN_CONT "Half duplex\n"); } #if 0 /* Set 1000BaseT Master/Slave setting */ mscr = mii_read (dev, phy_addr, MII_CTRL1000); mscr |= MII_MSCR_CFG_ENABLE; mscr &= ~MII_MSCR_CFG_VALUE = 0; #endif mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(10); } return 0; } static int mii_get_media_pcs (struct net_device *dev) { __u16 negotiate; __u16 bmsr; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; bmsr = mii_read (dev, phy_addr, PCS_BMSR); if (np->an_enable) { if (!(bmsr & BMSR_ANEGCOMPLETE)) { /* Auto-Negotiation not completed */ return -1; } negotiate = mii_read (dev, phy_addr, PCS_ANAR) & mii_read (dev, phy_addr, PCS_ANLPAR); np->speed = 1000; if (negotiate & PCS_ANAR_FULL_DUPLEX) { printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); np->full_duplex = 1; } else { printk (KERN_INFO "Auto 1000 Mbps, half duplex\n"); np->full_duplex = 0; } if (negotiate & PCS_ANAR_PAUSE) { np->tx_flow &= 1; np->rx_flow &= 1; } else if (negotiate & PCS_ANAR_ASYMMETRIC) { np->tx_flow = 0; np->rx_flow &= 1; } /* else 
tx_flow, rx_flow = user select */ } else { __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR); printk (KERN_INFO "Operating at 1000 Mbps, "); if (bmcr & BMCR_FULLDPLX) { printk (KERN_CONT "Full duplex\n"); } else { printk (KERN_CONT "Half duplex\n"); } } if (np->tx_flow) printk(KERN_INFO "Enable Tx Flow Control\n"); else printk(KERN_INFO "Disable Tx Flow Control\n"); if (np->rx_flow) printk(KERN_INFO "Enable Rx Flow Control\n"); else printk(KERN_INFO "Disable Rx Flow Control\n"); return 0; } static int mii_set_media_pcs (struct net_device *dev) { __u16 bmcr; __u16 esr; __u16 anar; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; /* Auto-Negotiation? */ if (np->an_enable) { /* Advertise capabilities */ esr = mii_read (dev, phy_addr, PCS_ESR); anar = mii_read (dev, phy_addr, MII_ADVERTISE) & ~PCS_ANAR_HALF_DUPLEX & ~PCS_ANAR_FULL_DUPLEX; if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD)) anar |= PCS_ANAR_HALF_DUPLEX; if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD)) anar |= PCS_ANAR_FULL_DUPLEX; anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC; mii_write (dev, phy_addr, MII_ADVERTISE, anar); /* Soft reset PHY */ mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET); bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET; mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(1); } else { /* Force speed setting */ /* PHY Reset */ bmcr = BMCR_RESET; mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(10); if (np->full_duplex) { bmcr = BMCR_FULLDPLX; printk (KERN_INFO "Manual full duplex\n"); } else { bmcr = 0; printk (KERN_INFO "Manual half duplex\n"); } mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(10); /* Advertise nothing */ mii_write (dev, phy_addr, MII_ADVERTISE, 0); } return 0; } static int rio_close (struct net_device *dev) { long ioaddr = dev->base_addr; struct netdev_private *np = netdev_priv(dev); struct sk_buff *skb; int i; netif_stop_queue (dev); /* Disable interrupts */ writew (0, ioaddr + IntEnable); /* Stop Tx and Rx logics */ writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl); free_irq (dev->irq, dev); del_timer_sync (&np->timer); /* Free all the skbuffs in the queue. 
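Both rings are drained in full: Rx entries are unmapped, freed and their descriptors cleared so a later rio_open() starts from a known-empty ring, while Tx entries only need their pending skbs released. 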
*/ for (i = 0; i < RX_RING_SIZE; i++) { skb = np->rx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]), skb->len, PCI_DMA_FROMDEVICE); dev_kfree_skb (skb); np->rx_skbuff[i] = NULL; } np->rx_ring[i].status = 0; np->rx_ring[i].fraginfo = 0; } for (i = 0; i < TX_RING_SIZE; i++) { skb = np->tx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]), skb->len, PCI_DMA_TODEVICE); dev_kfree_skb (skb); np->tx_skbuff[i] = NULL; } } return 0; } static void __devexit rio_remove1 (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); if (dev) { struct netdev_private *np = netdev_priv(dev); unregister_netdev (dev); pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); #ifdef MEM_MAPPING iounmap ((char *) (dev->base_addr)); #endif free_netdev (dev); pci_release_regions (pdev); pci_disable_device (pdev); } pci_set_drvdata (pdev, NULL); } static struct pci_driver rio_driver = { .name = "dl2k", .id_table = rio_pci_tbl, .probe = rio_probe1, .remove = __devexit_p(rio_remove1), }; static int __init rio_init (void) { return pci_register_driver(&rio_driver); } static void __exit rio_exit (void) { pci_unregister_driver (&rio_driver); } module_init (rio_init); module_exit (rio_exit); /* Compile command: gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c Read Documentation/networking/dl2k.txt for details. */
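/* A minimal sketch (not part of dl2k.c; the helper below is hypothetical)
 * of the privileged-ioctl pattern used in rio_ioctl() above: the
 * SIOCSMIIREG branch gates a raw PHY register write on CAP_NET_ADMIN,
 * while the read-only SIOCGMIIPHY/SIOCGMIIREG paths stay unprivileged.
 * Any state-changing private ioctl should follow the same shape. */
#ifdef DL2K_EDITOR_SKETCH
static int example_priv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* Check the capability first, before parsing arguments or
	 * touching hardware, so an unprivileged caller cannot cause
	 * any side effects. */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* ... validate rq/cmd and perform the privileged write here ... */
	return 0;
}
#endif /* DL2K_EDITOR_SKETCH */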
./CrossVul/dataset_final_sorted/CWE-264/c/good_3646_0
crossvul-cpp_data_good_2190_5
/* * linux/kernel/capability.c * * Copyright (C) 1997 Andrew Main <zefram@fysh.org> * * Integrated into 2.1.97+, Andrew G. Morgan <morgan@kernel.org> * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/audit.h> #include <linux/capability.h> #include <linux/mm.h> #include <linux/export.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/pid_namespace.h> #include <linux/user_namespace.h> #include <asm/uaccess.h> /* * Leveraged for setting/resetting capabilities */ const kernel_cap_t __cap_empty_set = CAP_EMPTY_SET; EXPORT_SYMBOL(__cap_empty_set); int file_caps_enabled = 1; static int __init file_caps_disable(char *str) { file_caps_enabled = 0; return 1; } __setup("no_file_caps", file_caps_disable); /* * More recent versions of libcap are available from: * * http://www.kernel.org/pub/linux/libs/security/linux-privs/ */ static void warn_legacy_capability_use(void) { char name[sizeof(current->comm)]; pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n", get_task_comm(name, current)); } /* * Version 2 capabilities worked fine, but the linux/capability.h file * that accompanied their introduction encouraged their use without * the necessary user-space source code changes. As such, we have * created a version 3 with equivalent functionality to version 2, but * with a header change to protect legacy source code from using * version 2 when it wanted to use version 1. If your system has code * that trips the following warning, it is using version 2 specific * capabilities and may be doing so insecurely. * * The remedy is to either upgrade your version of libcap (to 2.10+, * if the application is linked against it), or recompile your * application with modern kernel headers and this warning will go * away. */ static void warn_deprecated_v2(void) { char name[sizeof(current->comm)]; pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n", get_task_comm(name, current)); } /* * Version check. Return the number of u32s in each capability flag * array, or a negative value on error. */ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy) { __u32 version; if (get_user(version, &header->version)) return -EFAULT; switch (version) { case _LINUX_CAPABILITY_VERSION_1: warn_legacy_capability_use(); *tocopy = _LINUX_CAPABILITY_U32S_1; break; case _LINUX_CAPABILITY_VERSION_2: warn_deprecated_v2(); /* * fall through - v3 is otherwise equivalent to v2. */ case _LINUX_CAPABILITY_VERSION_3: *tocopy = _LINUX_CAPABILITY_U32S_3; break; default: if (put_user((u32)_KERNEL_CAPABILITY_VERSION, &header->version)) return -EFAULT; return -EINVAL; } return 0; } /* * The only thing that can change the capabilities of the current * process is the current process. As such, we can't be in this code * at the same time as we are in the process of setting capabilities * in this process. The net result is that we can limit our use of * locks to when we are reading the caps of another process. */ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, kernel_cap_t *pIp, kernel_cap_t *pPp) { int ret; if (pid && (pid != task_pid_vnr(current))) { struct task_struct *target; rcu_read_lock(); target = find_task_by_vpid(pid); if (!target) ret = -ESRCH; else ret = security_capget(target, pEp, pIp, pPp); rcu_read_unlock(); } else ret = security_capget(current, pEp, pIp, pPp); return ret; } /** * sys_capget - get the capabilities of a given process. 
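* The target may be the current task or, for a nonzero pid, another task that is looked up under the RCU read lock.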
* @header: pointer to struct that contains capability version and * target pid data * @dataptr: pointer to struct that contains the effective, permitted, * and inheritable capabilities that are returned * * Returns 0 on success and < 0 on error. */ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr) { int ret = 0; pid_t pid; unsigned tocopy; kernel_cap_t pE, pI, pP; ret = cap_validate_magic(header, &tocopy); if ((dataptr == NULL) || (ret != 0)) return ((dataptr == NULL) && (ret == -EINVAL)) ? 0 : ret; if (get_user(pid, &header->pid)) return -EFAULT; if (pid < 0) return -EINVAL; ret = cap_get_target_pid(pid, &pE, &pI, &pP); if (!ret) { struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; unsigned i; for (i = 0; i < tocopy; i++) { kdata[i].effective = pE.cap[i]; kdata[i].permitted = pP.cap[i]; kdata[i].inheritable = pI.cap[i]; } /* * Note: in the case tocopy < _KERNEL_CAPABILITY_U32S, * we silently drop the upper capabilities here. This * has the effect of making older libcap * implementations implicitly drop upper capability * bits when they perform a capget/modify/capset * sequence. * * This behavior is considered fail-safe * behavior. Upgrading the application to a newer * version of libcap will enable access to the newer * capabilities. * * An alternative would be to return an error here * (-ERANGE), but that causes legacy applications to * unexpectedly fail; the capget/modify/capset aborts * before modification is attempted and the application * fails. */ if (copy_to_user(dataptr, kdata, tocopy * sizeof(struct __user_cap_data_struct))) { return -EFAULT; } } return ret; } /** * sys_capset - set capabilities for a process or (*) a group of processes * @header: pointer to struct that contains capability version and * target pid data * @data: pointer to struct that contains the effective, permitted, * and inheritable capabilities * * Set capabilities for the current process only. The ability to set * capabilities for any other process(es) has been deprecated and removed. * * The restrictions on setting capabilities are specified as: * * I: any raised capabilities must be a subset of the old permitted * P: any raised capabilities must be a subset of the old permitted * E: must be set to a subset of new permitted * * Returns 0 on success and < 0 on error. 
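* * These restrictions are enforced by security_capset() against the current credentials before the new credentials are committed with commit_creds().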
*/ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) { struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; unsigned i, tocopy, copybytes; kernel_cap_t inheritable, permitted, effective; struct cred *new; int ret; pid_t pid; ret = cap_validate_magic(header, &tocopy); if (ret != 0) return ret; if (get_user(pid, &header->pid)) return -EFAULT; /* may only affect current now */ if (pid != 0 && pid != task_pid_vnr(current)) return -EPERM; copybytes = tocopy * sizeof(struct __user_cap_data_struct); if (copybytes > sizeof(kdata)) return -EFAULT; if (copy_from_user(&kdata, data, copybytes)) return -EFAULT; for (i = 0; i < tocopy; i++) { effective.cap[i] = kdata[i].effective; permitted.cap[i] = kdata[i].permitted; inheritable.cap[i] = kdata[i].inheritable; } while (i < _KERNEL_CAPABILITY_U32S) { effective.cap[i] = 0; permitted.cap[i] = 0; inheritable.cap[i] = 0; i++; } new = prepare_creds(); if (!new) return -ENOMEM; ret = security_capset(new, current_cred(), &effective, &inheritable, &permitted); if (ret < 0) goto error; audit_log_capset(new, current_cred()); return commit_creds(new); error: abort_creds(new); return ret; } /** * has_ns_capability - Does a task have a capability in a specific user ns * @t: The task in question * @ns: target user namespace * @cap: The capability to be tested for * * Return true if the specified task has the given superior capability * currently in effect to the specified user namespace, false if not. * * Note that this does not set PF_SUPERPRIV on the task. */ bool has_ns_capability(struct task_struct *t, struct user_namespace *ns, int cap) { int ret; rcu_read_lock(); ret = security_capable(__task_cred(t), ns, cap); rcu_read_unlock(); return (ret == 0); } /** * has_capability - Does a task have a capability in init_user_ns * @t: The task in question * @cap: The capability to be tested for * * Return true if the specified task has the given superior capability * currently in effect to the initial user namespace, false if not. * * Note that this does not set PF_SUPERPRIV on the task. */ bool has_capability(struct task_struct *t, int cap) { return has_ns_capability(t, &init_user_ns, cap); } /** * has_ns_capability_noaudit - Does a task have a capability (unaudited) * in a specific user ns. * @t: The task in question * @ns: target user namespace * @cap: The capability to be tested for * * Return true if the specified task has the given superior capability * currently in effect to the specified user namespace, false if not. * Do not write an audit message for the check. * * Note that this does not set PF_SUPERPRIV on the task. */ bool has_ns_capability_noaudit(struct task_struct *t, struct user_namespace *ns, int cap) { int ret; rcu_read_lock(); ret = security_capable_noaudit(__task_cred(t), ns, cap); rcu_read_unlock(); return (ret == 0); } /** * has_capability_noaudit - Does a task have a capability (unaudited) in the * initial user ns * @t: The task in question * @cap: The capability to be tested for * * Return true if the specified task has the given superior capability * currently in effect to init_user_ns, false if not. Don't write an * audit message for the check. * * Note that this does not set PF_SUPERPRIV on the task. 
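* The noaudit variants are meant for speculative checks, where a denial is an expected outcome and should not generate an audit record.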
*/ bool has_capability_noaudit(struct task_struct *t, int cap) { return has_ns_capability_noaudit(t, &init_user_ns, cap); } /** * ns_capable - Determine if the current task has a superior capability in effect * @ns: The user namespace we want the capability in * @cap: The capability to be tested for * * Return true if the current task has the given superior capability currently * available for use, false if not. * * This sets PF_SUPERPRIV on the task if the capability is available on the * assumption that it's about to be used. */ bool ns_capable(struct user_namespace *ns, int cap) { if (unlikely(!cap_valid(cap))) { pr_crit("capable() called with invalid cap=%u\n", cap); BUG(); } if (security_capable(current_cred(), ns, cap) == 0) { current->flags |= PF_SUPERPRIV; return true; } return false; } EXPORT_SYMBOL(ns_capable); /** * file_ns_capable - Determine if the file's opener had a capability in effect * @file: The file we want to check * @ns: The user namespace we want the capability in * @cap: The capability to be tested for * * Return true if the task that opened the file had a capability in effect * when the file was opened. * * This does not set PF_SUPERPRIV because the caller may not * actually be privileged. */ bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap) { if (WARN_ON_ONCE(!cap_valid(cap))) return false; if (security_capable(file->f_cred, ns, cap) == 0) return true; return false; } EXPORT_SYMBOL(file_ns_capable); /** * capable - Determine if the current task has a superior capability in effect * @cap: The capability to be tested for * * Return true if the current task has the given superior capability currently * available for use, false if not. * * This sets PF_SUPERPRIV on the task if the capability is available on the * assumption that it's about to be used. */ bool capable(int cap) { return ns_capable(&init_user_ns, cap); } EXPORT_SYMBOL(capable); /** * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped * @inode: The inode in question * @cap: The capability in question * * Return true if the current task has the given capability targeted at * its own user namespace and that the given inode's uid and gid are * mapped into the current user namespace. */ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap) { struct user_namespace *ns = current_user_ns(); return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) && kgid_has_mapping(ns, inode->i_gid); } EXPORT_SYMBOL(capable_wrt_inode_uidgid);
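/* A minimal usage sketch (not part of capability.c; the helper below is
 * hypothetical): filesystem-style permission code should prefer
 * capable_wrt_inode_uidgid() over plain capable(), so that a capability
 * held in a user namespace only overrides DAC for inodes whose uid and
 * gid actually map into that namespace. */
#ifdef CAPABILITY_USAGE_SKETCH
static bool example_may_override_dac(const struct inode *inode)
{
	/* True only when CAP_DAC_OVERRIDE is held in a namespace that can
	 * see this inode's ownership; a namespace root without a mapping
	 * for the inode's uid/gid is refused. */
	return capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE);
}
#endif /* CAPABILITY_USAGE_SKETCH */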
./CrossVul/dataset_final_sorted/CWE-264/c/good_2190_5