Dataset schema (one record per commit):
sha: string, length 40
remote_url: string, 3 distinct values
labels: class label, 2 classes
commit_msg: string, length 16 to 10.3k
function: string, length 30 to 17.6k
846feac2ae1d1dab08c0048807ce802a256179fd
https://github.com/qemu/qemu
not_vulnerable
hw/m68k/q800: fix PROM checksum and MAC address storage The checksum used by MacOS to validate the PROM content is an exclusive-OR rather than a sum over the corresponding bytes. In addition the MAC address must be stored in bit-reversed format as indicated in comments in Linux's macsonic.c. With the PROM contents fixed MacOS starts to probe the device registers when AppleTalk is enabled in the Control Panel. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Tested-by: Finn Thain <fthain@linux-m68k.org> Message-Id: <20210625065401.30170-8-mark.cave-ayland@ilande.co.uk> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
static void q800_init(MachineState *machine) { M68kCPU *cpu = NULL; int linux_boot; int32_t kernel_size; uint64_t elf_entry; char *filename; int bios_size; ram_addr_t initrd_base; int32_t initrd_size; MemoryRegion *rom; MemoryRegion *io; MemoryRegion *dp8393x_prom = g_new(MemoryRegion, 1); uint8_t *prom; const int io_slice_nb = (IO_SIZE / IO_SLICE) - 1; int i, checksum; ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *bios_name = machine->firmware ?: MACROM_FILENAME; hwaddr parameters_base; CPUState *cs; DeviceState *dev; DeviceState *via_dev; DeviceState *escc_orgate; SysBusESPState *sysbus_esp; ESPState *esp; SysBusDevice *sysbus; BusState *adb_bus; NubusBus *nubus; DeviceState *glue; DriveInfo *dinfo; linux_boot = (kernel_filename != NULL); if (ram_size > 1 * GiB) { error_report("Too much memory for this machine: %" PRId64 " MiB, " "maximum 1024 MiB", ram_size / MiB); exit(1); } /* init CPUs */ cpu = M68K_CPU(cpu_create(machine->cpu_type)); qemu_register_reset(main_cpu_reset, cpu); /* RAM */ memory_region_add_subregion(get_system_memory(), 0, machine->ram); /* * Memory from IO_BASE to IO_BASE + IO_SLICE is repeated * from IO_BASE + IO_SLICE to IO_BASE + IO_SIZE */ io = g_new(MemoryRegion, io_slice_nb); for (i = 0; i < io_slice_nb; i++) { char *name = g_strdup_printf("mac_m68k.io[%d]", i + 1); memory_region_init_alias(&io[i], NULL, name, get_system_memory(), IO_BASE, IO_SLICE); memory_region_add_subregion(get_system_memory(), IO_BASE + (i + 1) * IO_SLICE, &io[i]); g_free(name); } /* IRQ Glue */ glue = qdev_new(TYPE_GLUE); object_property_set_link(OBJECT(glue), "cpu", OBJECT(cpu), &error_abort); sysbus_realize_and_unref(SYS_BUS_DEVICE(glue), &error_fatal); /* VIA */ via_dev = qdev_new(TYPE_MAC_VIA); dinfo = drive_get(IF_MTD, 0, 0); if (dinfo) { qdev_prop_set_drive(via_dev, "drive", blk_by_legacy_dinfo(dinfo)); } sysbus = SYS_BUS_DEVICE(via_dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 0, VIA_BASE); qdev_connect_gpio_out_named(DEVICE(sysbus), "irq", 0, qdev_get_gpio_in(glue, 0)); qdev_connect_gpio_out_named(DEVICE(sysbus), "irq", 1, qdev_get_gpio_in(glue, 1)); adb_bus = qdev_get_child_bus(via_dev, "adb.0"); dev = qdev_new(TYPE_ADB_KEYBOARD); qdev_realize_and_unref(dev, adb_bus, &error_fatal); dev = qdev_new(TYPE_ADB_MOUSE); qdev_realize_and_unref(dev, adb_bus, &error_fatal); /* MACSONIC */ if (nb_nics > 1) { error_report("q800 can only have one ethernet interface"); exit(1); } qemu_check_nic_model(&nd_table[0], "dp83932"); /* * MacSonic driver needs an Apple MAC address * Valid prefix are: * 00:05:02 Apple * 00:80:19 Dayna Communications, Inc. 
* 00:A0:40 Apple * 08:00:07 Apple * (Q800 use the last one) */ nd_table[0].macaddr.a[0] = 0x08; nd_table[0].macaddr.a[1] = 0x00; nd_table[0].macaddr.a[2] = 0x07; dev = qdev_new("dp8393x"); qdev_set_nic_properties(dev, &nd_table[0]); qdev_prop_set_uint8(dev, "it_shift", 2); qdev_prop_set_bit(dev, "big_endian", true); object_property_set_link(OBJECT(dev), "dma_mr", OBJECT(get_system_memory()), &error_abort); sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 0, SONIC_BASE); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, 2)); memory_region_init_rom(dp8393x_prom, NULL, "dp8393x-q800.prom", SONIC_PROM_SIZE, &error_fatal); memory_region_add_subregion(get_system_memory(), SONIC_PROM_BASE, dp8393x_prom); /* Add MAC address with valid checksum to PROM */ prom = memory_region_get_ram_ptr(dp8393x_prom); checksum = 0; for (i = 0; i < 6; i++) { prom[i] = bitrev8(nd_table[0].macaddr.a[i]); checksum ^= prom[i]; } prom[7] = 0xff - checksum; /* SCC */ dev = qdev_new(TYPE_ESCC); qdev_prop_set_uint32(dev, "disabled", 0); qdev_prop_set_uint32(dev, "frequency", MAC_CLOCK); qdev_prop_set_uint32(dev, "it_shift", 1); qdev_prop_set_bit(dev, "bit_swap", true); qdev_prop_set_chr(dev, "chrA", serial_hd(0)); qdev_prop_set_chr(dev, "chrB", serial_hd(1)); qdev_prop_set_uint32(dev, "chnBtype", 0); qdev_prop_set_uint32(dev, "chnAtype", 0); sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); /* Logically OR both its IRQs together */ escc_orgate = DEVICE(object_new(TYPE_OR_IRQ)); object_property_set_int(OBJECT(escc_orgate), "num-lines", 2, &error_fatal); qdev_realize_and_unref(escc_orgate, NULL, &error_fatal); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(escc_orgate, 0)); sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in(escc_orgate, 1)); qdev_connect_gpio_out(DEVICE(escc_orgate), 0, qdev_get_gpio_in(glue, 3)); sysbus_mmio_map(sysbus, 0, SCC_BASE); /* SCSI */ dev = qdev_new(TYPE_SYSBUS_ESP); sysbus_esp = SYSBUS_ESP(dev); esp = &sysbus_esp->esp; esp->dma_memory_read = NULL; esp->dma_memory_write = NULL; esp->dma_opaque = NULL; sysbus_esp->it_shift = 4; esp->dma_enabled = 1; sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in_named(via_dev, "via2-irq", VIA2_IRQ_SCSI_BIT)); sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in_named(via_dev, "via2-irq", VIA2_IRQ_SCSI_DATA_BIT)); sysbus_mmio_map(sysbus, 0, ESP_BASE); sysbus_mmio_map(sysbus, 1, ESP_PDMA); scsi_bus_legacy_handle_cmdline(&esp->bus); /* SWIM floppy controller */ dev = qdev_new(TYPE_SWIM); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, SWIM_BASE); /* NuBus */ dev = qdev_new(TYPE_MAC_NUBUS_BRIDGE); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, NUBUS_SUPER_SLOT_BASE); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, NUBUS_SLOT_BASE); nubus = MAC_NUBUS_BRIDGE(dev)->bus; /* framebuffer in nubus slot #9 */ dev = qdev_new(TYPE_NUBUS_MACFB); qdev_prop_set_uint32(dev, "width", graphic_width); qdev_prop_set_uint32(dev, "height", graphic_height); qdev_prop_set_uint8(dev, "depth", graphic_depth); qdev_realize_and_unref(dev, BUS(nubus), &error_fatal); cs = CPU(cpu); if (linux_boot) { uint64_t high; kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, NULL, &high, NULL, 1, EM_68K, 0, 0); if (kernel_size < 0) { error_report("could not load kernel '%s'", kernel_filename); exit(1); } stl_phys(cs->as, 4, elf_entry); /* 
reset initial PC */ parameters_base = (high + 1) & ~1; BOOTINFO1(cs->as, parameters_base, BI_MACHTYPE, MACH_MAC); BOOTINFO1(cs->as, parameters_base, BI_FPUTYPE, FPU_68040); BOOTINFO1(cs->as, parameters_base, BI_MMUTYPE, MMU_68040); BOOTINFO1(cs->as, parameters_base, BI_CPUTYPE, CPU_68040); BOOTINFO1(cs->as, parameters_base, BI_MAC_CPUID, CPUB_68040); BOOTINFO1(cs->as, parameters_base, BI_MAC_MODEL, MAC_MODEL_Q800); BOOTINFO1(cs->as, parameters_base, BI_MAC_MEMSIZE, ram_size >> 20); /* in MB */ BOOTINFO2(cs->as, parameters_base, BI_MEMCHUNK, 0, ram_size); BOOTINFO1(cs->as, parameters_base, BI_MAC_VADDR, VIDEO_BASE); BOOTINFO1(cs->as, parameters_base, BI_MAC_VDEPTH, graphic_depth); BOOTINFO1(cs->as, parameters_base, BI_MAC_VDIM, (graphic_height << 16) | graphic_width); BOOTINFO1(cs->as, parameters_base, BI_MAC_VROW, (graphic_width * graphic_depth + 7) / 8); BOOTINFO1(cs->as, parameters_base, BI_MAC_SCCBASE, SCC_BASE); rom = g_malloc(sizeof(*rom)); memory_region_init_ram_ptr(rom, NULL, "m68k_fake_mac.rom", sizeof(fake_mac_rom), fake_mac_rom); memory_region_set_readonly(rom, true); memory_region_add_subregion(get_system_memory(), MACROM_ADDR, rom); if (kernel_cmdline) { BOOTINFOSTR(cs->as, parameters_base, BI_COMMAND_LINE, kernel_cmdline); } /* load initrd */ if (initrd_filename) { initrd_size = get_image_size(initrd_filename); if (initrd_size < 0) { error_report("could not load initial ram disk '%s'", initrd_filename); exit(1); } initrd_base = (ram_size - initrd_size) & TARGET_PAGE_MASK; load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); BOOTINFO2(cs->as, parameters_base, BI_RAMDISK, initrd_base, initrd_size); } else { initrd_base = 0; initrd_size = 0; } BOOTINFO0(cs->as, parameters_base, BI_LAST); } else { uint8_t *ptr; /* allocate and load BIOS */ rom = g_malloc(sizeof(*rom)); memory_region_init_rom(rom, NULL, "m68k_mac.rom", MACROM_SIZE, &error_abort); filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); memory_region_add_subregion(get_system_memory(), MACROM_ADDR, rom); /* Load MacROM binary */ if (filename) { bios_size = load_image_targphys(filename, MACROM_ADDR, MACROM_SIZE); g_free(filename); } else { bios_size = -1; } /* Remove qtest_enabled() check once firmware files are in the tree */ if (!qtest_enabled()) { if (bios_size < 0 || bios_size > MACROM_SIZE) { error_report("could not load MacROM '%s'", bios_name); exit(1); } ptr = rom_ptr(MACROM_ADDR, MACROM_SIZE); stl_phys(cs->as, 0, ldl_p(ptr)); /* reset initial SP */ stl_phys(cs->as, 4, MACROM_ADDR + ldl_p(ptr + 4)); /* reset initial PC */ } } }
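The exclusive-OR checksum and bit-reversed MAC storage described in this commit are easy to isolate. Below is a minimal standalone C sketch of just that PROM-filling step; bitrev8() is reimplemented locally so the sketch compiles on its own, and the helper name fill_dp8393x_prom is an illustration, not QEMU code:

#include <stdint.h>

static uint8_t bitrev8(uint8_t x)
{
    x = (x & 0xf0) >> 4 | (x & 0x0f) << 4;
    x = (x & 0xcc) >> 2 | (x & 0x33) << 2;
    x = (x & 0xaa) >> 1 | (x & 0x55) << 1;
    return x;
}

static void fill_dp8393x_prom(uint8_t prom[8], const uint8_t mac[6])
{
    uint8_t checksum = 0;

    for (int i = 0; i < 6; i++) {
        prom[i] = bitrev8(mac[i]);   /* bit-reversed, as Linux macsonic.c expects */
        checksum ^= prom[i];         /* exclusive-OR, not an additive sum */
    }
    prom[7] = 0xff - checksum;       /* checksum byte MacOS validates, as above */
}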
8660df5ea25ea4e6ee94fca43559165fe7610199
https://github.com/qemu/qemu
not_vulnerable
g364fb: add VMStateDescription for G364SysBusState Currently when QEMU attempts to migrate the MIPS magnum machine it crashes due to a mistake in the g364fb VMStateDescription configuration which expects a G364SysBusState and not a G364State. Resolve the issue by adding a new VMStateDescription for G364SysBusState and embedding the existing vmstate_g364fb VMStateDescription inside it using VMSTATE_STRUCT. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Fixes: 97a3f6ffbba ("g364fb: convert to qdev") Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-Id: <20210625163554.14879-3-mark.cave-ayland@ilande.co.uk> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
static void g364fb_sysbus_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = g364fb_sysbus_realize; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); dc->desc = "G364 framebuffer"; dc->reset = g364fb_sysbus_reset; dc->vmsd = &vmstate_g364fb_sysbus; device_class_set_props(dc, g364fb_sysbus_properties); }
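The fix pattern the commit describes, embedding one VMStateDescription inside another with VMSTATE_STRUCT, looks roughly like the sketch below; the field name "g364" and the version numbers are assumptions for illustration, not copied from the tree:

static const VMStateDescription vmstate_g364fb_sysbus = {
    .name = "g364fb-sysbus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* Migrate the embedded G364State with the existing vmsd, so the
         * top-level vmsd now matches the G364SysBusState it is handed. */
        VMSTATE_STRUCT(g364, G364SysBusState, 1, vmstate_g364fb, G364State),
        VMSTATE_END_OF_LIST()
    }
};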
92ecfab50ee2b30e60c774f96f05fc38714874f1
https://github.com/qemu/qemu
not_vulnerable
target/mips: Fix gen_mxu_s32ldd_s32lddr There were two bugs here: (1) the required endianness was not present in the MemOp, and (2) we were not providing a zero-extended input to the bswap as semantics required. The best fix is to fold the bswap into the memory operation, producing the desired result directly. Acked-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx) { TCGv t0, t1; uint32_t XRa, Rb, s12, sel; t0 = tcg_temp_new(); t1 = tcg_temp_new(); XRa = extract32(ctx->opcode, 6, 4); s12 = extract32(ctx->opcode, 10, 10); sel = extract32(ctx->opcode, 20, 1); Rb = extract32(ctx->opcode, 21, 5); gen_load_gpr(t0, Rb); tcg_gen_movi_tl(t1, s12); tcg_gen_shli_tl(t1, t1, 2); if (s12 & 0x200) { tcg_gen_ori_tl(t1, t1, 0xFFFFF000); } tcg_gen_add_tl(t1, t0, t1); tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP)); gen_store_mxu_gpr(t1, XRa); tcg_temp_free(t0); tcg_temp_free(t1); }
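The folded byte swap relies on MemOp being a bitmask in which MO_BSWAP toggles endianness, so XOR-ing `sel * MO_BSWAP` into the target-endian op flips it exactly when sel is 1. A toy standalone demonstration of that XOR trick (the numeric encoding here is simplified and assumed, not QEMU's actual MemOp values):

#include <stdio.h>

enum { MO_BSWAP = 8, MO_LE = 0, MO_BE = MO_BSWAP };  /* toy encoding */

int main(void)
{
    for (int target_be = 0; target_be <= 1; target_be++) {
        int mo_te = target_be ? MO_BE : MO_LE;   /* "target endian" base op */
        for (int sel = 0; sel <= 1; sel++) {
            int mop = mo_te ^ (sel * MO_BSWAP);  /* sel==1 flips endianness */
            printf("target %s, sel=%d -> %s load\n",
                   target_be ? "BE" : "LE", sel,
                   (mop & MO_BSWAP) ? "BE" : "LE");
        }
    }
    return 0;
}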
06188c8981ca5d5386e22c82d5bd40e5f27c8492
https://github.com/qemu/qemu
not_vulnerable
target/cris: Fix use_goto_tb Do not skip the page check for user-only -- mmap/mprotect can still change page mappings. Only check dc->base.pc_first, not dc->ppc -- the start page is the only one that's relevant. Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false) { TCGLabel *l1 = gen_new_label(); /* Conditional jmp. */ tcg_gen_mov_tl(env_pc, pc_false); tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1); tcg_gen_mov_tl(env_pc, pc_true); gen_set_label(l1); }
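The use_goto_tb fix the commit describes reduces to a single page check against the start of the current translation block. A hedged sketch of its shape (field names follow QEMU's DisasContextBase; this is not the exact tree code):

static bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    /* Goto-TB is only safe if the destination stays on the same guest
     * page as the start of the current TB -- checked for user-only
     * builds too, since mmap()/mprotect() can change page mappings. */
    return ((dest ^ dc->base.pc_first) & TARGET_PAGE_MASK) == 0;
}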
2d369d6e6e890a0204183e853604f8077329c4bc
https://github.com/qemu/qemu
not_vulnerable
Prevent compiler warning on block.c Commit 3108a15cf (block: introduce bdrv_drop_filter()) introduced the uninitialized variable to_cow_parent in the bdrv_replace_node_common function; it is used only when detach_subchain is true. It appears in two places: the first if block properly initializes the variable and the second block uses it. However, the compiler may treat these two blocks as independent cases, so it assumes the first block can fail the test while the second one passes (although both use the same condition). This causes a warning that the variable may be uninitialized in the second block. The warning was observed with GCC 8.4.1 and 11.0.1. To prevent it, initialize the variable with NULL. Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com> Message-Id: <1162368493.17178530.1620201543649.JavaMail.zimbra@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static int bdrv_replace_node_common(BlockDriverState *from, BlockDriverState *to, bool auto_skip, bool detach_subchain, Error **errp) { Transaction *tran = tran_new(); g_autoptr(GHashTable) found = NULL; g_autoptr(GSList) refresh_list = NULL; BlockDriverState *to_cow_parent = NULL; int ret; if (detach_subchain) { assert(bdrv_chain_contains(from, to)); assert(from != to); for (to_cow_parent = from; bdrv_filter_or_cow_bs(to_cow_parent) != to; to_cow_parent = bdrv_filter_or_cow_bs(to_cow_parent)) { ; } } /* Make sure that @from doesn't go away until we have successfully attached * all of its parents to @to. */ bdrv_ref(from); assert(qemu_get_current_aio_context() == qemu_get_aio_context()); assert(bdrv_get_aio_context(from) == bdrv_get_aio_context(to)); bdrv_drained_begin(from); /* * Do the replacement without permission update. * Replacement may influence the permissions, we should calculate new * permissions based on new graph. If we fail, we'll roll-back the * replacement. */ ret = bdrv_replace_node_noperm(from, to, auto_skip, tran, errp); if (ret < 0) { goto out; } if (detach_subchain) { bdrv_remove_filter_or_cow_child(to_cow_parent, tran); } found = g_hash_table_new(NULL, NULL); refresh_list = bdrv_topological_dfs(refresh_list, found, to); refresh_list = bdrv_topological_dfs(refresh_list, found, from); ret = bdrv_list_refresh_perms(refresh_list, NULL, tran, errp); if (ret < 0) { goto out; } ret = 0; out: tran_finalize(tran, ret); bdrv_drained_end(from); bdrv_unref(from); return ret; }
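The warning pattern is easy to reproduce outside QEMU: two blocks guarded by the same condition, where the compiler cannot prove they are taken together. A minimal standalone repro (hypothetical, for illustration):

#include <stdio.h>
#include <stdlib.h>

int demo(int detach)
{
    int v;                  /* initializing to 0 silences the warning */

    if (detach) {
        v = rand();         /* first guarded block initializes */
    }
    /* ... intervening code the optimizer may not see through ... */
    if (detach) {
        printf("%d\n", v);  /* second guarded block uses it; GCC 8/11 may
                             * emit -Wmaybe-uninitialized here */
    }
    return 0;
}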
2b02aabc9d02f9e95946cf639f546bb61f1721b7
https://github.com/qemu/qemu
not_vulnerable
hw/nvme: fix missing check for PMR capability Qiang Liu reported that an access on an unknown address is triggered in memory_region_set_enabled because a check on CAP.PMRS is missing for the PMRCTL register write when no PMR is configured. Cc: qemu-stable@nongnu.org Fixes: 75c3c9de961d ("hw/block/nvme: disable PMR at boot up") Resolves: https://gitlab.com/qemu-project/qemu/-/issues/362 Signed-off-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Keith Busch <kbusch@kernel.org>
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, unsigned size) { if (unlikely(offset & (sizeof(uint32_t) - 1))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32, "MMIO write not 32-bit aligned," " offset=0x%"PRIx64"", offset); /* should be ignored, fall through for now */ } if (unlikely(size < sizeof(uint32_t))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall, "MMIO write smaller than 32-bits," " offset=0x%"PRIx64", size=%u", offset, size); /* should be ignored, fall through for now */ } switch (offset) { case 0xc: /* INTMS */ if (unlikely(msix_enabled(&(n->parent_obj)))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, "undefined access to interrupt mask set" " when MSI-X is enabled"); /* should be ignored, fall through for now */ } n->bar.intms |= data & 0xffffffff; n->bar.intmc = n->bar.intms; trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc); nvme_irq_check(n); break; case 0x10: /* INTMC */ if (unlikely(msix_enabled(&(n->parent_obj)))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, "undefined access to interrupt mask clr" " when MSI-X is enabled"); /* should be ignored, fall through for now */ } n->bar.intms &= ~(data & 0xffffffff); n->bar.intmc = n->bar.intms; trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc); nvme_irq_check(n); break; case 0x14: /* CC */ trace_pci_nvme_mmio_cfg(data & 0xffffffff); /* Windows first sends data, then sends enable bit */ if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) && !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc)) { n->bar.cc = data; } if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) { n->bar.cc = data; if (unlikely(nvme_start_ctrl(n))) { trace_pci_nvme_err_startfail(); n->bar.csts = NVME_CSTS_FAILED; } else { trace_pci_nvme_mmio_start_success(); n->bar.csts = NVME_CSTS_READY; } } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) { trace_pci_nvme_mmio_stopped(); nvme_ctrl_reset(n); n->bar.csts &= ~NVME_CSTS_READY; } if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) { trace_pci_nvme_mmio_shutdown_set(); nvme_ctrl_shutdown(n); n->bar.cc = data; n->bar.csts |= NVME_CSTS_SHST_COMPLETE; } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) { trace_pci_nvme_mmio_shutdown_cleared(); n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE; n->bar.cc = data; } break; case 0x1c: /* CSTS */ if (data & (1 << 4)) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported, "attempted to W1C CSTS.NSSRO" " but CAP.NSSRS is zero (not supported)"); } else if (data != 0) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts, "attempted to set a read only bit" " of controller status"); } break; case 0x20: /* NSSR */ if (data == 0x4e564d65) { trace_pci_nvme_ub_mmiowr_ssreset_unsupported(); } else { /* The spec says that writes of other values have no effect */ return; } break; case 0x24: /* AQA */ n->bar.aqa = data & 0xffffffff; trace_pci_nvme_mmio_aqattr(data & 0xffffffff); break; case 0x28: /* ASQ */ n->bar.asq = size == 8 ? data : (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff); trace_pci_nvme_mmio_asqaddr(data); break; case 0x2c: /* ASQ hi */ n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32); trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq); break; case 0x30: /* ACQ */ trace_pci_nvme_mmio_acqaddr(data); n->bar.acq = size == 8 ? 
data : (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff); break; case 0x34: /* ACQ hi */ n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32); trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq); break; case 0x38: /* CMBLOC */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved, "invalid write to reserved CMBLOC" " when CMBSZ is zero, ignored"); return; case 0x3C: /* CMBSZ */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly, "invalid write to read only CMBSZ, ignored"); return; case 0x50: /* CMBMSC */ if (!NVME_CAP_CMBS(n->bar.cap)) { return; } n->bar.cmbmsc = size == 8 ? data : (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff); n->cmb.cmse = false; if (NVME_CMBMSC_CRE(data)) { nvme_cmb_enable_regs(n); if (NVME_CMBMSC_CMSE(data)) { hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT; if (cba + int128_get64(n->cmb.mem.size) < cba) { NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1); return; } n->cmb.cba = cba; n->cmb.cmse = true; } } else { n->bar.cmbsz = 0; n->bar.cmbloc = 0; } return; case 0x54: /* CMBMSC hi */ n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32); return; case 0xe00: /* PMRCAP */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly, "invalid write to PMRCAP register, ignored"); return; case 0xe04: /* PMRCTL */ if (!NVME_CAP_PMRS(n->bar.cap)) { return; } n->bar.pmrctl = data; if (NVME_PMRCTL_EN(data)) { memory_region_set_enabled(&n->pmr.dev->mr, true); n->bar.pmrsts = 0; } else { memory_region_set_enabled(&n->pmr.dev->mr, false); NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 1); n->pmr.cmse = false; } return; case 0xe08: /* PMRSTS */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly, "invalid write to PMRSTS register, ignored"); return; case 0xe0C: /* PMREBS */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly, "invalid write to PMREBS register, ignored"); return; case 0xe10: /* PMRSWTP */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly, "invalid write to PMRSWTP register, ignored"); return; case 0xe14: /* PMRMSCL */ if (!NVME_CAP_PMRS(n->bar.cap)) { return; } n->bar.pmrmsc = (n->bar.pmrmsc & ~0xffffffff) | (data & 0xffffffff); n->pmr.cmse = false; if (NVME_PMRMSC_CMSE(n->bar.pmrmsc)) { hwaddr cba = NVME_PMRMSC_CBA(n->bar.pmrmsc) << PMRMSC_CBA_SHIFT; if (cba + int128_get64(n->pmr.dev->mr.size) < cba) { NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 1); return; } n->pmr.cmse = true; n->pmr.cba = cba; } return; case 0xe18: /* PMRMSCU */ if (!NVME_CAP_PMRS(n->bar.cap)) { return; } n->bar.pmrmsc = (n->bar.pmrmsc & 0xffffffff) | (data << 32); return; default: NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid, "invalid MMIO write," " offset=0x%"PRIx64", data=%"PRIx64"", offset, data); break; } }
e76fb260ca8fc2420a4ce792324af0544628b331
https://github.com/qemu/qemu
not_vulnerable
Partially revert "hw/block/nvme: drain namespaces on sq deletion" This partially reverts commit 98f84f5a4eca5c03e32fff20f246d9b4b96d6422. Since all "multi aio" commands are now reimplemented to properly track the nested aiocbs, we can revert the "hack" that was introduced to make sure all requests were properly drained upon sq deletion. The revert is partial since we keep the assert that no outstanding requests remain on the submission queue after the explicit cancellation. Signed-off-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Keith Busch <kbusch@kernel.org>
static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req) { NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; NvmeRequest *r, *next; NvmeSQueue *sq; NvmeCQueue *cq; uint16_t qid = le16_to_cpu(c->qid); if (unlikely(!qid || nvme_check_sqid(n, qid))) { trace_pci_nvme_err_invalid_del_sq(qid); return NVME_INVALID_QID | NVME_DNR; } trace_pci_nvme_del_sq(qid); sq = n->sq[qid]; while (!QTAILQ_EMPTY(&sq->out_req_list)) { r = QTAILQ_FIRST(&sq->out_req_list); assert(r->aiocb); blk_aio_cancel(r->aiocb); } assert(QTAILQ_EMPTY(&sq->out_req_list)); if (!nvme_check_cqid(n, sq->cqid)) { cq = n->cq[sq->cqid]; QTAILQ_REMOVE(&cq->sq_list, sq, entry); nvme_post_cqes(cq); QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) { if (r->sq == sq) { QTAILQ_REMOVE(&cq->req_list, r, entry); QTAILQ_INSERT_TAIL(&sq->req_list, r, entry); } } } nvme_free_sq(sq, n); return NVME_SUCCESS; }
421a30927140945c6aa957c2c0e7ad695984483d
https://github.com/qemu/qemu
not_vulnerable
hw/nvme: fix lbaf formats initialization Currently, LBAF formats are initialized based on the metadata size if and only if the nvme-ns "ms" parameter has a non-zero value. Since the Format NVM command is supported, the device parameter "ms" may not be the right criterion for initializing the supported LBAFs. Also make the LBAF array read-only. Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com> Reviewed-by: Klaus Jensen <k.jensen@samsung.com> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static int nvme_ns_init(NvmeNamespace *ns, Error **errp) { NvmeIdNs *id_ns = &ns->id_ns; uint8_t ds; uint16_t ms; int i; ns->csi = NVME_CSI_NVM; ns->status = 0x0; ns->id_ns.dlfeat = 0x1; /* support DULBE and I/O optimization fields */ id_ns->nsfeat |= (0x4 | 0x10); if (ns->params.shared) { id_ns->nmic |= NVME_NMIC_NS_SHARED; } /* simple copy */ id_ns->mssrl = cpu_to_le16(ns->params.mssrl); id_ns->mcl = cpu_to_le32(ns->params.mcl); id_ns->msrc = ns->params.msrc; ds = 31 - clz32(ns->blkconf.logical_block_size); ms = ns->params.ms; id_ns->mc = NVME_ID_NS_MC_EXTENDED | NVME_ID_NS_MC_SEPARATE; if (ms && ns->params.mset) { id_ns->flbas |= NVME_ID_NS_FLBAS_EXTENDED; } id_ns->dpc = 0x1f; id_ns->dps = ns->params.pi; if (ns->params.pi && ns->params.pil) { id_ns->dps |= NVME_ID_NS_DPS_FIRST_EIGHT; } static const NvmeLBAF lbaf[16] = { [0] = { .ds = 9 }, [1] = { .ds = 9, .ms = 8 }, [2] = { .ds = 9, .ms = 16 }, [3] = { .ds = 9, .ms = 64 }, [4] = { .ds = 12 }, [5] = { .ds = 12, .ms = 8 }, [6] = { .ds = 12, .ms = 16 }, [7] = { .ds = 12, .ms = 64 }, }; memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf)); id_ns->nlbaf = 7; for (i = 0; i <= id_ns->nlbaf; i++) { NvmeLBAF *lbaf = &id_ns->lbaf[i]; if (lbaf->ds == ds) { if (lbaf->ms == ms) { id_ns->flbas |= i; goto lbaf_found; } } } /* add non-standard lba format */ id_ns->nlbaf++; id_ns->lbaf[id_ns->nlbaf].ds = ds; id_ns->lbaf[id_ns->nlbaf].ms = ms; id_ns->flbas |= id_ns->nlbaf; lbaf_found: nvme_ns_init_format(ns); return 0; }
3593b8e0a2146a885f93d71c754757bb2c03864e
https://github.com/qemu/qemu
not_vulnerable
Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2021-06-24' into staging Block patch: - Fix Coverity complaint in block/snapshot.c # gpg: Signature made Thu 24 Jun 2021 12:42:28 BST # gpg: using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40 # gpg: issuer "mreitz@redhat.com" # gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full] # Primary key fingerprint: 91BE B60A 30DB 3E88 57D1 1829 F407 DB00 61D5 CF40 * remotes/maxreitz/tags/pull-block-2021-06-24: block/snapshot: Clarify goto fallback behavior Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
int bdrv_snapshot_goto(BlockDriverState *bs, const char *snapshot_id, Error **errp) { BlockDriver *drv = bs->drv; BdrvChild **fallback_ptr; int ret, open_ret; if (!drv) { error_setg(errp, "Block driver is closed"); return -ENOMEDIUM; } if (!QLIST_EMPTY(&bs->dirty_bitmaps)) { error_setg(errp, "Device has active dirty bitmaps"); return -EBUSY; } if (drv->bdrv_snapshot_goto) { ret = drv->bdrv_snapshot_goto(bs, snapshot_id); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to load snapshot"); } return ret; } fallback_ptr = bdrv_snapshot_fallback_ptr(bs); if (fallback_ptr) { QDict *options; QDict *file_options; Error *local_err = NULL; BlockDriverState *fallback_bs = (*fallback_ptr)->bs; char *subqdict_prefix = g_strdup_printf("%s.", (*fallback_ptr)->name); options = qdict_clone_shallow(bs->options); /* Prevent it from getting deleted when detached from bs */ bdrv_ref(fallback_bs); qdict_extract_subqdict(options, &file_options, subqdict_prefix); qobject_unref(file_options); g_free(subqdict_prefix); /* Force .bdrv_open() below to re-attach fallback_bs on *fallback_ptr */ qdict_put_str(options, (*fallback_ptr)->name, bdrv_get_node_name(fallback_bs)); /* Now close bs, apply the snapshot on fallback_bs, and re-open bs */ if (drv->bdrv_close) { drv->bdrv_close(bs); } /* .bdrv_open() will re-attach it */ bdrv_unref_child(bs, *fallback_ptr); *fallback_ptr = NULL; ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp); open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err); qobject_unref(options); if (open_ret < 0) { bdrv_unref(fallback_bs); bs->drv = NULL; /* A bdrv_snapshot_goto() error takes precedence */ error_propagate(errp, local_err); return ret < 0 ? ret : open_ret; } /* * fallback_ptr is &bs->file or &bs->backing. *fallback_ptr * was closed above and set to NULL, but the .bdrv_open() call * has opened it again, because we set the respective option * (with the qdict_put_str() call above). * Assert that .bdrv_open() has attached some child on * *fallback_ptr, and that it has attached the one we wanted * it to (i.e., fallback_bs). */ assert(*fallback_ptr && fallback_bs == (*fallback_ptr)->bs); bdrv_unref(fallback_bs); return ret; } error_setg(errp, "Block driver does not support snapshots"); return -ENOTSUP; }
0aebebb561c9c23b9c6d3d58040f83547f059b5c
https://github.com/qemu/qemu
not_vulnerable
machine: reject -smp dies!=1 for non-PC machines Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-Id: <20210617155308.928754-11-pbonzini@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) { unsigned cpus = config->has_cpus ? config->cpus : 0; unsigned sockets = config->has_sockets ? config->sockets : 0; unsigned cores = config->has_cores ? config->cores : 0; unsigned threads = config->has_threads ? config->threads : 0; if (config->has_dies && config->dies != 0 && config->dies != 1) { error_setg(errp, "dies not supported by this machine's CPU topology"); } /* compute missing values, prefer sockets over cores over threads */ if (cpus == 0 || sockets == 0) { cores = cores > 0 ? cores : 1; threads = threads > 0 ? threads : 1; if (cpus == 0) { sockets = sockets > 0 ? sockets : 1; cpus = cores * threads * sockets; } else { ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; sockets = ms->smp.max_cpus / (cores * threads); } } else if (cores == 0) { threads = threads > 0 ? threads : 1; cores = cpus / (sockets * threads); cores = cores > 0 ? cores : 1; } else if (threads == 0) { threads = cpus / (cores * sockets); threads = threads > 0 ? threads : 1; } else if (sockets * cores * threads < cpus) { error_setg(errp, "cpu topology: " "sockets (%u) * cores (%u) * threads (%u) < " "smp_cpus (%u)", sockets, cores, threads, cpus); return; } ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; if (ms->smp.max_cpus < cpus) { error_setg(errp, "maxcpus must be equal to or greater than smp"); return; } if (sockets * cores * threads != ms->smp.max_cpus) { error_setg(errp, "Invalid CPU topology: " "sockets (%u) * cores (%u) * threads (%u) " "!= maxcpus (%u)", sockets, cores, threads, ms->smp.max_cpus); return; } ms->smp.cpus = cpus; ms->smp.cores = cores; ms->smp.threads = threads; ms->smp.sockets = sockets; }
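As a worked example of the inference order in smp_parse above (a hypothetical invocation): -smp 8,sockets=2 leaves cores and threads at 0, so threads defaults to 1 and cores is computed as 8 / (2 * 1) = 4; maxcpus then defaults to cpus, and the final consistency check verifies 2 * 4 * 1 == 8.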
18473467d55a20d643b6c9b3a52de42f705b4d35
https://github.com/qemu/qemu
not_vulnerable
file-posix: try BLKSECTGET on block devices too, do not round to power of 2 bs->sg is only true for character devices, but block devices can also be used with scsi-block and scsi-generic. Unfortunately BLKSECTGET returns bytes in an int for /dev/sgN devices, and sectors in a short for block devices, so account for that in the code. The maximum transfer also need not be a power of 2 (for example I have seen disks with 1280 KiB maximum transfer) so there's no need to pass the result through pow2floor. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void raw_reopen_abort(BDRVReopenState *state) { BDRVRawReopenState *rs = state->opaque; BDRVRawState *s = state->bs->opaque; /* nothing to do if NULL, we didn't get far enough */ if (rs == NULL) { return; } g_free(state->opaque); state->opaque = NULL; assert(s->reopen_state == state); s->reopen_state = NULL; }
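The unit mismatch the commit describes can be captured in a small helper. A hedged sketch, assuming Linux and its BLKSECTGET ioctl (an illustration of the rule, not the file-posix.c code):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/fs.h>        /* BLKSECTGET */

static int64_t max_transfer_bytes(int fd, const struct stat *st)
{
    if (S_ISBLK(st->st_mode)) {
        unsigned short max_sectors = 0;    /* block layer: 512-byte sectors */
        if (ioctl(fd, BLKSECTGET, &max_sectors) == 0) {
            return (int64_t)max_sectors * 512;
        }
    } else if (S_ISCHR(st->st_mode)) {
        int max_bytes = 0;                 /* /dev/sgN: already in bytes */
        if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) {
            return max_bytes;
        }
    }
    return -1;  /* no limit found; note: no pow2floor rounding is applied */
}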
8ad5ab6148dca8aad297c134c09c84b0b92d45ed
https://github.com/qemu/qemu
not_vulnerable
file-posix: fix max_iov for /dev/sg devices Even though it was only called for devices that have bs->sg set (which must be character devices), sg_get_max_segments looked at /sys/dev/block which only works for block devices. On Linux the sg driver has its own way to provide the maximum number of iovecs in a scatter/gather list, so add support for it. The block device path is kept because it will be reinstated in the next patches. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Reviewed-by: Max Reitz <mreitz@redhat.com>
static int sg_get_max_segments(int fd) { #ifdef CONFIG_LINUX char buf[32]; const char *end; char *sysfspath = NULL; int ret; int sysfd = -1; long max_segments; struct stat st; if (fstat(fd, &st)) { ret = -errno; goto out; } if (S_ISCHR(st.st_mode)) { if (ioctl(fd, SG_GET_SG_TABLESIZE, &ret) == 0) { return ret; } return -ENOTSUP; } if (!S_ISBLK(st.st_mode)) { return -ENOTSUP; } sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments", major(st.st_rdev), minor(st.st_rdev)); sysfd = open(sysfspath, O_RDONLY); if (sysfd == -1) { ret = -errno; goto out; } do { ret = read(sysfd, buf, sizeof(buf) - 1); } while (ret == -1 && errno == EINTR); if (ret < 0) { ret = -errno; goto out; } else if (ret == 0) { ret = -EIO; goto out; } buf[ret] = 0; /* The file is ended with '\n', pass 'end' to accept that. */ ret = qemu_strtol(buf, &end, 10, &max_segments); if (ret == 0 && end && *end == '\n') { ret = max_segments; } out: if (sysfd != -1) { close(sysfd); } g_free(sysfspath); return ret; #else return -ENOTSUP; #endif }
05d9d0359e6da7dc8255712d745d079a04fa5ae5
https://github.com/qemu/qemu
not_vulnerable
target/mips: Do not abort on invalid instruction On real hardware an invalid instruction doesn't halt the world, but usually triggers a RESERVED INSTRUCTION exception. TCG guest code shouldn't abort QEMU anyway. Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210617174323.2900831-2-f4bug@amsat.org>
static void gen_branch(DisasContext *ctx, int insn_bytes) { if (ctx->hflags & MIPS_HFLAG_BMASK) { int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK; /* Branches completion */ clear_branch_hflags(ctx); ctx->base.is_jmp = DISAS_NORETURN; /* FIXME: Need to clear can_do_io. */ switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) { case MIPS_HFLAG_FBNSLOT: gen_goto_tb(ctx, 0, ctx->base.pc_next + insn_bytes); break; case MIPS_HFLAG_B: /* unconditional branch */ if (proc_hflags & MIPS_HFLAG_BX) { tcg_gen_xori_i32(hflags, hflags, MIPS_HFLAG_M16); } gen_goto_tb(ctx, 0, ctx->btarget); break; case MIPS_HFLAG_BL: /* blikely taken case */ gen_goto_tb(ctx, 0, ctx->btarget); break; case MIPS_HFLAG_BC: /* Conditional branch */ { TCGLabel *l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1); gen_goto_tb(ctx, 1, ctx->base.pc_next + insn_bytes); gen_set_label(l1); gen_goto_tb(ctx, 0, ctx->btarget); } break; case MIPS_HFLAG_BR: /* unconditional branch to register */ if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { TCGv t0 = tcg_temp_new(); TCGv_i32 t1 = tcg_temp_new_i32(); tcg_gen_andi_tl(t0, btarget, 0x1); tcg_gen_trunc_tl_i32(t1, t0); tcg_temp_free(t0); tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16); tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT); tcg_gen_or_i32(hflags, hflags, t1); tcg_temp_free_i32(t1); tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1); } else { tcg_gen_mov_tl(cpu_PC, btarget); } if (ctx->base.singlestep_enabled) { save_cpu_state(ctx, 0); gen_helper_raise_exception_debug(cpu_env); } tcg_gen_lookup_and_goto_ptr(); break; default: LOG_DISAS("unknown branch 0x%x\n", proc_hflags); gen_reserved_instruction(ctx); } } }
2838b1d6356044eb240edd4e1b9b5ab5946c5b28
https://github.com/qemu/qemu
not_vulnerable
target/mips: Fix potential integer overflow (CID 1452921) Use the BIT_ULL() macro to ensure we use 64-bit arithmetic. This fixes the following Coverity issue (OVERFLOW_BEFORE_WIDEN): CID 1452921: Integer handling issues: Potentially overflowing expression "1 << w" with type "int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "uint64_t" (64 bits, unsigned). Fixes: 074cfcb4dae ("target/mips: Implement hardware page table walker") Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210505215119.1517465-1-f4bug@amsat.org>
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, int directory_index, bool *huge_page, bool *hgpg_directory_hit, uint64_t *pw_entrylo0, uint64_t *pw_entrylo1) { int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1; int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F; int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F; int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3; int directory_shift = (ptew > 1) ? -1 : (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift; int leaf_shift = (ptew > 1) ? -1 : (ptew == 1) ? native_shift + 1 : native_shift; uint32_t direntry_size = 1 << (directory_shift + 3); uint32_t leafentry_size = 1 << (leaf_shift + 3); uint64_t entry; uint64_t paddr; int prot; uint64_t lsb = 0; uint64_t w = 0; if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD, cpu_mmu_index(env, false)) != TLBRET_MATCH) { /* wrong base address */ return 0; } if (!get_pte(env, *vaddr, direntry_size, &entry)) { return 0; } if ((entry & (1 << psn)) && hugepg) { *huge_page = true; *hgpg_directory_hit = true; entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); w = directory_index - 1; if (directory_index & 0x1) { /* Generate adjacent page from same PTE for odd TLB page */ lsb = BIT_ULL(w) >> 6; *pw_entrylo0 = entry & ~lsb; /* even page */ *pw_entrylo1 = entry | lsb; /* odd page */ } else if (dph) { int oddpagebit = 1 << leaf_shift; uint64_t vaddr2 = *vaddr ^ oddpagebit; if (*vaddr & oddpagebit) { *pw_entrylo1 = entry; } else { *pw_entrylo0 = entry; } if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD, cpu_mmu_index(env, false)) != TLBRET_MATCH) { return 0; } if (!get_pte(env, vaddr2, leafentry_size, &entry)) { return 0; } entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); if (*vaddr & oddpagebit) { *pw_entrylo0 = entry; } else { *pw_entrylo1 = entry; } } else { return 0; } return 1; } else { *vaddr = entry; return 2; } }
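The overflow itself is a classic shift-before-widen bug: the literal 1 is a 32-bit int, so "1 << w" is evaluated in 32 bits before being assigned to a uint64_t. A minimal standalone illustration (BIT_ULL is redefined here so the snippet compiles on its own):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(nr) (1ULL << (nr))   /* force a 64-bit shift */

int main(void)
{
    int w = 40;
    uint64_t good = BIT_ULL(w);      /* 0x10000000000, as intended */
    /* uint64_t bad = 1 << w; */     /* 32-bit shift by 40: undefined */
    printf("0x%llx\n", (unsigned long long)good);
    return 0;
}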
79a412891f0cb6bbffd8fd9e13608066234e56c1
https://github.com/qemu/qemu
not_vulnerable
target/riscv: gdbstub: Fix dynamic CSR XML generation Since commit 605def6eeee5 ("target/riscv: Use the RISCVException enum for CSR operations") the CSR predicate() function was changed to return RISCV_EXCP_NONE instead of 0 for a valid CSR, but it forgot to update the dynamic CSR XML generation codes in gdbstub. Fixes: 605def6eeee5 ("target/riscv: Use the RISCVException enum for CSR operations") Reported-by: Xuzhou Cheng <xuzhou.cheng@windriver.com> Signed-off-by: Bin Meng <bin.meng@windriver.com> Tested-by: Xuzhou Cheng <xuzhou.cheng@windriver.com> Reviewed-by: Alistair Francis <alistair.francis@wdc.com> Message-id: 20210615085133.389887-1-bmeng.cn@gmail.com Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; GString *s = g_string_new(NULL); riscv_csr_predicate_fn predicate; int bitsize = riscv_cpu_is_32bit(env) ? 32 : 64; int i; g_string_printf(s, "<?xml version=\"1.0\"?>"); g_string_append_printf(s, "<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"); g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">"); for (i = 0; i < CSR_TABLE_SIZE; i++) { predicate = csr_ops[i].predicate; if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) { if (csr_ops[i].name) { g_string_append_printf(s, "<reg name=\"%s\"", csr_ops[i].name); } else { g_string_append_printf(s, "<reg name=\"csr%03x\"", i); } g_string_append_printf(s, " bitsize=\"%d\"", bitsize); g_string_append_printf(s, " regnum=\"%d\"/>", base_reg + i); } } g_string_append_printf(s, "</feature>"); cpu->dyn_csr_xml = g_string_free(s, false); return CSR_TABLE_SIZE; }
3c11c2ebb062ffb5d7dcad44ab0fb60505ad5cac
https://github.com/qemu/qemu
not_vulnerable
target/s390x: Do not modify cpu state in s390_cpu_get_psw_mask We want to use this function for debugging, and debug should not modify cpu state (even non-architectural cpu state) lest we introduce heisenbugs. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: David Hildenbrand <david@redhat.com> Tested-by: jonathan.albrecht <jonathan.albrecht@linux.vnet.ibm.com> Tested-by: <ruixin.bao@ibm.com> Message-Id: <20210615030744.1252385-3-richard.henderson@linaro.org> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
uint64_t s390_cpu_get_psw_mask(CPUS390XState *env) { uint64_t r = env->psw.mask; if (tcg_enabled()) { uint64_t cc = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr); assert(cc <= 3); r &= ~PSW_MASK_CC; r |= cc << 44; } return r; }
96ff758c6e9cd5a01443ee15afbd0df4f00c37a8
https://github.com/qemu/qemu
not_vulnerable
linux-user: Use public sigev_notify_thread_id member if available _sigev_un._tid is an internal glibc field and is not available on musl libc. The sigevent(7) man page and Linux UAPI headers both use sigev_notify_thread_id as a public way to access this field. musl libc supports this field since 1.2.2[0], and glibc plans to add support as well[1][2]. If sigev_notify_thread_id is not available, fall back to _sigev_un._tid as before. [0] http://git.musl-libc.org/cgit/musl/commit/?id=7c71792e87691451f2a6b76348e83ad1889f1dcb [1] https://www.openwall.com/lists/musl/2019/08/01/5 [2] https://sourceware.org/bugzilla/show_bug.cgi?id=27417 Signed-off-by: Michael Forney <mforney@mforney.org> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <20210526035556.7931-1-mforney@mforney.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, abi_ulong target_addr) { struct target_sigevent *target_sevp; if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { return -TARGET_EFAULT; } /* This union is awkward on 64 bit systems because it has a 32 bit * integer and a pointer in it; we follow the conversion approach * used for handling sigval types in signal.c so the guest should get * the correct value back even if we did a 64 bit byteswap and it's * using the 32 bit integer. */ host_sevp->sigev_value.sival_ptr = (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); host_sevp->sigev_signo = target_to_host_signal(tswap32(target_sevp->sigev_signo)); host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid); unlock_user_struct(target_sevp, target_addr, 1); return 0; }
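The compatibility trick the commit describes can be sketched as a build-time fallback; HAVE_SIGEV_NOTIFY_THREAD_ID is a hypothetical probe macro here, standing in for whatever the build system actually detects:

#include <signal.h>

/* If the libc does not expose the public member, fall back to the
 * glibc-internal field, exactly as before. */
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static void sigevent_set_tid(struct sigevent *sevp, int tid)
{
    sevp->sigev_notify_thread_id = tid;  /* public name on musl >= 1.2.2 */
}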
1c9638667b7068539dc5783c9428d588b14162ea
https://github.com/qemu/qemu
not_vulnerable
util/oslib-win32: Fix fatal assertion in qemu_try_memalign The function is called with alignment == 0 which caused an assertion. Use the code from oslib-posix.c to fix that regression. Fixes: ed6f53f9ca9 Signed-off-by: Stefan Weil <sw@weilnetz.de> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Message-Id: <20210611105846.347954-1-sw@weilnetz.de> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
void *qemu_try_memalign(size_t alignment, size_t size) { void *ptr; g_assert(size != 0); if (alignment < sizeof(void *)) { alignment = sizeof(void *); } else { g_assert(is_power_of_2(alignment)); } ptr = _aligned_malloc(size, alignment); trace_qemu_memalign(alignment, size, ptr); return ptr; }
2a25def4be09714c543713f111813b521b2356ee
https://github.com/qemu/qemu
not_vulnerable
block/nbd: nbd_client_handshake(): fix leak of s->ioc Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Reviewed-by: Roman Kagan <rvkagan@yandex-team.ru> Message-Id: <20210610100802.5888-9-vsementsov@virtuozzo.com> Signed-off-by: Eric Blake <eblake@redhat.com>
static int nbd_client_handshake(BlockDriverState *bs, Error **errp);
22fca190e25b10761925bb1eeadeda07aabf3c26
https://github.com/qemu/qemu
not_vulnerable
vfio: Fix unregister SaveVMHandler in vfio_migration_finalize In vfio_migration_init(), the SaveVMHandler is registered for the VFIO device, but the corresponding 'unregister' operation is missing. This leads to a 'Segmentation fault (core dumped)' in qemu_savevm_state_setup() when performing live migration after a VFIO device has been hot deleted. Fixes: 7c2f5f75f94 (vfio: Register SaveVMHandlers for VFIO device) Reported-by: Qixin Gan <ganqixin@huawei.com> Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com> Message-Id: <20210527123101.289-1-jiangkunkun@huawei.com> Reviewed by: Kirti Wankhede <kwankhede@nvidia.com> Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
void vfio_migration_finalize(VFIODevice *vbasedev) { if (vbasedev->migration) { VFIOMigration *migration = vbasedev->migration; remove_migration_state_change_notifier(&migration->migration_state); qemu_del_vm_change_state_handler(migration->vm_state); unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev); vfio_migration_exit(vbasedev); } if (vbasedev->migration_blocker) { migrate_del_blocker(vbasedev->migration_blocker); error_free(vbasedev->migration_blocker); vbasedev->migration_blocker = NULL; } }
986bdbc6a29c4d7ef125299c5013783e30dc2cae
https://github.com/qemu/qemu
not_vulnerable
coreaudio: Fix output stream format settings Before commit 7d6948cd98cf5ad8a3458a4ce7fdbcb79bcd1212, the code retrieved the initial output stream format settings, modified the frame rate, and set them again. However, that commit mistakenly removed the frame-rate modification code. The code also assumes the initial output stream format is consistent with what QEMU expects, but that expectation is not expressed in the code, which makes it harder to understand and will lead to breakage if the initial settings change. This change explicitly sets all of the output stream settings to solve these problems. Signed-off-by: Akihiko Odaki <akihiko.odaki@gmail.com> Message-Id: <20210616141721.54091-1-akihiko.odaki@gmail.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static OSStatus coreaudio_set_framesize(AudioDeviceID id, UInt32 *framesize) { UInt32 size = sizeof(*framesize); AudioObjectPropertyAddress addr = { kAudioDevicePropertyBufferFrameSize, kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMaster }; return AudioObjectSetPropertyData(id, &addr, 0, NULL, size, framesize); }
2833d697b9a418e2b9735e38ad4b33ae86f84739
https://github.com/qemu/qemu
not_vulnerable
jackaudio: avoid that the client name contains the word (NULL) Currently with jackaudio client name and qemu guest name unset, the JACK client names are out-(NULL) and in-(NULL). These names are user visible in the patch bay. Replace the function call to qemu_get_vm_name() with a call to audio_application_name() which replaces NULL with "qemu" to have more descriptive names. Signed-off-by: Volker Rümelin <vr_qemu@t-online.de> Message-Id: <20210517194604.2545-4-vr_qemu@t-online.de> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static int qjack_client_init(QJackClient *c);
cdfa56c551bb48f286cfe1f2daa1083d333ee45d
https://github.com/qemu/qemu
not_vulnerable
softmmu/physmem: Fix ram_block_discard_range() to handle shared anonymous memory We can create shared anonymous memory via "-object memory-backend-ram,share=on,..." which is, for example, required by PVRDMA for mremap() to work. Shared anonymous memory is weird, though. Instead of MADV_DONTNEED, we have to use MADV_REMOVE: MADV_DONTNEED will only remove / zap all relevant page table entries of the current process, the backend storage will not get removed, resulting in no reduced memory consumption and a repopulation of previous content on next access. Shared anonymous memory is internally really just shmem, but without a fd exposed. As we cannot use fallocate() without the fd to discard the backing storage, MADV_REMOVE gets the same job done without a fd as documented in "man 2 madvise". Removing backing storage implicitly invalidates all page table entries with relevant mappings - an additional MADV_DONTNEED is not required. Fixes: 06329ccecfa0 ("mem: add share parameter to memory-backend-ram") Reviewed-by: Peter Xu <peterx@redhat.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: David Hildenbrand <david@redhat.com> Message-Id: <20210406080126.24010-3-david@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) { int ret = -1; uint8_t *host_startaddr = rb->host + start; if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { error_report("ram_block_discard_range: Unaligned start address: %p", host_startaddr); goto err; } if ((start + length) <= rb->max_length) { bool need_madvise, need_fallocate; if (!QEMU_IS_ALIGNED(length, rb->page_size)) { error_report("ram_block_discard_range: Unaligned length: %zx", length); goto err; } errno = ENOTSUP; /* If we are missing MADVISE etc */ /* The logic here is messy; * madvise DONTNEED fails for hugepages * fallocate works on hugepages and shmem * shared anonymous memory requires madvise REMOVE */ need_madvise = (rb->page_size == qemu_host_page_size); need_fallocate = rb->fd != -1; if (need_fallocate) { /* For a file, this causes the area of the file to be zero'd * if read, and for hugetlbfs also causes it to be unmapped * so a userfault will trigger. */ #ifdef CONFIG_FALLOCATE_PUNCH_HOLE ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, start, length); if (ret) { ret = -errno; error_report("ram_block_discard_range: Failed to fallocate " "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; error_report("ram_block_discard_range: fallocate not available/file" "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; #endif } if (need_madvise) { /* For normal RAM this causes it to be unmapped, * for shared memory it causes the local mapping to disappear * and to fall back on the file contents (which we just * fallocate'd away). */ #if defined(CONFIG_MADVISE) if (qemu_ram_is_shared(rb) && rb->fd < 0) { ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE); } else { ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED); } if (ret) { ret = -errno; error_report("ram_block_discard_range: Failed to discard range " "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; error_report("ram_block_discard_range: MADVISE not available" "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; #endif } trace_ram_block_discard_range(rb->idstr, host_startaddr, length, need_madvise, need_fallocate, ret); } else { error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", rb->idstr, start, length, rb->max_length); } err: return ret; }
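The rule the commit states reduces to choosing the right advice value. A hedged sketch, assuming Linux madvise() with the MADV_REMOVE extension (illustration only, not the physmem.c code):

#include <stdbool.h>
#include <sys/mman.h>

static int discard_anon(void *addr, size_t len, bool shared)
{
    /* Private anonymous memory: zapping the PTEs is enough.
     * Shared anonymous memory (fd-less shmem): the backing store must be
     * removed, which also invalidates all relevant PTEs. */
    return madvise(addr, len, shared ? MADV_REMOVE : MADV_DONTNEED);
}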
cd39e773e00bf98ab41e2ffaaeab7a00a3f68bd1
https://github.com/qemu/qemu
not_vulnerable
target/arm: Diagnose UNALLOCATED in disas_simd_two_reg_misc_fp16 This fprintf+assert has been in place since the beginning. It is prior to the fp_access_check, so we're still good to raise sigill here. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/381 Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-id: 20210604183506.916654-2-richard.henderson@linaro.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) { int fpop, opcode, a, u; int rn, rd; bool is_q; bool is_scalar; bool only_in_vector = false; int pass; TCGv_i32 tcg_rmode = NULL; TCGv_ptr tcg_fpstatus = NULL; bool need_rmode = false; bool need_fpst = true; int rmode; if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } rd = extract32(insn, 0, 5); rn = extract32(insn, 5, 5); a = extract32(insn, 23, 1); u = extract32(insn, 29, 1); is_scalar = extract32(insn, 28, 1); is_q = extract32(insn, 30, 1); opcode = extract32(insn, 12, 5); fpop = deposit32(opcode, 5, 1, a); fpop = deposit32(fpop, 6, 1, u); switch (fpop) { case 0x1d: /* SCVTF */ case 0x5d: /* UCVTF */ { int elements; if (is_scalar) { elements = 1; } else { elements = (is_q ? 8 : 4); } if (!fp_access_check(s)) { return; } handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16); return; } break; case 0x2c: /* FCMGT (zero) */ case 0x2d: /* FCMEQ (zero) */ case 0x2e: /* FCMLT (zero) */ case 0x6c: /* FCMGE (zero) */ case 0x6d: /* FCMLE (zero) */ handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd); return; case 0x3d: /* FRECPE */ case 0x3f: /* FRECPX */ break; case 0x18: /* FRINTN */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_TIEEVEN; break; case 0x19: /* FRINTM */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_NEGINF; break; case 0x38: /* FRINTP */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_POSINF; break; case 0x39: /* FRINTZ */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_ZERO; break; case 0x58: /* FRINTA */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_TIEAWAY; break; case 0x59: /* FRINTX */ case 0x79: /* FRINTI */ only_in_vector = true; /* current rounding mode */ break; case 0x1a: /* FCVTNS */ need_rmode = true; rmode = FPROUNDING_TIEEVEN; break; case 0x1b: /* FCVTMS */ need_rmode = true; rmode = FPROUNDING_NEGINF; break; case 0x1c: /* FCVTAS */ need_rmode = true; rmode = FPROUNDING_TIEAWAY; break; case 0x3a: /* FCVTPS */ need_rmode = true; rmode = FPROUNDING_POSINF; break; case 0x3b: /* FCVTZS */ need_rmode = true; rmode = FPROUNDING_ZERO; break; case 0x5a: /* FCVTNU */ need_rmode = true; rmode = FPROUNDING_TIEEVEN; break; case 0x5b: /* FCVTMU */ need_rmode = true; rmode = FPROUNDING_NEGINF; break; case 0x5c: /* FCVTAU */ need_rmode = true; rmode = FPROUNDING_TIEAWAY; break; case 0x7a: /* FCVTPU */ need_rmode = true; rmode = FPROUNDING_POSINF; break; case 0x7b: /* FCVTZU */ need_rmode = true; rmode = FPROUNDING_ZERO; break; case 0x2f: /* FABS */ case 0x6f: /* FNEG */ need_fpst = false; break; case 0x7d: /* FRSQRTE */ case 0x7f: /* FSQRT (vector) */ break; default: unallocated_encoding(s); return; } /* Check additional constraints for the scalar encoding */ if (is_scalar) { if (!is_q) { unallocated_encoding(s); return; } /* FRINTxx is only in the vector form */ if (only_in_vector) { unallocated_encoding(s); return; } } if (!fp_access_check(s)) { return; } if (need_rmode || need_fpst) { tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16); } if (need_rmode) { tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); } if (is_scalar) { TCGv_i32 tcg_op = read_fp_hreg(s, rn); TCGv_i32 tcg_res = tcg_temp_new_i32(); switch (fpop) { case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x3d: /* FRECPE */ 
gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus); break; case 0x3f: /* FRECPX */ gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus); break; case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x6f: /* FNEG */ tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000); break; case 0x7d: /* FRSQRTE */ gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus); break; default: g_assert_not_reached(); } /* limit any sign extension going on */ tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff); write_fp_sreg(s, rd, tcg_res); tcg_temp_free_i32(tcg_res); tcg_temp_free_i32(tcg_op); } else { for (pass = 0; pass < (is_q ? 8 : 4); pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(); TCGv_i32 tcg_res = tcg_temp_new_i32(); read_vec_element_i32(s, tcg_op, rn, pass, MO_16); switch (fpop) { case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x3d: /* FRECPE */ gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus); break; case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x18: /* FRINTN */ case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ case 0x39: /* FRINTZ */ case 0x58: /* FRINTA */ case 0x79: /* FRINTI */ gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x59: /* FRINTX */ gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus); break; case 0x2f: /* FABS */ tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff); break; case 0x6f: /* FNEG */ tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000); break; case 0x7d: /* FRSQRTE */ gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus); break; case 0x7f: /* FSQRT */ gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus); break; default: g_assert_not_reached(); } write_vec_element_i32(s, tcg_res, rd, pass, MO_16); tcg_temp_free_i32(tcg_res); tcg_temp_free_i32(tcg_op); } clear_vec_high(s, is_q, rd); } if (tcg_rmode) { gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_rmode); } if (tcg_fpstatus) { tcg_temp_free_ptr(tcg_fpstatus); } }
96a664d05c238ea1b64af2394b58e956fe0afe26
https://github.com/qemu/qemu
1not_vulnerable
hw/intc/arm_gicv3_cpuif: Tolerate spurious EOIR writes Commit 382c7160d1cd ("hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic") added an assert_not_reached() if the guest writes the EOIR register while no interrupt is active. It turns out some software does this: EDK2, in GicV3ExitBootServicesEvent(), unconditionally writes EOIR for all interrupts that it manages. This now causes QEMU to abort when running UEFI on a VM with GICv3. Although it is UNPREDICTABLE behavior and EDK2 does need fixing, the punishment seems a little harsh, especially since icc_eoir_write() already tolerates writes of nonexistent interrupt numbers. Display a guest error and tolerate spurious EOIR writes. Fixes: 382c7160d1cd ("hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic") Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Tested-by: Alex Bennée <alex.bennee@linaro.org> Message-id: 20210604130352.1887560-1-jean-philippe@linaro.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* End of Interrupt */ GICv3CPUState *cs = icc_cs_from_env(env); int irq = value & 0xffffff; int grp; bool is_eoir0 = ri->crm == 8; if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) { icv_eoir_write(env, ri, value); return; } trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1, gicv3_redist_affid(cs), value); if (irq >= cs->gic->num_irq) { /* This handles two cases: * 1. If software writes the ID of a spurious interrupt [ie 1020-1023] * to the GICC_EOIR, the GIC ignores that write. * 2. If software writes the number of a non-existent interrupt * this must be a subcase of "value written does not match the last * valid interrupt value read from the Interrupt Acknowledge * register" and so this is UNPREDICTABLE. We choose to ignore it. */ return; } grp = icc_highest_active_group(cs); switch (grp) { case GICV3_G0: if (!is_eoir0) { return; } if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) { return; } break; case GICV3_G1: if (is_eoir0) { return; } if (!arm_is_secure(env)) { return; } break; case GICV3_G1NS: if (is_eoir0) { return; } if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) { return; } break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: IRQ %d isn't active\n", __func__, irq); return; } icc_drop_prio(cs, grp); if (!icc_eoi_split(env, cs)) { /* Priority drop and deactivate not split: deactivate irq now */ icc_deactivate_irq(cs, irq); } }
0bcd5a18940e1c1e3350b93cfadcdc6b58ca1c0e
https://github.com/qemu/qemu
1not_vulnerable
esp: fix migration version check in esp_is_version_5() Commit 4e78f3bf35 "esp: defer command completion interrupt on incoming data transfers" added a version check for use with VMSTATE_*_TEST macros to allow migration from older QEMU versions. Unfortunately the version check fails to work in its current form since if the VMStateDescription version_id is incremented, the test returns false and so the fields are not included in the outgoing migration stream. Change the version check to use >= rather than == to ensure that migration works correctly when the ESPState VMStateDescription has version_id > 5. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Fixes: 4e78f3bf35 ("esp: defer command completion interrupt on incoming data transfers") Message-Id: <20210613102614.5438-1-mark.cave-ayland@ilande.co.uk> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static bool esp_is_version_5(void *opaque, int version_id) { ESPState *s = ESP(opaque); version_id = MIN(version_id, s->mig_version_id); return version_id >= 5; }
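For reference, a minimal sketch of how such a predicate is typically wired into a VMStateDescription field list; the field name below is a placeholder for illustration, not necessarily one that ESPState really uses:

/* With the old "== 5" check, bumping the VMStateDescription's
 * version_id to 6 made esp_is_version_5() return false and silently
 * dropped the field from the outgoing stream; ">= 5" keeps it for
 * every later version. "mig_example_field" is illustrative only. */
VMSTATE_UINT32_TEST(mig_example_field, ESPState, esp_is_version_5),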
c348458f357784629c36a6eb1493c0c0c33b74e7
https://github.com/qemu/qemu
1not_vulnerable
esp: correctly accumulate extended messages for PDMA Commit 799d90d818 "esp: transition to message out phase after SATN and stop command" added logic to correctly handle extended messages for DMA requests but not for PDMA requests. Apply the same logic in esp_do_dma() to do_dma_pdma_cb() so that extended messages terminated with a PDMA request are accumulated correctly. This allows the ESP device to respond correctly to the SDTR negotiation initiated by the NetBSD ESP driver without causing errors and timeouts on boot. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Message-Id: <20210519100803.10293-6-mark.cave-ayland@ilande.co.uk> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void do_dma_pdma_cb(ESPState *s) { int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); int len; uint32_t n; if (s->do_cmd) { /* Ensure we have received complete command after SATN and stop */ if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) { return; } s->ti_size = 0; if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) { /* No command received */ if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) { return; } /* Command has been received */ s->do_cmd = 0; do_cmd(s); } else { /* * Extra message out bytes received: update cmdfifo_cdb_offset * and then switch to command phase */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; s->rregs[ESP_RSEQ] = SEQ_CD; s->rregs[ESP_RINTR] |= INTR_BS; esp_raise_irq(s); } return; } if (!s->current_req) { return; } if (to_device) { /* Copy FIFO data to device */ len = MIN(s->async_len, ESP_FIFO_SZ); len = MIN(len, fifo8_num_used(&s->fifo)); n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len); s->async_buf += n; s->async_len -= n; s->ti_size += n; if (n < len) { /* Unaligned accesses can cause FIFO wraparound */ len = len - n; n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len); s->async_buf += n; s->async_len -= n; s->ti_size += n; } if (s->async_len == 0) { scsi_req_continue(s->current_req); return; } if (esp_get_tc(s) == 0) { esp_lower_drq(s); esp_dma_done(s); } return; } else { if (s->async_len == 0) { /* Defer until the scsi layer has completed */ scsi_req_continue(s->current_req); s->data_in_ready = false; return; } if (esp_get_tc(s) != 0) { /* Copy device data to FIFO */ len = MIN(s->async_len, esp_get_tc(s)); len = MIN(len, fifo8_num_free(&s->fifo)); fifo8_push_all(&s->fifo, s->async_buf, len); s->async_buf += len; s->async_len -= len; s->ti_size -= len; esp_set_tc(s, esp_get_tc(s) - len); if (esp_get_tc(s) == 0) { /* Indicate transfer to FIFO is complete */ s->rregs[ESP_RSTAT] |= STAT_TC; } return; } /* Partially filled a scsi buffer. Complete immediately. */ esp_lower_drq(s); esp_dma_done(s); } }
35579b523cf8f441da12f968ce5dcf6ae0bfbfea
https://github.com/qemu/qemu
1not_vulnerable
esp: revert 75ef849696 "esp: correctly fill bus id with requested lun" This commit from nearly 10 years ago is now broken due to the improvements in esp emulation (or perhaps was never correct). It shows up as a bug in detecting the CDROM drive under MacOS. The error is caused by the MacOS CDROM driver sending this CDB with an "S without ATN" command and without DMA: 0x12 0x00 0x00 0x00 0x05 0x00 (INQUIRY) This is a valid INQUIRY command; however, with this logic present, the 3rd byte (0x0) is copied over the 1st byte (0x12), which silently converts the INQUIRY command to a TEST UNIT READY command before passing it to the QEMU SCSI layer. Since the TEST UNIT READY command has a zero length response the MacOS CDROM driver never receives a response and assumes the CDROM is not present. The logic was to ignore the IDENTIFY byte and copy the LUN over from the CDB, which did store the LUN in bits 5-7 of the second byte in olden times. This, however, is all obsolete, so just drop the code. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Message-Id: <20210519100803.10293-5-mark.cave-ayland@ilande.co.uk> [Tweaked commit message. - Paolo] Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static uint32_t get_cmd(ESPState *s, uint32_t maxlen) { uint8_t buf[ESP_CMDFIFO_SZ]; uint32_t dmalen, n; int target; target = s->wregs[ESP_WBUSID] & BUSID_DID; if (s->dma) { dmalen = MIN(esp_get_tc(s), maxlen); if (dmalen == 0) { return 0; } if (s->dma_memory_read) { s->dma_memory_read(s->dma_opaque, buf, dmalen); dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen); fifo8_push_all(&s->cmdfifo, buf, dmalen); } else { if (esp_select(s) < 0) { fifo8_reset(&s->cmdfifo); return -1; } esp_raise_drq(s); fifo8_reset(&s->cmdfifo); return 0; } } else { dmalen = MIN(fifo8_num_used(&s->fifo), maxlen); if (dmalen == 0) { return 0; } n = esp_fifo_pop_buf(&s->fifo, buf, dmalen); n = MIN(fifo8_num_free(&s->cmdfifo), n); fifo8_push_all(&s->cmdfifo, buf, n); } trace_esp_get_cmd(dmalen, target); if (esp_select(s) < 0) { fifo8_reset(&s->cmdfifo); return -1; } return dmalen; }
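A worked illustration of the failure mode, reconstructing only the byte-level effect described in the commit message (the removed code itself is not quoted here):

/* The CDB sent by the MacOS CDROM driver (6-byte INQUIRY): */
uint8_t cdb[6] = { 0x12, 0x00, 0x00, 0x00, 0x05, 0x00 };
/* The removed legacy-LUN logic effectively did cdb[0] = cdb[2],
 * so opcode 0x12 (INQUIRY) became 0x00 (TEST UNIT READY). A TEST
 * UNIT READY response has zero length, so the driver never saw any
 * data and concluded that no CDROM was present. */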
cf1a7a9b3721544aaa3e43d111eb383c30d71a62
https://github.com/qemu/qemu
1not_vulnerable
esp: only assert INTR_DC interrupt flag if selection fails The datasheet sequence tables confirm that when a target selection fails, only the INTR_DC interrupt flag should be asserted. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Fixes: cf47a41e05 ("esp: latch individual bits in ESP_RINTR register") Message-Id: <20210518212511.21688-2-mark.cave-ayland@ilande.co.uk> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static int esp_select(ESPState *s) { int target; target = s->wregs[ESP_WBUSID] & BUSID_DID; s->ti_size = 0; fifo8_reset(&s->fifo); if (s->current_req) { /* Started a new command before the old one finished. Cancel it. */ scsi_req_cancel(s->current_req); } s->current_dev = scsi_device_find(&s->bus, 0, target, 0); if (!s->current_dev) { /* No such drive */ s->rregs[ESP_RSTAT] = 0; s->rregs[ESP_RINTR] = INTR_DC; s->rregs[ESP_RSEQ] = SEQ_0; esp_raise_irq(s); return -1; } /* * Note that we deliberately don't raise the IRQ here: this will be done * either in do_busid_cmd() for DATA OUT transfers or by the deferred * IRQ mechanism in esp_transfer_data() for DATA IN transfers */ s->rregs[ESP_RINTR] |= INTR_FC; s->rregs[ESP_RSEQ] = SEQ_CD; return 0; }
6e1da3d305499d3907f3c7f6638243e2e09b5085
https://github.com/qemu/qemu
1not_vulnerable
runstate: Initialize Error * to NULL Based on the description of error_setg(), the local variable err in qemu_init_subsystems() should be initialized to NULL. Fixes: efd7ab22fb ("vl: extract qemu_init_subsystems") Cc: qemu-stable@nongnu.org Signed-off-by: Peng Liang <liangpeng10@huawei.com> Message-Id: <20210610131729.3906565-1-liangpeng10@huawei.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void qemu_run_exit_notifiers(void) { notifier_list_notify(&exit_notifiers, NULL); }
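The contract this fix relies on can be sketched in a few lines; a hedged example of the documented error_setg() usage pattern, with a placeholder callee:

#include "qapi/error.h"

void example_caller(void)
{
    Error *err = NULL;         /* error_setv() asserts *errp == NULL */

    do_subsystem_init(&err);   /* placeholder for a callee that may set err */
    if (err) {
        error_report_err(err); /* reports the message and frees err */
    }
}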
38f71349c7c4969bc14da4da1c70b8cc4078d596
https://github.com/qemu/qemu
1not_vulnerable
vl: Fix an assert failure in error path Based on the description of error_setg(), the local variable err in qemu_maybe_daemonize() should be initialized to NULL. Without the fix, the uninitialized *errp triggers an assert failure which doesn't show much valuable information. Before the fix: qemu-system-x86_64: ../util/error.c:59: error_setv: Assertion `*errp == NULL' failed. After the fix: qemu-system-x86_64: cannot create PID file: Cannot open pid file: Permission denied Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com> Message-Id: <20210610084741.456260-1-zhenzhong.duan@intel.com> Cc: qemu-stable@nongnu.org Fixes: 0546c0609c ("vl: split various early command line options to a separate function", 2020-12-10) Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void qemu_process_help_options(void) { /* * Check for -cpu help and -device help before we call select_machine(), * which will return an error if the architecture has no default machine * type and the user did not specify one, so that the user doesn't need * to say '-cpu help -machine something'. */ if (cpu_option && is_help_option(cpu_option)) { list_cpus(cpu_option); exit(0); } if (qemu_opts_foreach(qemu_find_opts("device"), device_help_func, NULL, NULL)) { exit(0); } /* -L help lists the data directories and exits. */ if (list_data_dirs) { qemu_list_data_dirs(); exit(0); } }
144bff0304b8f93cf0eb9ed432434644302dc6d5
https://github.com/qemu/qemu
1not_vulnerable
linux-user: Disable static assert involving __SIGRTMAX if it is missing This check is to ensure that the loop in signal_table_init() from SIGRTMIN to SIGRTMAX falls within the bounds of host_to_target_signal_table (_NSIG). However, it is not critical, since _NSIG is already defined to be one larger than the largest signal supported by the system (as specified in the upcoming POSIX revision[0]). musl libc does not define __SIGRTMAX, so disabling this check when it is missing fixes one of the last remaining errors when building qemu. [0] https://www.austingroupbugs.net/view.php?id=741 Signed-off-by: Michael Forney <mforney@mforney.org> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <20210526190203.4255-1-mforney@mforney.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static void host_signal_handler(int host_signum, siginfo_t *info, void *puc);
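The guard plausibly looks like the following sketch, consistent with the commit message but not a verbatim quote of the patch:

#ifdef __SIGRTMAX
/* Bounds check for the SIGRTMIN..SIGRTMAX loop in signal_table_init();
 * compiled only when the libc defines __SIGRTMAX (musl does not),
 * which is safe because _NSIG is already one larger than the largest
 * signal supported by the system. */
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif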
25b2ef2e8ee23109b0c3ce9ea71330bf8a7d12bd
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: reorder free calls. Free in the correct order to avoid use-after-free. Resolves: CID 1453812 Signed-off-by: Gerd Hoffmann <kraxel@redhat.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Reviewed-by: Li Qiang <liq3ea@gmail.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Message-Id: <20210604103714.1237414-1-kraxel@redhat.com>
static void vg_resource_create_2d(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { pixman_format_code_t pformat; struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_create_2d c2d; VUGPU_FILL_CMD(c2d); virtio_gpu_bswap_32(&c2d, sizeof(c2d)); if (c2d.resource_id == 0) { g_critical("%s: resource id 0 is not allowed", __func__); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } res = virtio_gpu_find_resource(g, c2d.resource_id); if (res) { g_critical("%s: resource already exists %d", __func__, c2d.resource_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } res = g_new0(struct virtio_gpu_simple_resource, 1); res->width = c2d.width; res->height = c2d.height; res->format = c2d.format; res->resource_id = c2d.resource_id; pformat = virtio_gpu_get_pixman_format(c2d.format); if (!pformat) { g_critical("%s: host couldn't handle guest format %d", __func__, c2d.format); g_free(res); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height); res->image = pixman_image_create_bits(pformat, c2d.width, c2d.height, (uint32_t *)res->buffer.mmap, res->buffer.stride); if (!res->image) { g_critical("%s: resource creation failed %d %d %d", __func__, c2d.resource_id, c2d.width, c2d.height); vugbm_buffer_destroy(&res->buffer); g_free(res); cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; return; } QTAILQ_INSERT_HEAD(&g->reslist, res, next); }
c7ddc8821d88d958bb6d4ef1279ec3609b17ffda
https://github.com/qemu/qemu
1not_vulnerable
block: preserve errno from fdatasync failures When fdatasync() fails on a file backend we set a flag that short-circuits any future attempts to call fdatasync(). The first failure returns the true errno, but the later short-circuited calls return a generic EIO. The latter is unhelpful because fdatasync() can return a variety of errnos, including EACCES. Reviewed-by: Connor Kuehl <ckuehl@redhat.com> Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
static int handle_aiocb_flush(void *opaque) { RawPosixAIOData *aiocb = opaque; BDRVRawState *s = aiocb->bs->opaque; int ret; if (s->page_cache_inconsistent) { return -s->page_cache_inconsistent; } ret = qemu_fdatasync(aiocb->aio_fildes); if (ret == -1) { /* There is no clear definition of the semantics of a failing fsync(), * so we may have to assume the worst. The sad truth is that this * assumption is correct for Linux. Some pages are now probably marked * clean in the page cache even though they are inconsistent with the * on-disk contents. The next fdatasync() call would succeed, but no * further writeback attempt will be made. We can't get back to a state * in which we know what is on disk (we would have to rewrite * everything that was touched since the last fdatasync() at least), so * make bdrv_flush() fail permanently. Given that the behaviour isn't * really defined, I have little hope that other OSes are doing better. * * Obviously, this doesn't affect O_DIRECT, which bypasses the page * cache. */ if ((s->open_flags & O_DIRECT) == 0) { s->page_cache_inconsistent = errno; } return -errno; } return 0; }
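Outside QEMU, the errno-preserving pattern reduces to a few lines; a standalone sketch:

#include <errno.h>
#include <unistd.h>

static int cached_errno;            /* 0 while the cache is consistent */

static int flush_once(int fd)
{
    if (cached_errno) {
        return -cached_errno;       /* replay the real errno, e.g. -EACCES */
    }
    if (fdatasync(fd) == -1) {
        cached_errno = errno;       /* remember the first failure */
        return -errno;
    }
    return 0;
}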
f291f45f4ef445ccc1aaf7b5bc595dab17d89e8d
https://github.com/qemu/qemu
1not_vulnerable
softfloat: Fix tp init in float32_exp2 Typo in the conversion to FloatParts64. Fixes: 572c4d862ff2 Fixes: Coverity CID 1457457 Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Message-Id: <20210607223812.110596-1-richard.henderson@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
float32 float32_exp2(float32 a, float_status *status) { FloatParts64 xp, xnp, tp, rp; int i; float32_unpack_canonical(&xp, a, status); if (unlikely(xp.cls != float_class_normal)) { switch (xp.cls) { case float_class_snan: case float_class_qnan: parts_return_nan(&xp, status); return float32_round_pack_canonical(&xp, status); case float_class_inf: return xp.sign ? float32_zero : a; case float_class_zero: return float32_one; default: break; } g_assert_not_reached(); } float_raise(float_flag_inexact, status); float64_unpack_canonical(&tp, float64_ln2, status); xp = *parts_mul(&xp, &tp, status); xnp = xp; float64_unpack_canonical(&rp, float64_one, status); for (i = 0 ; i < 15 ; i++) { float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status); rp = *parts_muladd(&tp, &xp, &rp, 0, status); xnp = *parts_mul(&xnp, &xp, status); } return float32_round_pack_canonical(&rp, status); }
5a2d9929ac1f01a1e8ef2a3f56f69e6069863dad
https://github.com/qemu/qemu
1not_vulnerable
Fixed calculation error of pkt->header_size in fill_pkt_tcp_info() The data pointer has already skipped vnet_hdr_len in parse_packet_early(). So, we cannot subtract vnet_hdr_len again when calculating pkt->header_size in fill_pkt_tcp_info(). Otherwise, it will cause network packet comparison errors and greatly increase the frequency of checkpoints. Signed-off-by: Lei Rao <lei.rao@intel.com> Signed-off-by: Zhang Chen <chen.zhang@intel.com> Reviewed-by: Li Zhijian <lizhijian@fujitsu.com> Reviewed-by: Zhang Chen <chen.zhang@intel.com> Reviewed-by: Lukas Straub <lukasstraub2@web.de> Tested-by: Lukas Straub <lukasstraub2@web.de> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void fill_pkt_tcp_info(void *data, uint32_t *max_ack) { Packet *pkt = data; struct tcp_hdr *tcphd; tcphd = (struct tcp_hdr *)pkt->transport_header; pkt->tcp_seq = ntohl(tcphd->th_seq); pkt->tcp_ack = ntohl(tcphd->th_ack); *max_ack = *max_ack > pkt->tcp_ack ? *max_ack : pkt->tcp_ack; pkt->header_size = pkt->transport_header - (uint8_t *)pkt->data + (tcphd->th_off << 2); pkt->payload_size = pkt->size - pkt->header_size; pkt->seq_end = pkt->tcp_seq + pkt->payload_size; pkt->flags = tcphd->th_flags; }
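A worked example with illustrative header sizes shows why the extra subtraction was wrong:

/* pkt->data already points past the vnet header when this runs.
 * Assume Ethernet (14) + IPv4 (20), so transport_header = data + 34,
 * th_off == 5 (a 20-byte TCP header), and vnet_hdr_len == 12: */
int header_size = 34 + (5 << 2);        /* 54 bytes: correct */
int old_result  = 34 + (5 << 2) - 12;   /* 42 bytes: the old, buggy value */
/* The understated header_size inflated payload_size, so the COLO
 * primary/secondary packet comparison mismatched and checkpoints
 * fired far more often than necessary. */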
c33f23a419f95da16ab4faaf08be635c89b96ff0
https://github.com/qemu/qemu
1not_vulnerable
vhost-vdpa: don't initialize backend_features We used to initialize backend_features during vhost_vdpa_init() regardless of whether it was supported by vhost. This led to unsupported features like VIRTIO_F_IN_ORDER being included and set on the vhost-vdpa device during vhost_dev_start. Because VIRTIO_F_IN_ORDER is not supported by vhost-vdpa, it won't be advertised to the guest, which breaks the datapath. Fix this by not initializing backend_features, so that acked_features is built only from guest features via vhost_net_ack_features(). Fixes: 108a64818e69b ("vhost-vdpa: introduce vhost-vdpa backend") Cc: qemu-stable@nongnu.org Cc: Gautam Dawar <gdawar@xilinx.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) { uint8_t s; trace_vhost_vdpa_add_status(dev, status); if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) { return; } s |= status; vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s); }
d80f54ce53167e38623b8aafe8317458a6d7a6cd
https://github.com/qemu/qemu
1not_vulnerable
channel-socket: Only set CLOEXEC if we have space for fds MSG_CMSG_CLOEXEC cleans up received fds; it's really only for Unix sockets, but currently we enable it for everything; some socket types (IP_MPTCP) don't like this. Only enable it when we're giving the recvmsg room to receive fds anyway. Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Message-Id: <20210421112834.107651-2-dgilbert@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
static ssize_t qio_channel_socket_readv(QIOChannel *ioc, const struct iovec *iov, size_t niov, int **fds, size_t *nfds, Error **errp) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); ssize_t ret; struct msghdr msg = { NULL, }; char control[CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)]; int sflags = 0; memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)); msg.msg_iov = (struct iovec *)iov; msg.msg_iovlen = niov; if (fds && nfds) { msg.msg_control = control; msg.msg_controllen = sizeof(control); #ifdef MSG_CMSG_CLOEXEC sflags |= MSG_CMSG_CLOEXEC; #endif } retry: ret = recvmsg(sioc->fd, &msg, sflags); if (ret < 0) { if (errno == EAGAIN) { return QIO_CHANNEL_ERR_BLOCK; } if (errno == EINTR) { goto retry; } error_setg_errno(errp, errno, "Unable to read from socket"); return -1; } if (fds && nfds) { qio_channel_socket_copy_fds(&msg, fds, nfds); } return ret; }
787a4baf91fa2ff36b901c0b31ea73f3f0739415
https://github.com/qemu/qemu
1not_vulnerable
target/riscv/pmp: Add assert for ePMP operations Although we construct epmp_operation in such a way that it can only be between 0 and 15, Coverity complains that we don't handle the other possible cases. To fix Coverity and make it easier for humans to read, add a default case to the switch statement that calls g_assert_not_reached(). Fixes: CID 1453108 Signed-off-by: Alistair Francis <alistair.francis@wdc.com> Reviewed-by: Bin Meng <bmeng.cn@gmail.com> Reviewed-by: LIU Zhiwei <zhiwei_liu@c-sky.com> Message-id: ec5f225928eec448278c82fcb1f6805ee61dde82.1621550996.git.alistair.francis@wdc.com
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, target_ulong mode) { int i = 0; int ret = -1; int pmp_size = 0; target_ulong s = 0; target_ulong e = 0; /* Short cut if no rules */ if (0 == pmp_get_num_rules(env)) { return pmp_hart_has_privs_default(env, addr, size, privs, allowed_privs, mode); } if (size == 0) { if (riscv_feature(env, RISCV_FEATURE_MMU)) { /* * If size is unknown (0), assume that all bytes * from addr to the end of the page will be accessed. */ pmp_size = -(addr | TARGET_PAGE_MASK); } else { pmp_size = sizeof(target_ulong); } } else { pmp_size = size; } /* 1.10 draft priv spec states there is an implicit order from low to high */ for (i = 0; i < MAX_RISCV_PMPS; i++) { s = pmp_is_in_range(env, i, addr); e = pmp_is_in_range(env, i, addr + pmp_size - 1); /* partially inside */ if ((s + e) == 1) { qemu_log_mask(LOG_GUEST_ERROR, "pmp violation - access is partially inside\n"); ret = 0; break; } /* fully inside */ const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); /* * Convert the PMP permissions to match the truth table in the * ePMP spec. */ const uint8_t epmp_operation = ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) | ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) | (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) | ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2); if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) { /* * If the PMP entry is not off and the address is in range, * do the priv check */ if (!MSECCFG_MML_ISSET(env)) { /* * If mseccfg.MML Bit is not set, do pmp priv check * This will always apply to regular PMP. */ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; if ((mode != PRV_M) || pmp_is_locked(env, i)) { *allowed_privs &= env->pmp_state.pmp[i].cfg_reg; } } else { /* * If mseccfg.MML Bit set, do the enhanced pmp priv check */ if (mode == PRV_M) { switch (epmp_operation) { case 0: case 1: case 4: case 5: case 6: case 7: case 8: *allowed_privs = 0; break; case 2: case 3: case 14: *allowed_privs = PMP_READ | PMP_WRITE; break; case 9: case 10: *allowed_privs = PMP_EXEC; break; case 11: case 13: *allowed_privs = PMP_READ | PMP_EXEC; break; case 12: case 15: *allowed_privs = PMP_READ; break; default: g_assert_not_reached(); } } else { switch (epmp_operation) { case 0: case 8: case 9: case 12: case 13: case 14: *allowed_privs = 0; break; case 1: case 10: case 11: *allowed_privs = PMP_EXEC; break; case 2: case 4: case 15: *allowed_privs = PMP_READ; break; case 3: case 6: *allowed_privs = PMP_READ | PMP_WRITE; break; case 5: *allowed_privs = PMP_READ | PMP_EXEC; break; case 7: *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; break; default: g_assert_not_reached(); } } } ret = ((privs & *allowed_privs) == privs); break; } } /* No rule matched */ if (ret == -1) { return pmp_hart_has_privs_default(env, addr, size, privs, allowed_privs, mode); } return ret == 1 ? true : false; }
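A worked example of the truth-table index, using the bit placements visible in the function above (in QEMU's pmp.h, PMP_READ is bit 0 and PMP_LOCK bit 7):

uint8_t cfg = PMP_LOCK | PMP_READ;
uint8_t op  = ((cfg & PMP_LOCK) >> 4)    /* lock  -> bit 3 */
            | ((cfg & PMP_READ) << 2)    /* read  -> bit 2 */
            |  (cfg & PMP_WRITE)         /* write -> bit 1 */
            | ((cfg & PMP_EXEC) >> 2);   /* exec  -> bit 0 */
/* op == 12: the M-mode switch grants PMP_READ only, while the S/U-mode
 * switch grants nothing. Every cfg maps into 0..15, so the new
 * g_assert_not_reached() default exists purely to document that range
 * for Coverity and human readers. */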
dd6921894905c8ce0664a77f9dac78408bc3b52d
https://github.com/qemu/qemu
1not_vulnerable
target/nios2: fix page-fit instruction count This patch fixes the calculation of the number of instructions that fit in the current page. It prevents creation of translation blocks that cross page boundaries. This is required for deterministic exception generation in icount mode. Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <162072241046.823357.10485774346114851009.stgit@pasha-ThinkPad-X280> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) { CPUNios2State *env = cs->env_ptr; DisasContext dc1, *dc = &dc1; int num_insns; /* Initialize DC */ dc->cpu_env = cpu_env; dc->cpu_R = cpu_R; dc->is_jmp = DISAS_NEXT; dc->pc = tb->pc; dc->tb = tb; dc->mem_idx = cpu_mmu_index(env, false); dc->singlestep_enabled = cs->singlestep_enabled; /* Set up instruction counts */ num_insns = 0; if (max_insns > 1) { int page_insns = (TARGET_PAGE_SIZE - (tb->pc & ~TARGET_PAGE_MASK)) / 4; if (max_insns > page_insns) { max_insns = page_insns; } } gen_tb_start(tb); do { tcg_gen_insn_start(dc->pc); num_insns++; if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) { gen_exception(dc, EXCP_DEBUG); /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ dc->pc += 4; break; } if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) { gen_io_start(); } /* Decode an instruction */ handle_instruction(dc, env); dc->pc += 4; /* Translation stops when a conditional branch is encountered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ } while (!dc->is_jmp && !tcg_op_buf_full() && num_insns < max_insns); /* Indicate where the next block should start */ switch (dc->is_jmp) { case DISAS_NEXT: case DISAS_UPDATE: /* Save the current PC back into the CPU register */ tcg_gen_movi_tl(cpu_R[R_PC], dc->pc); tcg_gen_exit_tb(NULL, 0); break; default: case DISAS_JUMP: /* The jump will already have updated the PC register */ tcg_gen_exit_tb(NULL, 0); break; case DISAS_NORETURN: case DISAS_TB_JUMP: /* nothing more to generate */ break; } /* End off the block */ gen_tb_end(tb, num_insns); /* Mark instruction starts for the final generated instruction */ tb->size = dc->pc - tb->pc; tb->icount = num_insns; #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) && qemu_log_in_addr_range(tb->pc)) { FILE *logfile = qemu_log_lock(); qemu_log("IN: %s\n", lookup_symbol(tb->pc)); log_target_disas(cs, tb->pc, dc->pc - tb->pc); qemu_log("\n"); qemu_log_unlock(logfile); } #endif }
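A quick arithmetic check of the page-fit cap, assuming 4 KiB target pages:

/* tb->pc = 0x00100ff8: the offset within the page is 0xff8, so
 *   page_insns = (0x1000 - 0xff8) / 4 = 2
 * and max_insns is capped at 2. The TB now ends exactly at the page
 * boundary instead of spilling into the next page, which keeps
 * exception points deterministic in icount mode. */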
29c3d213f4ad69688638330728cff1a8769d7415
https://github.com/qemu/qemu
1not_vulnerable
oslib-posix: Remove OpenBSD workaround for fcntl("/dev/null", F_SETFL, O_NONBLOCK) failure OpenBSD prior to 6.3 required a workaround to utilize fcntl(F_SETFL) on memory devices. Since the modern versions of OpenBSD, the only ones officially supported and buildable, do not have this issue, I am garbage-collecting this workaround. Signed-off-by: Brad Smith <brad@comstyle.com> Message-Id: <YGYECGXQhdamEJgC@humpty.home.comstyle.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
int qemu_try_set_nonblock(int fd) { int f; f = fcntl(fd, F_GETFL); if (f == -1) { return -errno; } if (fcntl(fd, F_SETFL, f | O_NONBLOCK) == -1) { return -errno; } return 0; }
6e0c60a2be30c333b06d3558a62b0f177199cbfb
https://github.com/qemu/qemu
1not_vulnerable
target/arm: fix missing exception class The DAIF and PAC checks used raise_exception_ra to raise an exception and unwind CPU state but raise_exception_ra is currently designed for handling data aborts as the syndrome is partially precomputed and encoded in the TB and then merged in merge_syn_data_abort when handling the data abort. Using raise_exception_ra for DAIF and PAC checks results in an empty syndrome being retrieved from data[2] in restore_state_to_opc and setting ESR to 0. This manifested as: kvm [571]: Unknown exception class: esr: 0x000000 -- Unknown/Uncategorized when launching a KVM guest if the host QEMU used a CPU supporting EL2+pointer authentication and pointer authentication was enabled in the guest. Rework raise_exception_ra such that the state is restored before raising the exception so that the exception is not clobbered by restore_state_to_opc. Fixes: 0d43e1a2d29a ("target/arm: Add PAuth helpers") Cc: Richard Henderson <richard.henderson@linaro.org> Cc: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Jamie Iles <jamie@nuviainc.com> [PMM: added comment] Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
void raise_exception(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el) { CPUState *cs = do_raise_exception(env, excp, syndrome, target_el); cpu_loop_exit(cs); }
0711a634355a68cd83966872e387402a8b4b048a
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Mark LDS{MIN,MAX} as signed operations The operands to tcg_gen_atomic_fetch_s{min,max}_i64 must be signed, so that the inputs are properly extended. Zero extend the result afterward, as needed. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/364 Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Message-id: 20210602020720.47679-1-richard.henderson@linaro.org Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void disas_ldst_atomic(DisasContext *s, uint32_t insn, int size, int rt, bool is_vector) { int rs = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int o3_opc = extract32(insn, 12, 4); bool r = extract32(insn, 22, 1); bool a = extract32(insn, 23, 1); TCGv_i64 tcg_rs, tcg_rt, clean_addr; AtomicThreeOpFn *fn = NULL; MemOp mop = s->be_data | size | MO_ALIGN; if (is_vector || !dc_isar_feature(aa64_atomics, s)) { unallocated_encoding(s); return; } switch (o3_opc) { case 000: /* LDADD */ fn = tcg_gen_atomic_fetch_add_i64; break; case 001: /* LDCLR */ fn = tcg_gen_atomic_fetch_and_i64; break; case 002: /* LDEOR */ fn = tcg_gen_atomic_fetch_xor_i64; break; case 003: /* LDSET */ fn = tcg_gen_atomic_fetch_or_i64; break; case 004: /* LDSMAX */ fn = tcg_gen_atomic_fetch_smax_i64; mop |= MO_SIGN; break; case 005: /* LDSMIN */ fn = tcg_gen_atomic_fetch_smin_i64; mop |= MO_SIGN; break; case 006: /* LDUMAX */ fn = tcg_gen_atomic_fetch_umax_i64; break; case 007: /* LDUMIN */ fn = tcg_gen_atomic_fetch_umin_i64; break; case 010: /* SWP */ fn = tcg_gen_atomic_xchg_i64; break; case 014: /* LDAPR, LDAPRH, LDAPRB */ if (!dc_isar_feature(aa64_rcpc_8_3, s) || rs != 31 || a != 1 || r != 0) { unallocated_encoding(s); return; } break; default: unallocated_encoding(s); return; } if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size); if (o3_opc == 014) { /* * LDAPR* are a special case because they are a simple load, not a * fetch-and-do-something op. * The architectural consistency requirements here are weaker than * full load-acquire (we only need "load-acquire processor consistent"), * but we choose to implement them as full LDAQ. */ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt, disas_ldst_compute_iss_sf(size, false, 0), true); tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); return; } tcg_rs = read_cpu_reg(s, rs, true); tcg_rt = cpu_reg(s, rt); if (o3_opc == 1) { /* LDCLR */ tcg_gen_not_i64(tcg_rs, tcg_rs); } /* The tcg atomic primitives are all full barriers. Therefore we * can ignore the Acquire and Release bits of this instruction. */ fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop); if ((mop & MO_SIGN) && size != MO_64) { tcg_gen_ext32u_i64(tcg_rt, tcg_rt); } }
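A concrete case showing why the MO_SIGN annotation matters, for a byte-sized LDSMIN with illustrative values:

/* Memory holds 0xff and Rs holds 0x01.
 * Unsigned inputs: smin(0xff = 255, 1) stores 1  -> wrong.
 * Signed inputs:   smin(0xff = -1,  1) stores -1 -> correct.
 * The tcg_gen_ext32u_i64() afterwards clears bits 63:32 of the
 * sign-extended old value returned in Rt, so the W-register view
 * stays properly zero-extended for sizes below MO_64. */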
300137965dbacec02eb2e26b3c6763b491d1f1b2
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Fix return values in fp_sysreg_checks() The fp_sysreg_checks() function is supposed to be returning an FPSysRegCheckResult, which is an enum with three possible values. However, three places in the function "return false" (a hangover from a previous iteration of the design where the function just returned a bool). Make these return FPSysRegCheckFailed instead (for no functional change, since both false and FPSysRegCheckFailed are zero). Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210520152840.24453-6-peter.maydell@linaro.org
static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno) { if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) { return FPSysRegCheckFailed; } switch (regno) { case ARM_VFP_FPSCR: case QEMU_VFP_FPSCR_NZCV: break; case ARM_VFP_FPSCR_NZCVQC: if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { return FPSysRegCheckFailed; } break; case ARM_VFP_FPCXT_S: case ARM_VFP_FPCXT_NS: if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { return FPSysRegCheckFailed; } if (!s->v8m_secure) { return FPSysRegCheckFailed; } break; default: return FPSysRegCheckFailed; } /* * FPCXT_NS is a special case: it has specific handling for * "current FP state is inactive", and must do the PreserveFPState() * but not the usual full set of actions done by ExecuteFPCheck(). * So we don't call vfp_access_check() and the callers must handle this. */ if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) { return FPSysRegCheckDone; } return FPSysRegCheckContinue; }
b873ed83311d96644b544b10f6869a430660585a
https://github.com/qemu/qemu
1not_vulnerable
ppc/pef.c: initialize cgs->ready in kvmppc_svm_init() QEMU is failing to launch a CGS pSeries guest in a host that has PEF support: qemu-system-ppc64: ../softmmu/vl.c:2585: qemu_machine_creation_done: Assertion `machine->cgs->ready' failed. Aborted This is happening because we're not setting the cgs->ready flag that is asserted in qemu_machine_creation_done() during machine start. cgs->ready is set in s390_pv_kvm_init() and sev_kvm_init(). Let's set it in kvmppc_svm_init() as well. Reported-by: Ram Pai <linuxram@us.ibm.com> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com> Message-Id: <20210528201619.52363-1-danielhb413@gmail.com> Acked-by: Ram Pai <linuxram@us.ibm.com> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
int pef_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) { if (!object_dynamic_cast(OBJECT(cgs), TYPE_PEF_GUEST)) { return 0; } if (!kvm_enabled()) { error_setg(errp, "PEF requires KVM"); return -1; } return kvmppc_svm_init(cgs, errp); }
52e9612ee94b58a1bc57242427b4dbe6c766d8f3
https://github.com/qemu/qemu
1not_vulnerable
target/ppc: used ternary operator when registering MAS The write callback decision when registering the MAS SPR has been turned into a ternary operation, rather than an if-then-else block. This was done because when building without TCG, even though the compiler will optimize away the pointers to spr_write_generic*, it doesn't optimize away the decision and assignment to the local pointer, creating compiler errors. This cleanup looked better than using ifdefs, so we decided to go with it. Signed-off-by: Bruno Larsen (billionai) <bruno.larsen@eldorado.org.br> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210525115355.8254-2-bruno.larsen@eldorado.org.br> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask, uint32_t *tlbncfg, uint32_t mmucfg) { #if !defined(CONFIG_USER_ONLY) const char *mas_names[8] = { "MAS0", "MAS1", "MAS2", "MAS3", "MAS4", "MAS5", "MAS6", "MAS7", }; int mas_sprn[8] = { SPR_BOOKE_MAS0, SPR_BOOKE_MAS1, SPR_BOOKE_MAS2, SPR_BOOKE_MAS3, SPR_BOOKE_MAS4, SPR_BOOKE_MAS5, SPR_BOOKE_MAS6, SPR_BOOKE_MAS7, }; int i; /* TLB assist registers */ /* XXX : not implemented */ for (i = 0; i < 8; i++) { if (mas_mask & (1 << i)) { spr_register(env, mas_sprn[i], mas_names[i], SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, (i == 2 && (env->insns_flags & PPC_64B)) ? &spr_write_generic : &spr_write_generic32, 0x00000000); } } if (env->nb_pids > 1) { /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID1, "PID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, 0x00000000); } if (env->nb_pids > 2) { /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID2, "PID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, 0x00000000); } spr_register(env, SPR_BOOKE_EPLC, "EPLC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_eplc, 0x00000000); spr_register(env, SPR_BOOKE_EPSC, "EPSC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_epsc, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MMUCFG, "MMUCFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, mmucfg); switch (env->nb_ways) { case 4: spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[3]); /* Fallthru */ case 3: spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[2]); /* Fallthru */ case 2: spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[1]); /* Fallthru */ case 1: spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[0]); /* Fallthru */ case 0: default: break; } #endif register_usprgh_sprs(env); }
ac559ecbea2649819e7b3fdd09f4e0243e0128db
https://github.com/qemu/qemu
1not_vulnerable
spapr: Set LPCR to current AIL mode when starting a new CPU TCG does not keep track of AIL mode in a central place; it's based on the current LPCR[AIL] bits. Synchronize the new CPU's LPCR to the current LPCR in rtas_start_cpu(), similarly to the way the ILE bit is synchronized. Open-code the ILE setting as well now that the caller's LPCR is available directly; there is no need for the indirection. Without this, under both TCG and KVM, adding a POWER8/9/10 class CPU with a new core ID after a modern Linux has booted results in the new CPU's LPCR missing the LPCR[AIL]=0b11 setting that the other CPUs have. This can cause crashes and unexpected behaviour. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Message-Id: <20210526091626.3388262-3-npiggin@gmail.com> Reviewed-by: Cédric Le Goater <clg@kaod.org> Reviewed-by: Greg Kurz <groug@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
static void rtas_start_cpu(PowerPCCPU *callcpu, SpaprMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { target_ulong id, start, r3; PowerPCCPU *newcpu; CPUPPCState *env; target_ulong lpcr; target_ulong caller_lpcr; if (nargs != 3 || nret != 1) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } id = rtas_ld(args, 0); start = rtas_ld(args, 1); r3 = rtas_ld(args, 2); newcpu = spapr_find_cpu(id); if (!newcpu) { /* Didn't find a matching cpu */ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } env = &newcpu->env; if (!CPU(newcpu)->halted) { rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } cpu_synchronize_state(CPU(newcpu)); env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME); hreg_compute_hflags(env); caller_lpcr = callcpu->env.spr[SPR_LPCR]; lpcr = env->spr[SPR_LPCR]; /* Set ILE the same way */ lpcr = (lpcr & ~LPCR_ILE) | (caller_lpcr & LPCR_ILE); /* Set AIL the same way */ lpcr = (lpcr & ~LPCR_AIL) | (caller_lpcr & LPCR_AIL); if (env->mmu_model == POWERPC_MMU_3_00) { /* * New cpus are expected to start in the same radix/hash mode * as the existing CPUs */ if (ppc64_v3_radix(callcpu)) { lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; } else { lpcr &= ~(LPCR_UPRT | LPCR_GTSE | LPCR_HR); } env->spr[SPR_PSSCR] &= ~PSSCR_EC; } ppc_store_lpcr(newcpu, lpcr); /* * Set the timebase offset of the new CPU to that of the invoking * CPU. This helps hotplugged CPU to have the correct timebase * offset. */ newcpu->env.tb_env->tb_offset = callcpu->env.tb_env->tb_offset; spapr_cpu_set_entry_state(newcpu, start, 0, r3, 0); qemu_cpu_kick(CPU(newcpu)); rtas_st(rets, 0, RTAS_OUT_SUCCESS); }
9f9f82dacebbb816c62d730658f14a615c3ea003
https://github.com/qemu/qemu
1not_vulnerable
spapr: nvdimm: Fix the persistent-memory root node name in device tree The FDT code adds the pmem root node with the name "persistent-memory", which should have been "ibm,persistent-memory". Linux fetches device tree nodes by type, and this has been working correctly because the type is correct. A search by the intended name would fail, though, so fix that. Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com> Message-Id: <162204278956.219.9061511386011411578.stgit@cc493db1e665> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, void *fdt, int *fdt_start_offset, Error **errp) { NVDIMMDevice *nvdimm = NVDIMM(drc->dev); *fdt_start_offset = spapr_dt_nvdimm(spapr, fdt, 0, nvdimm); return 0; }
3bf0844f3be77b24cc8f56fc8df9ff199f8324cb
https://github.com/qemu/qemu
1not_vulnerable
spapr: Don't hijack current_machine->boot_order QEMU 6.0 moved all the -boot variables to the machine. Especially, the removal of the boot_order static changed the handling of '-boot once' from:

    if (boot_once) {
        qemu_boot_set(boot_once, &error_fatal);
        qemu_register_reset(restore_boot_order, g_strdup(boot_order));
    }

to:

    if (current_machine->boot_once) {
        qemu_boot_set(current_machine->boot_once, &error_fatal);
        qemu_register_reset(restore_boot_order,
                            g_strdup(current_machine->boot_order));
    }

This means that we now register as the subsequent boot order a copy of current_machine->boot_once that was just set with the previous call to qemu_boot_set(), i.e. we never transition away from the once boot order. It is certainly fragile^Wwrong for the spapr code to hijack a field of the base machine type object like that. The boot order rework simply turned this software boundary violation into an actual bug. Have the spapr code handle that with its own field in SpaprMachineState. Also kfree() the initial boot device string when "once" was used. Fixes: 4b7acd2ac821 ("vl: clean up -boot variables") Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1960119 Cc: pbonzini@redhat.com Signed-off-by: Greg Kurz <groug@kaod.org> Message-Id: <20210521160735.1901914-1-groug@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) { MachineState *machine = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); int chosen; _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); if (reset) { const char *boot_device = spapr->boot_device; char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); size_t cb = 0; char *bootlist = get_boot_devices_list(&cb); if (machine->kernel_cmdline && machine->kernel_cmdline[0]) { _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline)); } if (spapr->initrd_size) { _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start", spapr->initrd_base)); _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end", spapr->initrd_base + spapr->initrd_size)); } if (spapr->kernel_size) { uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr), cpu_to_be64(spapr->kernel_size) }; _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel", &kprop, sizeof(kprop))); if (spapr->kernel_le) { _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0)); } } if (boot_menu) { _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu))); } _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width)); _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height)); _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth)); if (cb && bootlist) { int i; for (i = 0; i < cb; i++) { if (bootlist[i] == '\n') { bootlist[i] = ' '; } } _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist)); } if (boot_device && strlen(boot_device)) { _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device)); } if (!spapr->has_graphics && stdout_path) { /* * "linux,stdout-path" and "stdout" properties are * deprecated by linux kernel. New platforms should only * use the "stdout-path" property. Set the new property * and continue using older property to remain compatible * with the existing firmware. */ _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path)); _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path)); } /* * We can deal with BAR reallocation just fine, advertise it * to the guest */ if (smc->linux_pci_probe) { _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0)); } spapr_dt_ov5_platform_support(spapr, fdt, chosen); g_free(stdout_path); g_free(bootlist); } _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5")); }
8146b357d0cb3a3f5d500a1536f9f0e1ff3302cc
https://github.com/qemu/qemu
1not_vulnerable
block-copy: fix block_copy_task_entry() progress update Don't report successful progress on failure, when call_state->ret is set. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210528141628.44287-2-vsementsov@virtuozzo.com> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static coroutine_fn int block_copy_task_entry(AioTask *task);
8081f064e404dd524b3c43248b2084dee9d32d7c
https://github.com/qemu/qemu
1not_vulnerable
block/vvfat: inherit child_vvfat_qcow from child_of_bds Recently we've fixed a crash by adding a .get_parent_aio_context handler to child_vvfat_qcow. Now we want it to support .get_parent_desc as well. child_vvfat_qcow wants to implement its own .inherit_options, which is not bad. But omitting all the other handlers is a bad idea. Let's inherit the class from child_of_bds instead, similar to chain_child_class and detach_by_driver_cb_class in test-bdrv-drain.c. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210601075218.79249-5-vsementsov@virtuozzo.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static void vvfat_qcow_options(BdrvChildRole role, bool parent_is_format, int *child_flags, QDict *child_options, int parent_flags, QDict *parent_options) { qdict_set_default_str(child_options, BDRV_OPT_READ_ONLY, "off"); qdict_set_default_str(child_options, BDRV_OPT_AUTO_READ_ONLY, "off"); qdict_set_default_str(child_options, BDRV_OPT_CACHE_NO_FLUSH, "on"); }
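A sketch of the inherit-then-override pattern the commit describes, mirroring chain_child_class in test-bdrv-drain.c (the initialization function below is illustrative, not the actual patch):

static BdrvChildClass child_vvfat_qcow;

static void vvfat_init_child_class(void)
{
    /* Start from the full child_of_bds handler set, so that
     * .get_parent_desc, .get_parent_aio_context, etc. all work,
     * then override only the option-inheritance hook. */
    child_vvfat_qcow = child_of_bds;
    child_vvfat_qcow.inherit_options = vvfat_qcow_options;
}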
fa95e9fbab2c19fc07ba82988b1690f8a6ff171b
https://github.com/qemu/qemu
1not_vulnerable
block/file-posix: Try other fallbacks after invalid FALLOC_FL_ZERO_RANGE If fallocate(... FALLOC_FL_ZERO_RANGE ...) returns EINVAL, it's likely an indication that the file system is buggy and does not implement unaligned accesses right. We still might be lucky with the other fallback fallocate() calls later in this function, though, so we should not return immediately but rather try the others first. Since FALLOC_FL_ZERO_RANGE could also return EINVAL if the file descriptor is not a regular file, we ignore this filesystem bug silently, without printing an error message for the user. Signed-off-by: Thomas Huth <thuth@redhat.com> Message-Id: <20210527172020.847617-3-thuth@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static int handle_aiocb_write_zeroes(void *opaque) { RawPosixAIOData *aiocb = opaque; #ifdef CONFIG_FALLOCATE BDRVRawState *s = aiocb->bs->opaque; int64_t len; #endif if (aiocb->aio_type & QEMU_AIO_BLKDEV) { return handle_aiocb_write_zeroes_block(aiocb); } #ifdef CONFIG_FALLOCATE_ZERO_RANGE if (s->has_write_zeroes) { int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == -ENOTSUP) { s->has_write_zeroes = false; } else if (ret == 0 || ret != -EINVAL) { return ret; } /* * Note: Some file systems do not like unaligned byte ranges, and * return EINVAL in such a case, though they should not do it according * to the man-page of fallocate(). Thus we simply ignore this return * value and try the other fallbacks instead. */ } #endif #ifdef CONFIG_FALLOCATE_PUNCH_HOLE if (s->has_discard && s->has_fallocate) { int ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0) { ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_fallocate = false; } else if (ret == -EINVAL) { /* * Some file systems like older versions of GPFS do not like un- * aligned byte ranges, and return EINVAL in such a case, though * they should not do it according to the man-page of fallocate(). * Warn about the bad filesystem and try the final fallback instead. */ warn_report_once("Your file system is misbehaving: " "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. " "Please report this bug to your file system " "vendor."); } else if (ret != -ENOTSUP) { return ret; } else { s->has_discard = false; } } #endif #ifdef CONFIG_FALLOCATE /* Last resort: we are trying to extend the file with zeroed data. This * can be done via fallocate(fd, 0) */ len = bdrv_getlength(aiocb->bs); if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) { int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_fallocate = false; } #endif return -ENOTSUP; }
73ebf29729d1a40feaa9f8ab8951b6ee6dbfbede
https://github.com/qemu/qemu
1not_vulnerable
block/file-posix: Fix problem with fallocate(PUNCH_HOLE) on GPFS A customer reported that running qemu-img convert -t none -O qcow2 -f qcow2 input.qcow2 output.qcow2 fails for them with the following error message when the images are stored on a GPFS file system: qemu-img: error while writing sector 0: Invalid argument After analyzing the strace output, it seems like the problem is in handle_aiocb_write_zeroes(): The call to fallocate(FALLOC_FL_PUNCH_HOLE) returns EINVAL, which can apparently happen if the file system has a different idea of the granularity of the operation. It's arguably a bug in GPFS, since the PUNCH_HOLE mode should not result in EINVAL according to the man-page of fallocate(), but the file system is out there in production and so we have to deal with it. In commit 294682cc3a ("block: workaround for unaligned byte range in fallocate()") we also already applied a work-around for the same problem to the earlier fallocate(FALLOC_FL_ZERO_RANGE) call, so do the same now for the PUNCH_HOLE call. But instead of silently catching and returning -ENOTSUP (which causes the caller to fall back to writing zeroes), let's rather inform the user once about the buggy file system and try the other fallback instead. Signed-off-by: Thomas Huth <thuth@redhat.com> Message-Id: <20210527172020.847617-2-thuth@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static int handle_aiocb_write_zeroes(void *opaque) { RawPosixAIOData *aiocb = opaque; #ifdef CONFIG_FALLOCATE BDRVRawState *s = aiocb->bs->opaque; int64_t len; #endif if (aiocb->aio_type & QEMU_AIO_BLKDEV) { return handle_aiocb_write_zeroes_block(aiocb); } #ifdef CONFIG_FALLOCATE_ZERO_RANGE if (s->has_write_zeroes) { int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == -EINVAL) { /* * Allow falling back to pwrite for file systems that * do not support fallocate() for an unaligned byte range. */ return -ENOTSUP; } if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_write_zeroes = false; } #endif #ifdef CONFIG_FALLOCATE_PUNCH_HOLE if (s->has_discard && s->has_fallocate) { int ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0) { ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_fallocate = false; } else if (ret == -EINVAL) { /* * Some file systems like older versions of GPFS do not like un- * aligned byte ranges, and return EINVAL in such a case, though * they should not do it according to the man-page of fallocate(). * Warn about the bad filesystem and try the final fallback instead. */ warn_report_once("Your file system is misbehaving: " "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. " "Please report this bug to your file system " "vendor."); } else if (ret != -ENOTSUP) { return ret; } else { s->has_discard = false; } } #endif #ifdef CONFIG_FALLOCATE /* Last resort: we are trying to extend the file with zeroed data. This * can be done via fallocate(fd, 0) */ len = bdrv_getlength(aiocb->bs); if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) { int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_fallocate = false; } #endif return -ENOTSUP; }
39df2c6d57b9eaa30d37a34b5a20cbc0474725c0
https://github.com/qemu/qemu
1not_vulnerable
block/vvfat: fix vvfat_child_perm crash It's wrong to rely on s->qcow in vvfat_child_perm, because this field is not yet set when permissions are updated during the bdrv_open_child() call. Still, prior to aa5a04c7db27eea6b36de32f241b155f0d9ce34d, it didn't crash, as bdrv_open_child passed NULL as the child to bdrv_child_perm(), and NULL was equal to NULL in the assertion (still, that was a bad guarantee that the child was s->qcow rather than the backing child :). Since aa5a04c7db27eea6b36de32f241b155f0d9ce34d "add bdrv_attach_child_noperm", bdrv_refresh_perms is called on the parent node when attaching a child, and the new, correct child pointer is passed to .bdrv_child_perm. Still, s->qcow is NULL at that moment. Let's rely only on role instead. Without that fix,
./build/qemu-system-x86_64 -usb -device usb-storage,drive=fat16 \
    -drive \
    file=fat:rw:fat-type=16:"<path of a host folder>",id=fat16,format=raw,if=none
crashes:
(gdb) bt
#0  raise () at /lib64/libc.so.6
#1  abort () at /lib64/libc.so.6
#2  _nl_load_domain.cold () at /lib64/libc.so.6
#3  annobin_assert.c_end () at /lib64/libc.so.6
#4  vvfat_child_perm (bs=0x559186f3d690, c=0x559186f1ed20, role=3, reopen_queue=0x0, perm=0, shared=31, nperm=0x7ffe56f28298, nshared=0x7ffe56f282a0) at ../block/vvfat.c:3214
#5  bdrv_child_perm (bs=0x559186f3d690, child_bs=0x559186f60190, c=0x559186f1ed20, role=3, reopen_queue=0x0, parent_perm=0, parent_shared=31, nperm=0x7ffe56f28298, nshared=0x7ffe56f282a0) at ../block.c:2094
#6  bdrv_node_refresh_perm (bs=0x559186f3d690, q=0x0, tran=0x559186f65850, errp=0x7ffe56f28530) at ../block.c:2336
#7  bdrv_list_refresh_perms (list=0x559186db5b90 = {...}, q=0x0, tran=0x559186f65850, errp=0x7ffe56f28530) at ../block.c:2358
#8  bdrv_refresh_perms (bs=0x559186f3d690, errp=0x7ffe56f28530) at ../block.c:2419
#9  bdrv_attach_child (parent_bs=0x559186f3d690, child_bs=0x559186f60190, child_name=0x559184d83e3d "write-target", child_class=0x5591852f3b00 <child_vvfat_qcow>, child_role=3, errp=0x7ffe56f28530) at ../block.c:2959
#10 bdrv_open_child (filename=0x559186f5cb80 "/var/tmp/vl.7WYmFU", options=0x559186f66c20, bdref_key=0x559184d83e3d "write-target", parent=0x559186f3d690, child_class=0x5591852f3b00 <child_vvfat_qcow>, child_role=3, allow_none=false, errp=0x7ffe56f28530) at ../block.c:3351
#11 enable_write_target (bs=0x559186f3d690, errp=0x7ffe56f28530) at ../block/vvfat.c:3177
#12 vvfat_open (bs=0x559186f3d690, options=0x559186f42db0, flags=155650, errp=0x7ffe56f28530) at ../block/vvfat.c:1236
#13 bdrv_open_driver (bs=0x559186f3d690, drv=0x5591853d97e0 <bdrv_vvfat>, node_name=0x0, options=0x559186f42db0, open_flags=155650, errp=0x7ffe56f28640) at ../block.c:1557
#14 bdrv_open_common (bs=0x559186f3d690, file=0x0, options=0x559186f42db0, errp=0x7ffe56f28640) at ../block.c:1833
...
(gdb) fr 4 #4 vvfat_child_perm (bs=0x559186f3d690, c=0x559186f1ed20, role=3, reopen_queue=0x0, perm=0, shared=31, nperm=0x7ffe56f28298, nshared=0x7ffe56f282a0) at ../block/vvfat.c:3214 3214 assert(c == s->qcow || (role & BDRV_CHILD_COW)); (gdb) p role $1 = 3 # BDRV_CHILD_DATA | BDRV_CHILD_METADATA (gdb) p *c $2 = {bs = 0x559186f60190, name = 0x559186f669d0 "write-target", klass = 0x5591852f3b00 <child_vvfat_qcow>, role = 3, opaque = 0x559186f3d690, perm = 3, shared_perm = 4, frozen = false, parent_quiesce_counter = 0, next = {le_next = 0x0, le_prev = 0x559186f41818}, next_parent = {le_next = 0x0, le_prev = 0x559186f64320}} (gdb) p s->qcow $3 = (BdrvChild *) 0x0 Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210524101257.119377-3-vsementsov@virtuozzo.com> Tested-by: John Arbuckle <programmingkidx@gmail.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static void vvfat_child_perm(BlockDriverState *bs, BdrvChild *c, BdrvChildRole role, BlockReopenQueue *reopen_queue, uint64_t perm, uint64_t shared, uint64_t *nperm, uint64_t *nshared) { if (role & BDRV_CHILD_DATA) { /* This is a private node, nobody should try to attach to it */ *nperm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE; *nshared = BLK_PERM_WRITE_UNCHANGED; } else { assert(role & BDRV_CHILD_COW); /* The backing file is there so 'commit' can use it. vvfat doesn't * access it in any way. */ *nperm = 0; *nshared = BLK_PERM_ALL; } }
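A minimal standalone illustration of why the fix dispatches on the role bits instead of comparing against s->qcow: during attach the parent's cached pointer is still NULL, so a pointer comparison gives the wrong answer. All names below are hypothetical; this is not the QEMU block-layer API:

#include <assert.h>
#include <stddef.h>

enum { CHILD_DATA = 1, CHILD_METADATA = 2, CHILD_COW = 4 };

struct child { int role; };
struct parent { struct child *cached_child; };   /* plays the role of s->qcow */

/* Buggy shape: relies on parent state that is not yet set while attaching. */
static int is_private_child_bad(struct parent *p, struct child *c)
{
    return c == p->cached_child;
}

/* Fixed shape: the role travels with the callback arguments. */
static int is_private_child_good(struct child *c)
{
    return (c->role & CHILD_DATA) != 0;
}

int main(void)
{
    struct parent p = { .cached_child = NULL };   /* attach in progress */
    struct child c = { .role = CHILD_DATA | CHILD_METADATA };

    assert(!is_private_child_bad(&p, &c));  /* wrong answer mid-attach */
    assert(is_private_child_good(&c));      /* correct regardless of timing */
    return 0;
}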
8eaf10187a2fd25aa27cb81b602815b07f9a7f89
https://github.com/qemu/qemu
1not_vulnerable
qemu-io-cmds: assert that we don't have .perm requested in no-blk case Coverity thinks blk may be NULL. It's a false-positive, as described in a new comment. Fixes: Coverity CID 1453194 Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210519090532.3753-1-vsementsov@virtuozzo.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static int command(BlockBackend *blk, const cmdinfo_t *ct, int argc, char **argv) { char *cmd = argv[0]; if (!init_check_command(blk, ct)) { return -EINVAL; } if (argc - 1 < ct->argmin || (ct->argmax != -1 && argc - 1 > ct->argmax)) { if (ct->argmax == -1) { fprintf(stderr, "bad argument count %d to %s, expected at least %d arguments\n", argc-1, cmd, ct->argmin); } else if (ct->argmin == ct->argmax) { fprintf(stderr, "bad argument count %d to %s, expected %d arguments\n", argc-1, cmd, ct->argmin); } else { fprintf(stderr, "bad argument count %d to %s, expected between %d and %d arguments\n", argc-1, cmd, ct->argmin, ct->argmax); } return -EINVAL; } /* * Request additional permissions if necessary for this command. The caller * is responsible for restoring the original permissions afterwards if this * is what it wants. * * Coverity thinks that blk may be NULL in the following if condition. It's * not so: in init_check_command() we fail if blk is NULL for a command with * both CMD_FLAG_GLOBAL and CMD_NOFILE_OK flags unset. And in * qemuio_add_command() we assert that a command with a non-zero .perm field * doesn't set these flags. So, the following assertion is to silence * Coverity: */ assert(blk || !ct->perm); if (ct->perm && blk_is_available(blk)) { uint64_t orig_perm, orig_shared_perm; blk_get_perm(blk, &orig_perm, &orig_shared_perm); if (ct->perm & ~orig_perm) { uint64_t new_perm; Error *local_err = NULL; int ret; new_perm = orig_perm | ct->perm; ret = blk_set_perm(blk, new_perm, orig_shared_perm, &local_err); if (ret < 0) { error_report_err(local_err); return ret; } } } qemu_reset_optind(); return ct->cfunc(blk, argc, argv); }
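The assert-the-invariant idiom used above reduces to the following standalone sketch (hypothetical names, not the qemu-io API): a relationship between two values is guaranteed at registration time, and restating it as an assertion at the use site lets both readers and static analyzers rule out the NULL dereference:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct backend { uint64_t granted; };
struct command { uint64_t perm; };

static void run_command(struct backend *blk, const struct command *ct)
{
    /* Enforced elsewhere: commands with a non-zero .perm always get a
     * backend. Restating it here silences "blk may be NULL" reports. */
    assert(blk || !ct->perm);

    if (ct->perm) {
        blk->granted |= ct->perm;   /* provably not a NULL dereference */
    }
}

int main(void)
{
    struct backend b = { 0 };
    struct command c = { .perm = 3 };
    run_command(&b, &c);
    return b.granted == 3 ? 0 : 1;
}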
ce7015d9e8669e2a45aba7a95fe6ef8a8f55bfe0
https://github.com/qemu/qemu
1not_vulnerable
hw/display/qxl: Set pci rom address aligned with page size On some MIPS systems the page size is 16K, and the qxl vga device can be used for a VM in kvm mode. The qxl pci rom size is fixed at 8K, smaller than the 16K page size on the host system, so it fails to be added to the memslots in kvm mode, where memory_size and GPA are required to align with the page size. This patch fixes this issue. Signed-off-by: Bibo Mao <maobibo@loongson.cn> Message-Id: <1621340448-31617-1-git-send-email-maobibo@loongson.cn> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static ram_addr_t qxl_rom_size(void) { #define QXL_REQUIRED_SZ (sizeof(QXLRom) + sizeof(QXLModes) + sizeof(qxl_modes)) #define QXL_ROM_SZ 8192 QEMU_BUILD_BUG_ON(QXL_REQUIRED_SZ > QXL_ROM_SZ); return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size); }
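The core of the fix is one rounding: region sizes handed to KVM memslots must be multiples of the host page size, which is 16K rather than 4K on the affected MIPS hosts. A standalone sketch of the arithmetic follows; align_up() is a hypothetical stand-in for QEMU_ALIGN_UP with qemu_real_host_page_size:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint64_t align_up(uint64_t value, uint64_t alignment)
{
    return (value + alignment - 1) / alignment * alignment;
}

int main(void)
{
    uint64_t rom_size = 8192;   /* the fixed 8K QXL ROM */
    uint64_t page = (uint64_t)sysconf(_SC_PAGESIZE);

    /* Stays 8K on 4K-page hosts; grows to 16K on 16K-page hosts, so the
     * region satisfies KVM's page-granularity requirement. */
    printf("page=%llu rom=%llu\n", (unsigned long long)page,
           (unsigned long long)align_up(rom_size, page));
    return 0;
}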
9f22893adcb02580aee5968f32baa2cd109b3ec2
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix OOB write in 'virgl_cmd_get_capset' (CVE-2021-3546) If 'virgl_renderer_get_cap_set' sets 'max_size' to 0 in 'virgl_cmd_get_capset', 'virgl_renderer_fill_caps' will write data past the end of 'resp'. This patch avoids this by checking the returned 'max_size'. virtio-gpu fix: abd7f08b23 ("display: virtio-gpu-3d: check virgl capabilities max_size") Fixes: CVE-2021-3546 Reported-by: Li Qiang <liq3ea@163.com> Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org> Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-8-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void virgl_cmd_get_capset(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_get_capset gc; struct virtio_gpu_resp_capset *resp; uint32_t max_ver, max_size; VUGPU_FILL_CMD(gc); virgl_renderer_get_cap_set(gc.capset_id, &max_ver, &max_size); if (!max_size) { cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } resp = g_malloc0(sizeof(*resp) + max_size); resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET; virgl_renderer_fill_caps(gc.capset_id, gc.capset_version, (void *)resp->capset_data); vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size); g_free(resp); }
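Stripped of the virgl specifics, the bug class is: allocate header plus N payload bytes, then hand the buffer to a filler that is not told N. When N can be 0, the filler scribbles past the header. A hedged standalone reduction, with hypothetical names rather than the virglrenderer API:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct resp_hdr { uint32_t type; };

/* Stand-in for virgl_renderer_fill_caps(): writes into dst without being
 * told how large dst is. */
static void fill_caps(uint8_t *dst, size_t actual_caps_len)
{
    memset(dst, 0xab, actual_caps_len);
}

static struct resp_hdr *get_capset(size_t reported_max_size,
                                   size_t actual_caps_len)
{
    if (reported_max_size == 0) {
        return NULL;    /* the fix: refuse, instead of allocating only the
                         * header and letting fill_caps() overflow it */
    }
    struct resp_hdr *r = calloc(1, sizeof(*r) + reported_max_size);
    if (r) {
        fill_caps((uint8_t *)(r + 1), actual_caps_len);
    }
    return r;
}

int main(void)
{
    free(get_capset(0, 64));    /* returns NULL: no OOB write */
    free(get_capset(64, 64));   /* normal path */
    return 0;
}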
63736af5a6571d9def93769431e0d7e38c6677bf
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory leak in 'virgl_resource_attach_backing' (CVE-2021-3544) If 'virgl_renderer_resource_attach_iov' fails, 'res_iovs' is leaked. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 33243031da ("virtio-gpu-3d: fix memory leak in resource attach backing") Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-7-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void virgl_resource_attach_backing(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_resource_attach_backing att_rb; struct iovec *res_iovs; int ret; VUGPU_FILL_CMD(att_rb); ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs); if (ret != 0) { cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; return; } ret = virgl_renderer_resource_attach_iov(att_rb.resource_id, res_iovs, att_rb.nr_entries); if (ret != 0) { g_free(res_iovs); } }
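The leak pattern here is ownership that fails to transfer: one helper allocates an array for the caller, a second call is supposed to take ownership, and when that call fails ownership snaps back to the caller, which must free. A standalone reduction with hypothetical names:

#include <stdlib.h>

struct iov { void *base; size_t len; };

static int create_mapping(struct iov **out, size_t n)
{
    *out = calloc(n, sizeof(**out));
    return *out ? 0 : -1;
}

/* Stand-in for virgl_renderer_resource_attach_iov(): takes ownership only
 * on success. */
static int attach_iov(struct iov *iovs, size_t n)
{
    (void)iovs; (void)n;
    return -1;                  /* simulate renderer failure */
}

int main(void)
{
    struct iov *iovs;
    if (create_mapping(&iovs, 4) != 0) {
        return 1;
    }
    if (attach_iov(iovs, 4) != 0) {
        free(iovs);             /* the fix: reclaim on the error path */
    }
    return 0;
}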
f6091d86ba9ea05f4e111b9b42ee0005c37a6779
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory leak in 'virgl_cmd_resource_unref' (CVE-2021-3544) 'res->iov' is leaked if the guest triggers the following sequence: virgl_cmd_create_resource_2d virgl_resource_attach_backing virgl_cmd_resource_unref This patch fixes this. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 5e8e3c4c75 ("virtio-gpu: fix resource leak in virgl_cmd_resource_unref") Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-6-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void virgl_cmd_resource_unref(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_resource_unref unref; struct iovec *res_iovs = NULL; int num_iovs = 0; VUGPU_FILL_CMD(unref); virgl_renderer_resource_detach_iov(unref.resource_id, &res_iovs, &num_iovs); g_free(res_iovs); virgl_renderer_resource_unref(unref.resource_id); }
b7afebcf9e6ecf3cf9b5a9b9b731ed04bca6aa3e
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory leak while calling 'vg_resource_unref' (CVE-2021-3544) If the guest triggers the following sequence, the attached backing is leaked: vg_resource_create_2d vg_resource_attach_backing vg_resource_unref This patch fixes this by freeing 'res->iov' in vg_resource_destroy. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 5e8e3c4c75 ("virtio-gpu: fix resource leak in virgl_cmd_resource_unref") Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org> Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-5-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void vg_resource_destroy(VuGpu *g, struct virtio_gpu_simple_resource *res) { int i; if (res->scanout_bitmask) { for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) { if (res->scanout_bitmask & (1 << i)) { vg_disable_scanout(g, i); } } } vugbm_buffer_destroy(&res->buffer); g_free(res->iov); pixman_image_unref(res->image); QTAILQ_REMOVE(&g->reslist, res, next); g_free(res); }
b9f79858a614d95f5de875d0ca31096eaab72c3b
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory leak in vg_resource_attach_backing (CVE-2021-3544) Check whether 'res' already has a backing attached, to avoid a memory leak. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 204f01b309 ("virtio-gpu: fix memory leak in resource attach backing") Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-4-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void vg_resource_attach_backing(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_attach_backing ab; int ret; VUGPU_FILL_CMD(ab); virtio_gpu_bswap_32(&ab, sizeof(ab)); res = virtio_gpu_find_resource(g, ab.resource_id); if (!res) { g_critical("%s: illegal resource specified %d", __func__, ab.resource_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } if (res->iov) { cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; return; } ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov); if (ret != 0) { cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; return; } res->iov_cnt = ab.nr_entries; }
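Unlike the previous leaks, this one comes from re-attaching: a second attach would overwrite res->iov and orphan the first allocation, so the fix rejects the command when a backing already exists. Reduced to a standalone sketch with hypothetical names:

#include <stdlib.h>

struct resource { void *iov; };

static int attach_backing(struct resource *res, size_t n)
{
    if (res->iov) {
        return -1;              /* the fix: refuse a second attach instead
                                 * of overwriting (and leaking) the first */
    }
    res->iov = calloc(n, sizeof(void *));
    return res->iov ? 0 : -1;
}

int main(void)
{
    struct resource res = { 0 };
    if (attach_backing(&res, 4) != 0) {
        return 1;
    }
    int second = attach_backing(&res, 4);   /* rejected, nothing leaked */
    free(res.iov);
    return second == -1 ? 0 : 1;
}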
86dd8fac2acc366930a5dc08d3fb1b1e816f4e1e
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix resource leak in 'vg_resource_create_2d' (CVE-2021-3544) Call 'vugbm_buffer_destroy' in error path to avoid resource leak. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org> Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-3-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void vg_resource_create_2d(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { pixman_format_code_t pformat; struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_create_2d c2d; VUGPU_FILL_CMD(c2d); virtio_gpu_bswap_32(&c2d, sizeof(c2d)); if (c2d.resource_id == 0) { g_critical("%s: resource id 0 is not allowed", __func__); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } res = virtio_gpu_find_resource(g, c2d.resource_id); if (res) { g_critical("%s: resource already exists %d", __func__, c2d.resource_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } res = g_new0(struct virtio_gpu_simple_resource, 1); res->width = c2d.width; res->height = c2d.height; res->format = c2d.format; res->resource_id = c2d.resource_id; pformat = virtio_gpu_get_pixman_format(c2d.format); if (!pformat) { g_critical("%s: host couldn't handle guest format %d", __func__, c2d.format); g_free(res); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height); res->image = pixman_image_create_bits(pformat, c2d.width, c2d.height, (uint32_t *)res->buffer.mmap, res->buffer.stride); if (!res->image) { g_critical("%s: resource creation failed %d %d %d", __func__, c2d.resource_id, c2d.width, c2d.height); g_free(res); vugbm_buffer_destroy(&res->buffer); cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; return; } QTAILQ_INSERT_HEAD(&g->reslist, res, next); }
121841b25d72d13f8cad554363138c360f1250ea
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory disclosure in virgl_cmd_get_capset_info (CVE-2021-3545) Otherwise part of 'resp' is leaked to the guest. Fixes: CVE-2021-3545 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 42a8dadc74 ("virtio-gpu: fix information leak in getting capset info dispatch") Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-2-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void virgl_cmd_get_capset_info(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_get_capset_info info; struct virtio_gpu_resp_capset_info resp; VUGPU_FILL_CMD(info); memset(&resp, 0, sizeof(resp)); if (info.capset_index == 0) { resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL; virgl_renderer_get_cap_set(resp.capset_id, &resp.capset_max_version, &resp.capset_max_size); } else if (info.capset_index == 1) { resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2; virgl_renderer_get_cap_set(resp.capset_id, &resp.capset_max_version, &resp.capset_max_size); } else { resp.capset_max_version = 0; resp.capset_max_size = 0; } resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO; vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp)); }
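The disclosure mechanism is worth spelling out: a reply struct on the stack is filled field by field, the copy-out sends sizeof(resp) bytes, and any field or padding byte the code skipped still holds whatever was previously on the stack. Zeroing first makes every skipped byte deterministic. A standalone sketch with a hypothetical layout:

#include <stdint.h>
#include <string.h>

struct capset_info_resp {
    uint32_t type;
    uint32_t capset_id;
    uint32_t capset_max_version;
    uint32_t capset_max_size;
};

static void build_resp(struct capset_info_resp *resp, uint32_t index)
{
    memset(resp, 0, sizeof(*resp));     /* the fix: no stale stack bytes */
    resp->type = 1;
    if (index == 0) {
        resp->capset_id = 1;
        /* capset_max_version/size would be filled by the renderer here;
         * for unknown indices they now stay zero instead of leaking. */
    }
}

int main(void)
{
    struct capset_info_resp r;
    build_resp(&r, 7);                  /* unknown index */
    return r.capset_max_size == 0 ? 0 : 1;
}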
1e157667d7657418b68fadb5cc016c6804e17501
https://github.com/qemu/qemu
1not_vulnerable
virtio-net: failover: add missing remove_migration_state_change_notifier() In the failover configuration, virtio_net_device_realize() uses add_migration_state_change_notifier() to add a state change notifier, but this notifier is not removed by the unrealize function when the virtio-net card is unplugged. If the card is unplugged and a migration is started, the notifier is called, and as it is no longer valid, QEMU crashes. This patch fixes the problem by adding the remove_migration_state_change_notifier() call in virtio_net_device_unrealize(). The problem can be reproduced with: $ qemu-system-x86_64 -enable-kvm -m 1g -M q35 \ -device pcie-root-port,slot=4,id=root1 \ -device pcie-root-port,slot=5,id=root2 \ -device virtio-net-pci,id=net1,mac=52:54:00:6f:55:cc,failover=on,bus=root1 \ -monitor stdio disk.qcow2 (qemu) device_del net1 (qemu) migrate "exec:gzip -c > STATEFILE.gz" Thread 1 "qemu-system-x86" received signal SIGSEGV, Segmentation fault. 0x0000000000000000 in ?? () (gdb) bt #0 0x0000000000000000 in () #1 0x0000555555d726d7 in notifier_list_notify (...) at .../util/notify.c:39 #2 0x0000555555842c1a in migrate_fd_connect (...) at .../migration/migration.c:3975 #3 0x0000555555950f7d in migration_channel_connect (...) error@entry=0x0) at .../migration/channel.c:107 #4 0x0000555555910922 in exec_start_outgoing_migration (...) at .../migration/exec.c:42 Reported-by: Igor Mammedov <imammedo@redhat.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Laurent Vivier <lvivier@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void virtio_net_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIONet *n = VIRTIO_NET(dev); int i, max_queues; /* This will stop vhost backend if appropriate. */ virtio_net_set_status(vdev, 0); g_free(n->netclient_name); n->netclient_name = NULL; g_free(n->netclient_type); n->netclient_type = NULL; g_free(n->mac_table.macs); g_free(n->vlans); if (n->failover) { device_listener_unregister(&n->primary_listener); remove_migration_state_change_notifier(&n->migration_state); } max_queues = n->multiqueue ? n->max_queues : 1; for (i = 0; i < max_queues; i++) { virtio_net_del_queue(n, i); } /* delete also control vq */ virtio_del_queue(vdev, max_queues * 2); qemu_announce_timer_del(&n->announce_timer, false); g_free(n->vqs); qemu_del_nic(n->nic); virtio_net_rsc_cleanup(n); g_free(n->rss_data.indirections_table); net_rx_pkt_uninit(n->rx_pkt); virtio_cleanup(vdev); }
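The rule this fix restores is strict realize/unrealize symmetry for global notifier lists. A standalone sketch of why the missing remove crashes — a hypothetical minimal notifier list, not QEMU's implementation:

#include <stddef.h>

struct notifier {
    void (*notify)(struct notifier *n, void *data);
    struct notifier *next;
};

static struct notifier *migration_notifiers;   /* global, outlives devices */

static void notifier_add(struct notifier *n)
{
    n->next = migration_notifiers;
    migration_notifiers = n;
}

static void notifier_remove(struct notifier *n)
{
    for (struct notifier **p = &migration_notifiers; *p; p = &(*p)->next) {
        if (*p == n) {
            *p = n->next;
            return;
        }
    }
}

struct device {
    struct notifier migration_state;   /* freed together with the device */
};

static void device_unrealize(struct device *d)
{
    /* Without this, the global list keeps a pointer into freed memory and
     * the next migration walks it. */
    notifier_remove(&d->migration_state);
}

int main(void)
{
    struct device d = { 0 };
    notifier_add(&d.migration_state);   /* what realize does */
    device_unrealize(&d);
    return migration_notifiers == NULL ? 0 : 1;
}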
4e812d2338acb354b969b59f792f413f567c0ace
https://github.com/qemu/qemu
1not_vulnerable
migration/rdma: cleanup rdma in rdma_start_incoming_migration error path The error path after calling qemu_rdma_dest_init() should also do rdma cleanup. Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com> Message-Id: <20210520081148.17001-1-lizhijian@cn.fujitsu.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
void rdma_start_incoming_migration(const char *host_port, Error **errp) { int ret; RDMAContext *rdma, *rdma_return_path = NULL; Error *local_err = NULL; trace_rdma_start_incoming_migration(); /* Avoid ram_block_discard_disable(), cannot change during migration. */ if (ram_block_discard_is_required()) { error_setg(errp, "RDMA: cannot disable RAM discard"); return; } rdma = qemu_rdma_data_init(host_port, &local_err); if (rdma == NULL) { goto err; } ret = qemu_rdma_dest_init(rdma, &local_err); if (ret) { goto err; } trace_rdma_start_incoming_migration_after_dest_init(); ret = rdma_listen(rdma->listen_id, 5); if (ret) { ERROR(errp, "listening on socket!"); goto cleanup_rdma; } trace_rdma_start_incoming_migration_after_rdma_listen(); /* initialize the RDMAContext for return path */ if (migrate_postcopy()) { rdma_return_path = qemu_rdma_data_init(host_port, &local_err); if (rdma_return_path == NULL) { goto cleanup_rdma; } qemu_rdma_return_path_dest_init(rdma_return_path, rdma); } qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration, NULL, (void *)(intptr_t)rdma); return; cleanup_rdma: qemu_rdma_cleanup(rdma); err: error_propagate(errp, local_err); if (rdma) { g_free(rdma->host); } g_free(rdma); g_free(rdma_return_path); }
efb208dc9c3f1e881aecff21fb1c7a7b6b869480
https://github.com/qemu/qemu
1not_vulnerable
migration/rdma: Fix cm_event used before being initialized A segmentation fault was triggered when trying to abort a postcopy + rdma migration, since rdma_ack_cm_event releases an uninitialized cm_event in this case, like below: 2496 ret = rdma_get_cm_event(rdma->channel, &cm_event); 2497 if (ret) { 2498 perror("rdma_get_cm_event after rdma_connect"); 2499 ERROR(errp, "connecting to destination!"); 2500 rdma_ack_cm_event(cm_event); <<<< causes segmentation fault 2501 goto err_rdma_source_connect; 2502 } Referring to the rdma_get_cm_event() code, cm_event is updated/changed only if rdma_get_cm_event() returns 0, so it is okay to remove the ack in the error path. Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com> Message-Id: <20210519064740.10828-1-lizhijian@cn.fujitsu.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
static int qemu_rdma_connect(RDMAContext *rdma, Error **errp) { RDMACapabilities cap = { .version = RDMA_CONTROL_VERSION_CURRENT, .flags = 0, }; struct rdma_conn_param conn_param = { .initiator_depth = 2, .retry_count = 5, .private_data = &cap, .private_data_len = sizeof(cap), }; struct rdma_cm_event *cm_event; int ret; /* * Only negotiate the capability with destination if the user * on the source first requested the capability. */ if (rdma->pin_all) { trace_qemu_rdma_connect_pin_all_requested(); cap.flags |= RDMA_CAPABILITY_PIN_ALL; } caps_to_network(&cap); ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); if (ret) { ERROR(errp, "posting second control recv"); goto err_rdma_source_connect; } ret = rdma_connect(rdma->cm_id, &conn_param); if (ret) { perror("rdma_connect"); ERROR(errp, "connecting to destination!"); goto err_rdma_source_connect; } ret = rdma_get_cm_event(rdma->channel, &cm_event); if (ret) { perror("rdma_get_cm_event after rdma_connect"); ERROR(errp, "connecting to destination!"); goto err_rdma_source_connect; } if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) { perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect"); ERROR(errp, "connecting to destination!"); rdma_ack_cm_event(cm_event); goto err_rdma_source_connect; } rdma->connected = true; memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap)); network_to_caps(&cap); /* * Verify that the *requested* capabilities are supported by the destination * and disable them otherwise. */ if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) { ERROR(errp, "Server cannot support pinning all memory. " "Will register memory dynamically."); rdma->pin_all = false; } trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all); rdma_ack_cm_event(cm_event); rdma->control_ready_expected = 1; rdma->nb_sent = 0; return 0; err_rdma_source_connect: qemu_rdma_cleanup(rdma); return -1; }
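The fix reflects a general out-parameter contract: the callee writes the pointer only on success, so error paths must not consume it. A standalone sketch with hypothetical stand-ins for rdma_get_cm_event()/rdma_ack_cm_event():

#include <stddef.h>

struct cm_event { int type; };

static struct cm_event the_event = { .type = 1 };

/* Fills *ev only when it returns 0, like rdma_get_cm_event(). */
static int get_event(struct cm_event **ev, int fail)
{
    if (fail) {
        return -1;      /* *ev left untouched: uninitialized in the caller */
    }
    *ev = &the_event;
    return 0;
}

static void ack_event(struct cm_event *ev) { (void)ev; }

static int wait_established(int fail)
{
    struct cm_event *ev;            /* deliberately uninitialized */
    if (get_event(&ev, fail)) {
        return -1;                  /* the fix: no ack_event(ev) here */
    }
    int ok = (ev->type == 1);
    ack_event(ev);                  /* only ack what we actually received */
    return ok ? 0 : -1;
}

int main(void)
{
    return (wait_established(1) == -1 && wait_established(0) == 0) ? 0 : 1;
}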
c53cd04e70641fdf9410aac40c617d074047b3e1
https://github.com/qemu/qemu
1not_vulnerable
hmp: Fix loadvm to resume the VM on success instead of failure Commit f61fe11aa6f broke hmp_loadvm() by adding an incorrect negation when converting from 0/-errno return values to a bool value. The result is that loadvm now resumes the VM if it failed and keeps it stopped if it succeeded. Fix it to restore the old behaviour and do it the other way around. Fixes: f61fe11aa6f7f8f0ffe4ddaa56a8108f3ab57854 Cc: qemu-stable@nongnu.org Reported-by: Yanhui Ma <yama@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com> Message-Id: <20210511163151.45167-1-kwolf@redhat.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
void hmp_loadvm(Monitor *mon, const QDict *qdict) { int saved_vm_running = runstate_is_running(); const char *name = qdict_get_str(qdict, "name"); Error *err = NULL; vm_stop(RUN_STATE_RESTORE_VM); if (load_snapshot(name, NULL, false, NULL, &err) && saved_vm_running) { vm_start(); } hmp_handle_error(mon, err); }
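The underlying trap generalizes: when a function migrates from returning 0/-errno to returning bool, the sense of every call site flips. A minimal sketch of the before/after shapes; load_snapshot() here is a hypothetical bool-returning stand-in:

#include <stdbool.h>
#include <stdio.h>

static bool load_snapshot(const char *name)
{
    (void)name;
    return true;        /* success under the new bool convention */
}

int main(void)
{
    bool saved_vm_running = true;

    /* Old convention: if (load_snapshot(...) == 0 && saved_vm_running).
     * Buggy mechanical port: if (!load_snapshot(...) && saved_vm_running),
     * which resumes on failure. Correct bool form: */
    if (load_snapshot("snap0") && saved_vm_running) {
        puts("vm_start()");     /* resume only after a successful load */
    }
    return 0;
}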
b802d14dc6f3fba988baa9804af8f4cf837c6886
https://github.com/qemu/qemu
1not_vulnerable
hw/scsi: Fix sector translation bug in scsi_unmap_complete_noio check_lba_range expects sectors to be expressed in the original qdev blocksize, but scsi_unmap_complete_noio was translating them to 512-byte sectors, which caused sense errors for larger LBAs on devices using a 4k block size. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/345 Signed-off-by: Kit Westneat <kit.westneat@gmail.com> Message-Id: <20210521142829.326217-1-kit.westneat@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) { SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); assert(r->req.aiocb == NULL); if (data->count > 0) { uint64_t sector_num = ldq_be_p(&data->inbuf[0]); uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE); r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); if (!check_lba_range(s, sector_num, nb_sectors)) { block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); goto done; } block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, r->sector_count * BDRV_SECTOR_SIZE, BLOCK_ACCT_UNMAP); r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, r->sector * BDRV_SECTOR_SIZE, r->sector_count * BDRV_SECTOR_SIZE, scsi_unmap_complete, data); data->count--; data->inbuf += 16; return; } scsi_req_complete(&r->req, GOOD); done: scsi_req_unref(&r->req); g_free(data); }
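The unit discipline behind the fix: the SCSI command speaks in the device's logical blocks (possibly 4096 bytes), the block layer speaks in 512-byte sectors, and the range check must happen before the scaling. A standalone sketch with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

#define BDRV_SECTOR_SIZE 512u

static bool check_lba_range(uint64_t lba, uint32_t nb, uint64_t dev_blocks)
{
    return lba <= dev_blocks && nb <= dev_blocks - lba;
}

static int unmap_one(uint64_t blocksize, uint64_t dev_blocks,
                     uint64_t lba, uint32_t nb)
{
    /* 1. Validate in device blocks, the units the command used. The bug
     *    was validating values already scaled to 512-byte sectors. */
    if (!check_lba_range(lba, nb, dev_blocks)) {
        return -1;
    }
    /* 2. Only then scale for the 512-byte-sector block layer. */
    uint64_t factor = blocksize / BDRV_SECTOR_SIZE;
    uint64_t sector = lba * factor;
    uint64_t count = (uint64_t)nb * factor;
    return sector + count <= dev_blocks * factor ? 0 : -1;
}

int main(void)
{
    /* 4k-block device with 1000 blocks: LBA 999 is valid, 1001 is not. */
    return (unmap_one(4096, 1000, 999, 1) == 0 &&
            unmap_one(4096, 1000, 1001, 1) == -1) ? 0 : 1;
}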
d349f92f78d26db2805ca39a7745cc70affea021
https://github.com/qemu/qemu
1not_vulnerable
vl: allow not specifying size in -m when using -M memory-backend Starting in QEMU 6.0's commit f5c9fcb82d ("vl: separate qemu_create_machine", 2020-12-10), a function have_custom_ram_size() replaced the return value of set_memory_options(). The purpose of the return value was to record the presence of "-m size", and if it was not there, change the default RAM size to the size of the memory backend passed with "-M memory-backend". With that commit, however, have_custom_ram_size() is now queried only after set_memory_options has stored the fixed-up RAM size in QemuOpts for "future use". This was actually the only future use of the fixed-up RAM size, so remove that code and fix the bug. Cc: qemu-stable@nongnu.org Fixes: f5c9fcb82d ("vl: separate qemu_create_machine", 2020-12-10) Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void set_memory_options(MachineClass *mc) { uint64_t sz; const char *mem_str; const ram_addr_t default_ram_size = mc->default_ram_size; QemuOpts *opts = qemu_find_opts_singleton("memory"); Location loc; loc_push_none(&loc); qemu_opts_loc_restore(opts); sz = 0; mem_str = qemu_opt_get(opts, "size"); if (mem_str) { if (!*mem_str) { error_report("missing 'size' option value"); exit(EXIT_FAILURE); } sz = qemu_opt_get_size(opts, "size", ram_size); /* Fix up legacy suffix-less format */ if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) { uint64_t overflow_check = sz; sz *= MiB; if (sz / MiB != overflow_check) { error_report("too large 'size' option value"); exit(EXIT_FAILURE); } } } /* backward compatibility behaviour for case "-m 0" */ if (sz == 0) { sz = default_ram_size; } sz = QEMU_ALIGN_UP(sz, 8192); if (mc->fixup_ram_size) { sz = mc->fixup_ram_size(sz); } ram_size = sz; if (ram_size != sz) { error_report("ram size too large"); exit(EXIT_FAILURE); } maxram_size = ram_size; if (qemu_opt_get(opts, "maxmem")) { uint64_t slots; sz = qemu_opt_get_size(opts, "maxmem", 0); slots = qemu_opt_get_number(opts, "slots", 0); if (sz < ram_size) { error_report("invalid value of -m option maxmem: " "maximum memory size (0x%" PRIx64 ") must be at least " "the initial memory size (0x" RAM_ADDR_FMT ")", sz, ram_size); exit(EXIT_FAILURE); } else if (slots && sz == ram_size) { error_report("invalid value of -m option maxmem: " "memory slots were specified but maximum memory size " "(0x%" PRIx64 ") is equal to the initial memory size " "(0x" RAM_ADDR_FMT ")", sz, ram_size); exit(EXIT_FAILURE); } maxram_size = sz; ram_slots = slots; } else if (qemu_opt_get(opts, "slots")) { error_report("invalid -m option value: missing 'maxmem' option"); exit(EXIT_FAILURE); } loc_pop(&loc); }
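One detail of set_memory_options() deserves a note: the legacy suffix-less "-m N" means N MiB, and the multiplication is overflow-checked by dividing back. A standalone sketch of just that check; fixup_mem_size() is a hypothetical helper, not the QEMU API:

#include <ctype.h>
#include <stdint.h>
#include <string.h>

#define MiB (1024ull * 1024ull)

/* Returns 0 and stores bytes in *out, or -1 on overflow. 'parsed' is the
 * numeric value of 'str'; a bare trailing digit means the value is MiB. */
static int fixup_mem_size(const char *str, uint64_t parsed, uint64_t *out)
{
    if (*str && isdigit((unsigned char)str[strlen(str) - 1])) {
        uint64_t check = parsed;
        parsed *= MiB;
        if (parsed / MiB != check) {
            return -1;                  /* "too large 'size' option value" */
        }
    }
    *out = parsed;
    return 0;
}

int main(void)
{
    uint64_t sz;
    if (fixup_mem_size("4096", 4096, &sz) != 0 || sz != 4096 * MiB) {
        return 1;
    }
    return fixup_mem_size("99999999999999", 99999999999999ull, &sz) == -1
           ? 0 : 1;
}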
a81a592698c5cf3d84486b00c84b7979dfa0a3da
https://github.com/qemu/qemu
1not_vulnerable
KVM: Disable manual dirty log when dirty ring enabled KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is for KVM_CLEAR_DIRTY_LOG, which is only useful for KVM_GET_DIRTY_LOG. Skip enabling it for kvm dirty ring. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will not wr-protect all the pages initially, which is against how kvm dirty ring is used - there's no way for kvm dirty ring to re-protect a page before it's notified as being written first with a GFN entry in the ring! So when KVM_DIRTY_LOG_INITIALLY_SET is enabled with dirty ring, we'll see silent data loss after migration. Signed-off-by: Peter Xu <peterx@redhat.com> Message-Id: <20210506160549.130416-10-peterx@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); static const char upgrade_note[] = "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" "(see http://sourceforge.net/projects/kvm).\n"; struct { const char *name; int num; } num_cpus[] = { { "SMP", ms->smp.cpus }, { "hotpluggable", ms->smp.max_cpus }, { NULL, } }, *nc = num_cpus; int soft_vcpus_limit, hard_vcpus_limit; KVMState *s; const KVMCapabilityInfo *missing_cap; int ret; int type = 0; uint64_t dirty_log_manual_caps; qemu_mutex_init(&kml_slots_lock); s = KVM_STATE(ms->accelerator); /* * On systems where the kernel can support different base page * sizes, host page size may be different from TARGET_PAGE_SIZE, * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum * page size for the system though. */ assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size); s->sigmask_len = 8; #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif QLIST_INIT(&s->kvm_parked_vcpus); s->fd = qemu_open_old("/dev/kvm", O_RDWR); if (s->fd == -1) { fprintf(stderr, "Could not access KVM kernel module: %m\n"); ret = -errno; goto err; } ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { if (ret >= 0) { ret = -EINVAL; } fprintf(stderr, "kvm version too old\n"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, "kvm version not supported\n"); goto err; } kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT); s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); /* If unspecified, use the default value */ if (!s->nr_slots) { s->nr_slots = 32; } s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); if (s->nr_as <= 1) { s->nr_as = 1; } s->as = g_new0(struct KVMAs, s->nr_as); if (object_property_find(OBJECT(current_machine), "kvm-type")) { g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), "kvm-type", &error_abort); type = mc->kvm_type(ms, kvm_type); } else if (mc->kvm_type) { type = mc->kvm_type(ms, NULL); } do { ret = kvm_ioctl(s, KVM_CREATE_VM, type); } while (ret == -EINTR); if (ret < 0) { fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret, strerror(-ret)); #ifdef TARGET_S390X if (ret == -EINVAL) { fprintf(stderr, "Host kernel setup problem detected. 
Please verify:\n"); fprintf(stderr, "- for kernels supporting the switch_amode or" " user_mode parameters, whether\n"); fprintf(stderr, " user space is running in primary address space\n"); fprintf(stderr, "- for kernels supporting the vm.allocate_pgste sysctl, " "whether it is enabled\n"); } #endif goto err; } s->vmfd = ret; /* check the vcpu limits */ soft_vcpus_limit = kvm_recommended_vcpus(s); hard_vcpus_limit = kvm_max_vcpus(s); while (nc->name) { if (nc->num > soft_vcpus_limit) { warn_report("Number of %s cpus requested (%d) exceeds " "the recommended cpus supported by KVM (%d)", nc->name, nc->num, soft_vcpus_limit); if (nc->num > hard_vcpus_limit) { fprintf(stderr, "Number of %s cpus requested (%d) exceeds " "the maximum cpus supported by KVM (%d)\n", nc->name, nc->num, hard_vcpus_limit); exit(1); } } nc++; } missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); if (!missing_cap) { missing_cap = kvm_check_extension_list(s, kvm_arch_required_capabilities); } if (missing_cap) { ret = -EINVAL; fprintf(stderr, "kvm does not support %s\n%s", missing_cap->name, upgrade_note); goto err; } s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); s->coalesced_pio = s->coalesced_mmio && kvm_check_extension(s, KVM_CAP_COALESCED_PIO); /* * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no * page is wr-protected initially, which is against how kvm dirty ring is * usage - kvm dirty ring requires all pages are wr-protected at the very * beginning. Enabling this feature for dirty ring causes data corruption. */ if (!s->kvm_dirty_ring_size) { dirty_log_manual_caps = kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET); s->manual_dirty_log_protect = dirty_log_manual_caps; if (dirty_log_manual_caps) { ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, dirty_log_manual_caps); if (ret) { warn_report("Trying to enable capability %"PRIu64" of " "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. " "Falling back to the legacy mode. ", dirty_log_manual_caps); s->manual_dirty_log_protect = 0; } } } #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif s->robust_singlestep = kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); #ifdef KVM_CAP_DEBUGREGS s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); #endif s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); #ifdef KVM_CAP_IRQ_ROUTING kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); #endif s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); s->irq_set_ioctl = KVM_IRQ_LINE; if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; } kvm_readonly_mem_allowed = (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); kvm_eventfds_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); kvm_irqfds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); kvm_resamplefds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); kvm_vm_attributes_allowed = (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); kvm_ioeventfd_any_length_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0); kvm_state = s; ret = kvm_arch_init(ms, s); if (ret < 0) { goto err; } if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } qemu_register_reset(kvm_unpoison_all, NULL); if (s->kernel_irqchip_allowed) { kvm_irqchip_create(s); } if (kvm_eventfds_allowed) { s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; } s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; kvm_memory_listener_register(s, &s->memory_listener, &address_space_memory, 0); if (kvm_eventfds_allowed) { memory_listener_register(&kvm_io_listener, &address_space_io); } memory_listener_register(&kvm_coalesced_pio_listener, &address_space_io); s->many_ioeventfds = kvm_check_many_ioeventfds(); s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); if (!s->sync_mmu) { ret = ram_block_discard_disable(true); assert(!ret); } return 0; err: assert(ret < 0); if (s->vmfd >= 0) { close(s->vmfd); } if (s->fd != -1) { close(s->fd); } g_free(s->memory_listener.slots); return ret; }
7704bb02dd73070b218fb091cdda79679dab2b8f
https://github.com/qemu/qemu
1not_vulnerable
ps2: don't raise an interrupt if queue is full ps2_queue() behaves differently than the very similar functions ps2_queue_2() to ps2_queue_4(). The first one calls update_irq() even if the queue is full, the others don't. Change ps2_queue() to be consistent with the others. Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Signed-off-by: Volker Rümelin <vr_qemu@t-online.de> Message-Id: <20210525181441.27768-2-vr_qemu@t-online.de> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
void ps2_raise_irq(PS2State *s) { s->update_irq(s->update_arg, 1); }
76968101f549fb6bb51b4bdea65e8a48307c765d
https://github.com/qemu/qemu
1not_vulnerable
ps2: fix mouse stream corruption Commit 7abe7eb294 "ps2: Fix mouse stream corruption due to lost data" added code to avoid mouse stream corruptions but the calculation of the needed free queue size was wrong. Fix this. To reproduce, open a text file with the vim 7.3 32 bit for DOS executable in a FreeDOS client started with -display sdl and move the mouse around for a few seconds. You will quickly see erratic mouse movements and unexpected mouse clicks. CuteMouse (ctmouse.exe) in FreeDOS doesn't try to re-sync the mouse stream. Fixes: 7abe7eb294 ("ps2: Fix mouse stream corruption due to lost data") Signed-off-by: Volker Rümelin <vr_qemu@t-online.de> Message-Id: <20210525181441.27768-1-vr_qemu@t-online.de> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
void ps2_keyboard_set_translation(void *opaque, int mode) { PS2KbdState *s = (PS2KbdState *)opaque; trace_ps2_keyboard_set_translation(opaque, mode); s->translate = mode; }
64ea60869be0fc80e32055912fe3c1a55290231c
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Fix decode for VDOT (indexed) We were extracting the M register twice, once incorrectly as M:vm and once correctly as rm. Remove the incorrect name and remove the incorrect decode. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210525010358.152808-87-richard.henderson@linaro.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a) { gen_helper_gvec_4 *fn_gvec; int opr_sz; if (!dc_isar_feature(aa32_dp, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vn) & 0x10)) { return false; } if ((a->vd | a->vn) & a->q) { return false; } if (!vfp_access_check(s)) { return true; } fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b; opr_sz = (1 + a->q) * 8; tcg_gen_gvec_4_ool(vfp_reg_offset(1, a->vd), vfp_reg_offset(1, a->vn), vfp_reg_offset(1, a->vm), vfp_reg_offset(1, a->vd), opr_sz, opr_sz, a->index, fn_gvec); return true; }
382c7160d1cd9e815fb94d3889a5ddcf0e1845ab
https://github.com/qemu/qemu
1not_vulnerable
hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic In icc_eoir_write() we assume that we can identify the group of the IRQ being completed based purely on which register is being written to and the current CPU state, and that "CPU state matches group indicated by register" is the only necessary access check. This isn't correct: if the CPU is not in Secure state then EOIR1 will only complete Group 1 NS IRQs, but if the CPU is in EL3 it can complete both Group 1 S and Group 1 NS IRQs. (The pseudocode ICC_EOIR1_EL1 makes this clear.) We were also missing the logic to prevent EOIR0 writes completing G0 IRQs when they should not. Rearrange the logic to first identify the group of the current highest priority interrupt and then look at whether we should complete it or ignore the access based on which register was accessed and the state of the CPU. The resulting behavioural change is: * EL3 can now complete G1NS interrupts * G0 interrupt completion is now ignored if the GIC and the CPU have the security extension enabled and the CPU is not secure Reported-by: Chan Kim <ckim@etri.re.kr> Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210510150016.24910-1-peter.maydell@linaro.org
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* End of Interrupt */ GICv3CPUState *cs = icc_cs_from_env(env); int irq = value & 0xffffff; int grp; bool is_eoir0 = ri->crm == 8; if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) { icv_eoir_write(env, ri, value); return; } trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1, gicv3_redist_affid(cs), value); if (irq >= cs->gic->num_irq) { /* This handles two cases: * 1. If software writes the ID of a spurious interrupt [ie 1020-1023] * to the GICC_EOIR, the GIC ignores that write. * 2. If software writes the number of a non-existent interrupt * this must be a subcase of "value written does not match the last * valid interrupt value read from the Interrupt Acknowledge * register" and so this is UNPREDICTABLE. We choose to ignore it. */ return; } grp = icc_highest_active_group(cs); switch (grp) { case GICV3_G0: if (!is_eoir0) { return; } if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) { return; } break; case GICV3_G1: if (is_eoir0) { return; } if (!arm_is_secure(env)) { return; } break; case GICV3_G1NS: if (is_eoir0) { return; } if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) { return; } break; default: g_assert_not_reached(); } icc_drop_prio(cs, grp); if (!icc_eoi_split(env, cs)) { /* Priority drop and deactivate not split: deactivate irq now */ icc_deactivate_irq(cs, irq); } }
219729cfbf9e979020bffedac6a790144173ec62
https://github.com/qemu/qemu
1not_vulnerable
hw/arm/smmuv3: Another range invalidation fix 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range") failed to completely fix the misalignment issues with range invalidation. For instance, invalidation patterns like "invalidate 32 4kB pages starting from 0xff395000" are not correctly handled, due to the fact that the previous fix only made sure the number of invalidated pages was a power of 2 but did not properly handle a start address that was not aligned with the range. This can be noticed when booting a fedora 33 with protected virtio-blk-pci. Signed-off-by: Eric Auger <eric.auger@redhat.com> Fixes: 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range") Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, uint8_t tg, uint64_t num_pages) { SMMUDevice *sdev; QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) { IOMMUMemoryRegion *mr = &sdev->iommu; IOMMUNotifier *n; trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova, tg, num_pages); IOMMU_NOTIFIER_FOREACH(n, mr) { smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages); } } }
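The invariant being fixed: each emitted invalidation must cover a power-of-two number of pages and start on a boundary of its own size, so the commit's example of 32 pages at 0xff395000 has to be split. Below is a standalone sketch of such a splitter — a hypothetical helper assuming 4kB pages and gcc/clang bit builtins, not the QEMU code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static void invalidate_range(uint64_t addr, uint64_t num_pages)
{
    while (num_pages) {
        /* Largest power of two allowed by the remaining count... */
        uint64_t by_count = UINT64_C(1) << (63 - __builtin_clzll(num_pages));
        /* ...and by the alignment of the current page index. */
        uint64_t page = addr >> PAGE_SHIFT;
        uint64_t by_align = page ? UINT64_C(1) << __builtin_ctzll(page)
                                 : by_count;
        uint64_t chunk = by_count < by_align ? by_count : by_align;

        printf("inv addr=0x%" PRIx64 " pages=%" PRIu64 "\n", addr, chunk);
        addr += chunk << PAGE_SHIFT;
        num_pages -= chunk;
    }
}

int main(void)
{
    /* Emits chunks of 1 + 2 + 8 + 16 + 4 + 1 pages, each aligned to its size. */
    invalidate_range(0xff395000u, 32);
    return 0;
}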
fb74a286feaa4ec2cdcda61ba570244464581ca7
https://github.com/qemu/qemu
1not_vulnerable
coroutine-sleep: disallow NULL QemuCoSleepState** argument Simplify the code by removing conditionals. qemu_co_sleep_ns can simply point the argument to an on-stack temporary. Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-id: 20210517100548.28806-3-pbonzini@redhat.com Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
void qemu_co_sleep_wake(QemuCoSleepState *sleep_state) { /* Write of schedule protected by barrier write in aio_co_schedule */ const char *scheduled = qatomic_cmpxchg(&sleep_state->co->scheduled, qemu_co_sleep_ns__scheduled, NULL); assert(scheduled == qemu_co_sleep_ns__scheduled); *sleep_state->user_state_pointer = NULL; timer_del(&sleep_state->ts); aio_co_wake(sleep_state->co); }

Dataset Card for "vulnerable-functions"

More Information needed
