Dataset columns — sha: string (40 characters) · remote_url: string (3 distinct values) · labels: class label (2 classes) · commit_msg: string (16 to 10.3k characters) · function: string (30 to 17.6k characters)
846feac2ae1d1dab08c0048807ce802a256179fd
https://github.com/qemu/qemu
not_vulnerable (class 1)
hw/m68k/q800: fix PROM checksum and MAC address storage The checksum used by MacOS to validate the PROM content is an exclusive-OR rather than a sum over the corresponding bytes. In addition, the MAC address must be stored in bit-reversed format, as indicated in comments in Linux's macsonic.c. With the PROM contents fixed, MacOS starts to probe the device registers when AppleTalk is enabled in the Control Panel. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Tested-by: Finn Thain <fthain@linux-m68k.org> Message-Id: <20210625065401.30170-8-mark.cave-ayland@ilande.co.uk> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
static void q800_init(MachineState *machine) { M68kCPU *cpu = NULL; int linux_boot; int32_t kernel_size; uint64_t elf_entry; char *filename; int bios_size; ram_addr_t initrd_base; int32_t initrd_size; MemoryRegion *rom; MemoryRegion *io; MemoryRegion *dp8393x_prom = g_new(MemoryRegion, 1); uint8_t *prom; const int io_slice_nb = (IO_SIZE / IO_SLICE) - 1; int i, checksum; ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *bios_name = machine->firmware ?: MACROM_FILENAME; hwaddr parameters_base; CPUState *cs; DeviceState *dev; DeviceState *via_dev; DeviceState *escc_orgate; SysBusESPState *sysbus_esp; ESPState *esp; SysBusDevice *sysbus; BusState *adb_bus; NubusBus *nubus; DeviceState *glue; DriveInfo *dinfo; linux_boot = (kernel_filename != NULL); if (ram_size > 1 * GiB) { error_report("Too much memory for this machine: %" PRId64 " MiB, " "maximum 1024 MiB", ram_size / MiB); exit(1); } /* init CPUs */ cpu = M68K_CPU(cpu_create(machine->cpu_type)); qemu_register_reset(main_cpu_reset, cpu); /* RAM */ memory_region_add_subregion(get_system_memory(), 0, machine->ram); /* * Memory from IO_BASE to IO_BASE + IO_SLICE is repeated * from IO_BASE + IO_SLICE to IO_BASE + IO_SIZE */ io = g_new(MemoryRegion, io_slice_nb); for (i = 0; i < io_slice_nb; i++) { char *name = g_strdup_printf("mac_m68k.io[%d]", i + 1); memory_region_init_alias(&io[i], NULL, name, get_system_memory(), IO_BASE, IO_SLICE); memory_region_add_subregion(get_system_memory(), IO_BASE + (i + 1) * IO_SLICE, &io[i]); g_free(name); } /* IRQ Glue */ glue = qdev_new(TYPE_GLUE); object_property_set_link(OBJECT(glue), "cpu", OBJECT(cpu), &error_abort); sysbus_realize_and_unref(SYS_BUS_DEVICE(glue), &error_fatal); /* VIA */ via_dev = qdev_new(TYPE_MAC_VIA); dinfo = drive_get(IF_MTD, 0, 0); if (dinfo) { qdev_prop_set_drive(via_dev, "drive", blk_by_legacy_dinfo(dinfo)); } sysbus = SYS_BUS_DEVICE(via_dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 0, VIA_BASE); qdev_connect_gpio_out_named(DEVICE(sysbus), "irq", 0, qdev_get_gpio_in(glue, 0)); qdev_connect_gpio_out_named(DEVICE(sysbus), "irq", 1, qdev_get_gpio_in(glue, 1)); adb_bus = qdev_get_child_bus(via_dev, "adb.0"); dev = qdev_new(TYPE_ADB_KEYBOARD); qdev_realize_and_unref(dev, adb_bus, &error_fatal); dev = qdev_new(TYPE_ADB_MOUSE); qdev_realize_and_unref(dev, adb_bus, &error_fatal); /* MACSONIC */ if (nb_nics > 1) { error_report("q800 can only have one ethernet interface"); exit(1); } qemu_check_nic_model(&nd_table[0], "dp83932"); /* * MacSonic driver needs an Apple MAC address * Valid prefix are: * 00:05:02 Apple * 00:80:19 Dayna Communications, Inc. 
* 00:A0:40 Apple * 08:00:07 Apple * (Q800 use the last one) */ nd_table[0].macaddr.a[0] = 0x08; nd_table[0].macaddr.a[1] = 0x00; nd_table[0].macaddr.a[2] = 0x07; dev = qdev_new("dp8393x"); qdev_set_nic_properties(dev, &nd_table[0]); qdev_prop_set_uint8(dev, "it_shift", 2); qdev_prop_set_bit(dev, "big_endian", true); object_property_set_link(OBJECT(dev), "dma_mr", OBJECT(get_system_memory()), &error_abort); sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 0, SONIC_BASE); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, 2)); memory_region_init_rom(dp8393x_prom, NULL, "dp8393x-q800.prom", SONIC_PROM_SIZE, &error_fatal); memory_region_add_subregion(get_system_memory(), SONIC_PROM_BASE, dp8393x_prom); /* Add MAC address with valid checksum to PROM */ prom = memory_region_get_ram_ptr(dp8393x_prom); checksum = 0; for (i = 0; i < 6; i++) { prom[i] = bitrev8(nd_table[0].macaddr.a[i]); checksum ^= prom[i]; } prom[7] = 0xff - checksum; /* SCC */ dev = qdev_new(TYPE_ESCC); qdev_prop_set_uint32(dev, "disabled", 0); qdev_prop_set_uint32(dev, "frequency", MAC_CLOCK); qdev_prop_set_uint32(dev, "it_shift", 1); qdev_prop_set_bit(dev, "bit_swap", true); qdev_prop_set_chr(dev, "chrA", serial_hd(0)); qdev_prop_set_chr(dev, "chrB", serial_hd(1)); qdev_prop_set_uint32(dev, "chnBtype", 0); qdev_prop_set_uint32(dev, "chnAtype", 0); sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); /* Logically OR both its IRQs together */ escc_orgate = DEVICE(object_new(TYPE_OR_IRQ)); object_property_set_int(OBJECT(escc_orgate), "num-lines", 2, &error_fatal); qdev_realize_and_unref(escc_orgate, NULL, &error_fatal); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(escc_orgate, 0)); sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in(escc_orgate, 1)); qdev_connect_gpio_out(DEVICE(escc_orgate), 0, qdev_get_gpio_in(glue, 3)); sysbus_mmio_map(sysbus, 0, SCC_BASE); /* SCSI */ dev = qdev_new(TYPE_SYSBUS_ESP); sysbus_esp = SYSBUS_ESP(dev); esp = &sysbus_esp->esp; esp->dma_memory_read = NULL; esp->dma_memory_write = NULL; esp->dma_opaque = NULL; sysbus_esp->it_shift = 4; esp->dma_enabled = 1; sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in_named(via_dev, "via2-irq", VIA2_IRQ_SCSI_BIT)); sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in_named(via_dev, "via2-irq", VIA2_IRQ_SCSI_DATA_BIT)); sysbus_mmio_map(sysbus, 0, ESP_BASE); sysbus_mmio_map(sysbus, 1, ESP_PDMA); scsi_bus_legacy_handle_cmdline(&esp->bus); /* SWIM floppy controller */ dev = qdev_new(TYPE_SWIM); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, SWIM_BASE); /* NuBus */ dev = qdev_new(TYPE_MAC_NUBUS_BRIDGE); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, NUBUS_SUPER_SLOT_BASE); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, NUBUS_SLOT_BASE); nubus = MAC_NUBUS_BRIDGE(dev)->bus; /* framebuffer in nubus slot #9 */ dev = qdev_new(TYPE_NUBUS_MACFB); qdev_prop_set_uint32(dev, "width", graphic_width); qdev_prop_set_uint32(dev, "height", graphic_height); qdev_prop_set_uint8(dev, "depth", graphic_depth); qdev_realize_and_unref(dev, BUS(nubus), &error_fatal); cs = CPU(cpu); if (linux_boot) { uint64_t high; kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, NULL, &high, NULL, 1, EM_68K, 0, 0); if (kernel_size < 0) { error_report("could not load kernel '%s'", kernel_filename); exit(1); } stl_phys(cs->as, 4, elf_entry); /* 
reset initial PC */ parameters_base = (high + 1) & ~1; BOOTINFO1(cs->as, parameters_base, BI_MACHTYPE, MACH_MAC); BOOTINFO1(cs->as, parameters_base, BI_FPUTYPE, FPU_68040); BOOTINFO1(cs->as, parameters_base, BI_MMUTYPE, MMU_68040); BOOTINFO1(cs->as, parameters_base, BI_CPUTYPE, CPU_68040); BOOTINFO1(cs->as, parameters_base, BI_MAC_CPUID, CPUB_68040); BOOTINFO1(cs->as, parameters_base, BI_MAC_MODEL, MAC_MODEL_Q800); BOOTINFO1(cs->as, parameters_base, BI_MAC_MEMSIZE, ram_size >> 20); /* in MB */ BOOTINFO2(cs->as, parameters_base, BI_MEMCHUNK, 0, ram_size); BOOTINFO1(cs->as, parameters_base, BI_MAC_VADDR, VIDEO_BASE); BOOTINFO1(cs->as, parameters_base, BI_MAC_VDEPTH, graphic_depth); BOOTINFO1(cs->as, parameters_base, BI_MAC_VDIM, (graphic_height << 16) | graphic_width); BOOTINFO1(cs->as, parameters_base, BI_MAC_VROW, (graphic_width * graphic_depth + 7) / 8); BOOTINFO1(cs->as, parameters_base, BI_MAC_SCCBASE, SCC_BASE); rom = g_malloc(sizeof(*rom)); memory_region_init_ram_ptr(rom, NULL, "m68k_fake_mac.rom", sizeof(fake_mac_rom), fake_mac_rom); memory_region_set_readonly(rom, true); memory_region_add_subregion(get_system_memory(), MACROM_ADDR, rom); if (kernel_cmdline) { BOOTINFOSTR(cs->as, parameters_base, BI_COMMAND_LINE, kernel_cmdline); } /* load initrd */ if (initrd_filename) { initrd_size = get_image_size(initrd_filename); if (initrd_size < 0) { error_report("could not load initial ram disk '%s'", initrd_filename); exit(1); } initrd_base = (ram_size - initrd_size) & TARGET_PAGE_MASK; load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); BOOTINFO2(cs->as, parameters_base, BI_RAMDISK, initrd_base, initrd_size); } else { initrd_base = 0; initrd_size = 0; } BOOTINFO0(cs->as, parameters_base, BI_LAST); } else { uint8_t *ptr; /* allocate and load BIOS */ rom = g_malloc(sizeof(*rom)); memory_region_init_rom(rom, NULL, "m68k_mac.rom", MACROM_SIZE, &error_abort); filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); memory_region_add_subregion(get_system_memory(), MACROM_ADDR, rom); /* Load MacROM binary */ if (filename) { bios_size = load_image_targphys(filename, MACROM_ADDR, MACROM_SIZE); g_free(filename); } else { bios_size = -1; } /* Remove qtest_enabled() check once firmware files are in the tree */ if (!qtest_enabled()) { if (bios_size < 0 || bios_size > MACROM_SIZE) { error_report("could not load MacROM '%s'", bios_name); exit(1); } ptr = rom_ptr(MACROM_ADDR, MACROM_SIZE); stl_phys(cs->as, 0, ldl_p(ptr)); /* reset initial SP */ stl_phys(cs->as, 4, MACROM_ADDR + ldl_p(ptr + 4)); /* reset initial PC */ } } }
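A minimal sketch isolating the PROM fixup described in the commit message above, using the same bitrev8() helper the function calls (the standalone wrapper is an assumption for illustration):

static void prom_fixup(uint8_t prom[8], const uint8_t mac[6])
{
    uint8_t checksum = 0;

    for (int i = 0; i < 6; i++) {
        prom[i] = bitrev8(mac[i]);   /* MAC bytes stored bit-reversed */
        checksum ^= prom[i];         /* MacOS validates an exclusive-OR, not a sum */
    }
    prom[7] = 0xff - checksum;       /* correction byte so the XOR check passes */
}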
8660df5ea25ea4e6ee94fca43559165fe7610199
https://github.com/qemu/qemu
not_vulnerable (class 1)
g364fb: add VMStateDescription for G364SysBusState Currently when QEMU attempts to migrate the MIPS magnum machine it crashes due to a mistake in the g364fb VMStateDescription configuration which expects a G364SysBusState and not a G364State. Resolve the issue by adding a new VMStateDescription for G364SysBusState and embedding the existing vmstate_g364fb VMStateDescription inside it using VMSTATE_STRUCT. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Fixes: 97a3f6ffbba ("g364fb: convert to qdev") Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-Id: <20210625163554.14879-3-mark.cave-ayland@ilande.co.uk> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
static void g364fb_sysbus_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = g364fb_sysbus_realize; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); dc->desc = "G364 framebuffer"; dc->reset = g364fb_sysbus_reset; dc->vmsd = &vmstate_g364fb_sysbus; device_class_set_props(dc, g364fb_sysbus_properties); }
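The record stores only the class_init function; a sketch of the wrapper VMStateDescription the commit message describes, embedding the existing vmstate_g364fb via VMSTATE_STRUCT (version numbers and the g364 field name are assumptions):

static const VMStateDescription vmstate_g364fb_sysbus = {
    .name = "g364fb-sysbus",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        /* embed the device-level state so dc->vmsd now matches the
         * G364SysBusState that qdev actually passes in */
        VMSTATE_STRUCT(g364, G364SysBusState, 2, vmstate_g364fb, G364State),
        VMSTATE_END_OF_LIST()
    }
};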
92ecfab50ee2b30e60c774f96f05fc38714874f1
https://github.com/qemu/qemu
not_vulnerable (class 1)
target/mips: Fix gen_mxu_s32ldd_s32lddr There were two bugs here: (1) the required endianness was not present in the MemOp, and (2) we were not providing a zero-extended input to the bswap as semantics required. The best fix is to fold the bswap into the memory operation, producing the desired result directly. Acked-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx) { TCGv t0, t1; uint32_t XRa, Rb, s12, sel; t0 = tcg_temp_new(); t1 = tcg_temp_new(); XRa = extract32(ctx->opcode, 6, 4); s12 = extract32(ctx->opcode, 10, 10); sel = extract32(ctx->opcode, 20, 1); Rb = extract32(ctx->opcode, 21, 5); gen_load_gpr(t0, Rb); tcg_gen_movi_tl(t1, s12); tcg_gen_shli_tl(t1, t1, 2); if (s12 & 0x200) { tcg_gen_ori_tl(t1, t1, 0xFFFFF000); } tcg_gen_add_tl(t1, t0, t1); tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP)); gen_store_mxu_gpr(t1, XRa); tcg_temp_free(t0); tcg_temp_free(t1); }
06188c8981ca5d5386e22c82d5bd40e5f27c8492
https://github.com/qemu/qemu
not_vulnerable (class 1)
target/cris: Fix use_goto_tb Do not skip the page check for user-only -- mmap/mprotect can still change page mappings. Only check dc->base.pc_first, not dc->ppc -- the start page is the only one that's relevant. Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false) { TCGLabel *l1 = gen_new_label(); /* Conditional jmp. */ tcg_gen_mov_tl(env_pc, pc_false); tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1); tcg_gen_mov_tl(env_pc, pc_true); gen_set_label(l1); }
2d369d6e6e890a0204183e853604f8077329c4bc
https://github.com/qemu/qemu
not_vulnerable (class 1)
Prevent compiler warning on block.c Commit 3108a15cf (block: introduce bdrv_drop_filter()) introduced the uninitialized variable to_cow_parent in the bdrv_replace_node_common function; it is used only when detach_subchain is true. It is used in two places: the first if block properly initializes the variable, and the second block uses it. However, the compiler may treat these two blocks as two independent cases, so it thinks the first block can fail its test while the second one passes (although both use the same condition). This causes a warning that the variable can be uninitialized in the second block. The warning was observed with GCC 8.4.1 and 11.0.1. To prevent this warning, initialize the variable to NULL. Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com> Message-Id: <1162368493.17178530.1620201543649.JavaMail.zimbra@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static int bdrv_replace_node_common(BlockDriverState *from, BlockDriverState *to, bool auto_skip, bool detach_subchain, Error **errp) { Transaction *tran = tran_new(); g_autoptr(GHashTable) found = NULL; g_autoptr(GSList) refresh_list = NULL; BlockDriverState *to_cow_parent = NULL; int ret; if (detach_subchain) { assert(bdrv_chain_contains(from, to)); assert(from != to); for (to_cow_parent = from; bdrv_filter_or_cow_bs(to_cow_parent) != to; to_cow_parent = bdrv_filter_or_cow_bs(to_cow_parent)) { ; } } /* Make sure that @from doesn't go away until we have successfully attached * all of its parents to @to. */ bdrv_ref(from); assert(qemu_get_current_aio_context() == qemu_get_aio_context()); assert(bdrv_get_aio_context(from) == bdrv_get_aio_context(to)); bdrv_drained_begin(from); /* * Do the replacement without permission update. * Replacement may influence the permissions, we should calculate new * permissions based on new graph. If we fail, we'll roll-back the * replacement. */ ret = bdrv_replace_node_noperm(from, to, auto_skip, tran, errp); if (ret < 0) { goto out; } if (detach_subchain) { bdrv_remove_filter_or_cow_child(to_cow_parent, tran); } found = g_hash_table_new(NULL, NULL); refresh_list = bdrv_topological_dfs(refresh_list, found, to); refresh_list = bdrv_topological_dfs(refresh_list, found, from); ret = bdrv_list_refresh_perms(refresh_list, NULL, tran, errp); if (ret < 0) { goto out; } ret = 0; out: tran_finalize(tran, ret); bdrv_drained_end(from); bdrv_unref(from); return ret; }
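A minimal standalone reproduction of the warning pattern the commit message describes (hypothetical code, not QEMU's):

#include <stdio.h>

static void demo(int detach)
{
    const char *p = NULL;   /* the fix: initialize up front */

    if (detach) {
        p = "found";        /* first block initializes the variable */
    }
    /* ... unrelated work ... */
    if (detach) {
        puts(p);            /* without the "= NULL" above, GCC 8.4/11.0 may
                             * warn that p can be used uninitialized here,
                             * treating the two identical tests independently */
    }
}

int main(void)
{
    demo(1);
    return 0;
}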
2b02aabc9d02f9e95946cf639f546bb61f1721b7
https://github.com/qemu/qemu
not_vulnerable (class 1)
hw/nvme: fix missing check for PMR capability Qiang Liu reported that an access on an unknown address is triggered in memory_region_set_enabled because a check on CAP.PMRS is missing for the PMRCTL register write when no PMR is configured. Cc: qemu-stable@nongnu.org Fixes: 75c3c9de961d ("hw/block/nvme: disable PMR at boot up") Resolves: https://gitlab.com/qemu-project/qemu/-/issues/362 Signed-off-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Keith Busch <kbusch@kernel.org>
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, unsigned size) { if (unlikely(offset & (sizeof(uint32_t) - 1))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32, "MMIO write not 32-bit aligned," " offset=0x%"PRIx64"", offset); /* should be ignored, fall through for now */ } if (unlikely(size < sizeof(uint32_t))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall, "MMIO write smaller than 32-bits," " offset=0x%"PRIx64", size=%u", offset, size); /* should be ignored, fall through for now */ } switch (offset) { case 0xc: /* INTMS */ if (unlikely(msix_enabled(&(n->parent_obj)))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, "undefined access to interrupt mask set" " when MSI-X is enabled"); /* should be ignored, fall through for now */ } n->bar.intms |= data & 0xffffffff; n->bar.intmc = n->bar.intms; trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc); nvme_irq_check(n); break; case 0x10: /* INTMC */ if (unlikely(msix_enabled(&(n->parent_obj)))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, "undefined access to interrupt mask clr" " when MSI-X is enabled"); /* should be ignored, fall through for now */ } n->bar.intms &= ~(data & 0xffffffff); n->bar.intmc = n->bar.intms; trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc); nvme_irq_check(n); break; case 0x14: /* CC */ trace_pci_nvme_mmio_cfg(data & 0xffffffff); /* Windows first sends data, then sends enable bit */ if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) && !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc)) { n->bar.cc = data; } if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) { n->bar.cc = data; if (unlikely(nvme_start_ctrl(n))) { trace_pci_nvme_err_startfail(); n->bar.csts = NVME_CSTS_FAILED; } else { trace_pci_nvme_mmio_start_success(); n->bar.csts = NVME_CSTS_READY; } } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) { trace_pci_nvme_mmio_stopped(); nvme_ctrl_reset(n); n->bar.csts &= ~NVME_CSTS_READY; } if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) { trace_pci_nvme_mmio_shutdown_set(); nvme_ctrl_shutdown(n); n->bar.cc = data; n->bar.csts |= NVME_CSTS_SHST_COMPLETE; } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) { trace_pci_nvme_mmio_shutdown_cleared(); n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE; n->bar.cc = data; } break; case 0x1c: /* CSTS */ if (data & (1 << 4)) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported, "attempted to W1C CSTS.NSSRO" " but CAP.NSSRS is zero (not supported)"); } else if (data != 0) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts, "attempted to set a read only bit" " of controller status"); } break; case 0x20: /* NSSR */ if (data == 0x4e564d65) { trace_pci_nvme_ub_mmiowr_ssreset_unsupported(); } else { /* The spec says that writes of other values have no effect */ return; } break; case 0x24: /* AQA */ n->bar.aqa = data & 0xffffffff; trace_pci_nvme_mmio_aqattr(data & 0xffffffff); break; case 0x28: /* ASQ */ n->bar.asq = size == 8 ? data : (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff); trace_pci_nvme_mmio_asqaddr(data); break; case 0x2c: /* ASQ hi */ n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32); trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq); break; case 0x30: /* ACQ */ trace_pci_nvme_mmio_acqaddr(data); n->bar.acq = size == 8 ? 
data : (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff); break; case 0x34: /* ACQ hi */ n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32); trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq); break; case 0x38: /* CMBLOC */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved, "invalid write to reserved CMBLOC" " when CMBSZ is zero, ignored"); return; case 0x3C: /* CMBSZ */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly, "invalid write to read only CMBSZ, ignored"); return; case 0x50: /* CMBMSC */ if (!NVME_CAP_CMBS(n->bar.cap)) { return; } n->bar.cmbmsc = size == 8 ? data : (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff); n->cmb.cmse = false; if (NVME_CMBMSC_CRE(data)) { nvme_cmb_enable_regs(n); if (NVME_CMBMSC_CMSE(data)) { hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT; if (cba + int128_get64(n->cmb.mem.size) < cba) { NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1); return; } n->cmb.cba = cba; n->cmb.cmse = true; } } else { n->bar.cmbsz = 0; n->bar.cmbloc = 0; } return; case 0x54: /* CMBMSC hi */ n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32); return; case 0xe00: /* PMRCAP */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly, "invalid write to PMRCAP register, ignored"); return; case 0xe04: /* PMRCTL */ if (!NVME_CAP_PMRS(n->bar.cap)) { return; } n->bar.pmrctl = data; if (NVME_PMRCTL_EN(data)) { memory_region_set_enabled(&n->pmr.dev->mr, true); n->bar.pmrsts = 0; } else { memory_region_set_enabled(&n->pmr.dev->mr, false); NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 1); n->pmr.cmse = false; } return; case 0xe08: /* PMRSTS */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly, "invalid write to PMRSTS register, ignored"); return; case 0xe0C: /* PMREBS */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly, "invalid write to PMREBS register, ignored"); return; case 0xe10: /* PMRSWTP */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly, "invalid write to PMRSWTP register, ignored"); return; case 0xe14: /* PMRMSCL */ if (!NVME_CAP_PMRS(n->bar.cap)) { return; } n->bar.pmrmsc = (n->bar.pmrmsc & ~0xffffffff) | (data & 0xffffffff); n->pmr.cmse = false; if (NVME_PMRMSC_CMSE(n->bar.pmrmsc)) { hwaddr cba = NVME_PMRMSC_CBA(n->bar.pmrmsc) << PMRMSC_CBA_SHIFT; if (cba + int128_get64(n->pmr.dev->mr.size) < cba) { NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 1); return; } n->pmr.cmse = true; n->pmr.cba = cba; } return; case 0xe18: /* PMRMSCU */ if (!NVME_CAP_PMRS(n->bar.cap)) { return; } n->bar.pmrmsc = (n->bar.pmrmsc & 0xffffffff) | (data << 32); return; default: NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid, "invalid MMIO write," " offset=0x%"PRIx64", data=%"PRIx64"", offset, data); break; } }
e76fb260ca8fc2420a4ce792324af0544628b331
https://github.com/qemu/qemu
not_vulnerable (class 1)
Partially revert "hw/block/nvme: drain namespaces on sq deletion" This partially reverts commit 98f84f5a4eca5c03e32fff20f246d9b4b96d6422. Since all "multi aio" commands are now reimplemented to properly track the nested aiocbs, we can revert the "hack" that was introduced to make sure all requests were properly drained upon sq deletion. The revert is partial since we keep the assert that no outstanding requests remain on the submission queue after the explicit cancellation. Signed-off-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Keith Busch <kbusch@kernel.org>
static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req) { NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; NvmeRequest *r, *next; NvmeSQueue *sq; NvmeCQueue *cq; uint16_t qid = le16_to_cpu(c->qid); if (unlikely(!qid || nvme_check_sqid(n, qid))) { trace_pci_nvme_err_invalid_del_sq(qid); return NVME_INVALID_QID | NVME_DNR; } trace_pci_nvme_del_sq(qid); sq = n->sq[qid]; while (!QTAILQ_EMPTY(&sq->out_req_list)) { r = QTAILQ_FIRST(&sq->out_req_list); assert(r->aiocb); blk_aio_cancel(r->aiocb); } assert(QTAILQ_EMPTY(&sq->out_req_list)); if (!nvme_check_cqid(n, sq->cqid)) { cq = n->cq[sq->cqid]; QTAILQ_REMOVE(&cq->sq_list, sq, entry); nvme_post_cqes(cq); QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) { if (r->sq == sq) { QTAILQ_REMOVE(&cq->req_list, r, entry); QTAILQ_INSERT_TAIL(&sq->req_list, r, entry); } } } nvme_free_sq(sq, n); return NVME_SUCCESS; }
421a30927140945c6aa957c2c0e7ad695984483d
https://github.com/qemu/qemu
not_vulnerable (class 1)
hw/nvme: fix lbaf formats initialization Currently, LBAF formats are initialized based on the metadata size if and only if the nvme-ns "ms" parameter has a non-zero value. Since the Format NVM command is supported, the device parameter "ms" may not be the right criterion for initializing the supported LBAFs. Also make the LBAF array read-only. Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com> Reviewed-by: Klaus Jensen <k.jensen@samsung.com> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static int nvme_ns_init(NvmeNamespace *ns, Error **errp) { NvmeIdNs *id_ns = &ns->id_ns; uint8_t ds; uint16_t ms; int i; ns->csi = NVME_CSI_NVM; ns->status = 0x0; ns->id_ns.dlfeat = 0x1; /* support DULBE and I/O optimization fields */ id_ns->nsfeat |= (0x4 | 0x10); if (ns->params.shared) { id_ns->nmic |= NVME_NMIC_NS_SHARED; } /* simple copy */ id_ns->mssrl = cpu_to_le16(ns->params.mssrl); id_ns->mcl = cpu_to_le32(ns->params.mcl); id_ns->msrc = ns->params.msrc; ds = 31 - clz32(ns->blkconf.logical_block_size); ms = ns->params.ms; id_ns->mc = NVME_ID_NS_MC_EXTENDED | NVME_ID_NS_MC_SEPARATE; if (ms && ns->params.mset) { id_ns->flbas |= NVME_ID_NS_FLBAS_EXTENDED; } id_ns->dpc = 0x1f; id_ns->dps = ns->params.pi; if (ns->params.pi && ns->params.pil) { id_ns->dps |= NVME_ID_NS_DPS_FIRST_EIGHT; } static const NvmeLBAF lbaf[16] = { [0] = { .ds = 9 }, [1] = { .ds = 9, .ms = 8 }, [2] = { .ds = 9, .ms = 16 }, [3] = { .ds = 9, .ms = 64 }, [4] = { .ds = 12 }, [5] = { .ds = 12, .ms = 8 }, [6] = { .ds = 12, .ms = 16 }, [7] = { .ds = 12, .ms = 64 }, }; memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf)); id_ns->nlbaf = 7; for (i = 0; i <= id_ns->nlbaf; i++) { NvmeLBAF *lbaf = &id_ns->lbaf[i]; if (lbaf->ds == ds) { if (lbaf->ms == ms) { id_ns->flbas |= i; goto lbaf_found; } } } /* add non-standard lba format */ id_ns->nlbaf++; id_ns->lbaf[id_ns->nlbaf].ds = ds; id_ns->lbaf[id_ns->nlbaf].ms = ms; id_ns->flbas |= id_ns->nlbaf; lbaf_found: nvme_ns_init_format(ns); return 0; }
3593b8e0a2146a885f93d71c754757bb2c03864e
https://github.com/qemu/qemu
not_vulnerable (class 1)
Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2021-06-24' into staging Block patch: - Fix Coverity complaint in block/snapshot.c # gpg: Signature made Thu 24 Jun 2021 12:42:28 BST # gpg: using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40 # gpg: issuer "mreitz@redhat.com" # gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full] # Primary key fingerprint: 91BE B60A 30DB 3E88 57D1 1829 F407 DB00 61D5 CF40 * remotes/maxreitz/tags/pull-block-2021-06-24: block/snapshot: Clarify goto fallback behavior Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
int bdrv_snapshot_goto(BlockDriverState *bs, const char *snapshot_id, Error **errp) { BlockDriver *drv = bs->drv; BdrvChild **fallback_ptr; int ret, open_ret; if (!drv) { error_setg(errp, "Block driver is closed"); return -ENOMEDIUM; } if (!QLIST_EMPTY(&bs->dirty_bitmaps)) { error_setg(errp, "Device has active dirty bitmaps"); return -EBUSY; } if (drv->bdrv_snapshot_goto) { ret = drv->bdrv_snapshot_goto(bs, snapshot_id); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to load snapshot"); } return ret; } fallback_ptr = bdrv_snapshot_fallback_ptr(bs); if (fallback_ptr) { QDict *options; QDict *file_options; Error *local_err = NULL; BlockDriverState *fallback_bs = (*fallback_ptr)->bs; char *subqdict_prefix = g_strdup_printf("%s.", (*fallback_ptr)->name); options = qdict_clone_shallow(bs->options); /* Prevent it from getting deleted when detached from bs */ bdrv_ref(fallback_bs); qdict_extract_subqdict(options, &file_options, subqdict_prefix); qobject_unref(file_options); g_free(subqdict_prefix); /* Force .bdrv_open() below to re-attach fallback_bs on *fallback_ptr */ qdict_put_str(options, (*fallback_ptr)->name, bdrv_get_node_name(fallback_bs)); /* Now close bs, apply the snapshot on fallback_bs, and re-open bs */ if (drv->bdrv_close) { drv->bdrv_close(bs); } /* .bdrv_open() will re-attach it */ bdrv_unref_child(bs, *fallback_ptr); *fallback_ptr = NULL; ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp); open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err); qobject_unref(options); if (open_ret < 0) { bdrv_unref(fallback_bs); bs->drv = NULL; /* A bdrv_snapshot_goto() error takes precedence */ error_propagate(errp, local_err); return ret < 0 ? ret : open_ret; } /* * fallback_ptr is &bs->file or &bs->backing. *fallback_ptr * was closed above and set to NULL, but the .bdrv_open() call * has opened it again, because we set the respective option * (with the qdict_put_str() call above). * Assert that .bdrv_open() has attached some child on * *fallback_ptr, and that it has attached the one we wanted * it to (i.e., fallback_bs). */ assert(*fallback_ptr && fallback_bs == (*fallback_ptr)->bs); bdrv_unref(fallback_bs); return ret; } error_setg(errp, "Block driver does not support snapshots"); return -ENOTSUP; }
0aebebb561c9c23b9c6d3d58040f83547f059b5c
https://github.com/qemu/qemu
not_vulnerable (class 1)
machine: reject -smp dies!=1 for non-PC machines Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-Id: <20210617155308.928754-11-pbonzini@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) { unsigned cpus = config->has_cpus ? config->cpus : 0; unsigned sockets = config->has_sockets ? config->sockets : 0; unsigned cores = config->has_cores ? config->cores : 0; unsigned threads = config->has_threads ? config->threads : 0; if (config->has_dies && config->dies != 0 && config->dies != 1) { error_setg(errp, "dies not supported by this machine's CPU topology"); } /* compute missing values, prefer sockets over cores over threads */ if (cpus == 0 || sockets == 0) { cores = cores > 0 ? cores : 1; threads = threads > 0 ? threads : 1; if (cpus == 0) { sockets = sockets > 0 ? sockets : 1; cpus = cores * threads * sockets; } else { ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; sockets = ms->smp.max_cpus / (cores * threads); } } else if (cores == 0) { threads = threads > 0 ? threads : 1; cores = cpus / (sockets * threads); cores = cores > 0 ? cores : 1; } else if (threads == 0) { threads = cpus / (cores * sockets); threads = threads > 0 ? threads : 1; } else if (sockets * cores * threads < cpus) { error_setg(errp, "cpu topology: " "sockets (%u) * cores (%u) * threads (%u) < " "smp_cpus (%u)", sockets, cores, threads, cpus); return; } ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; if (ms->smp.max_cpus < cpus) { error_setg(errp, "maxcpus must be equal to or greater than smp"); return; } if (sockets * cores * threads != ms->smp.max_cpus) { error_setg(errp, "Invalid CPU topology: " "sockets (%u) * cores (%u) * threads (%u) " "!= maxcpus (%u)", sockets, cores, threads, ms->smp.max_cpus); return; } ms->smp.cpus = cpus; ms->smp.cores = cores; ms->smp.threads = threads; ms->smp.sockets = sockets; }
18473467d55a20d643b6c9b3a52de42f705b4d35
https://github.com/qemu/qemu
not_vulnerable (class 1)
file-posix: try BLKSECTGET on block devices too, do not round to power of 2 bs->sg is only true for character devices, but block devices can also be used with scsi-block and scsi-generic. Unfortunately BLKSECTGET returns bytes in an int for /dev/sgN devices, and sectors in a short for block devices, so account for that in the code. The maximum transfer also need not be a power of 2 (for example I have seen disks with 1280 KiB maximum transfer) so there's no need to pass the result through pow2floor. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void raw_reopen_abort(BDRVReopenState *state) { BDRVRawReopenState *rs = state->opaque; BDRVRawState *s = state->bs->opaque; /* nothing to do if NULL, we didn't get far enough */ if (rs == NULL) { return; } g_free(state->opaque); state->opaque = NULL; assert(s->reopen_state == state); s->reopen_state = NULL; }
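Note that the function stored with this record is raw_reopen_abort(), not the BLKSECTGET code the message describes. A sketch of the distinction, following the commit message's own description (helper name hypothetical):

#include <errno.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/fs.h>

static int get_max_transfer_bytes(int fd)
{
    struct stat st;

    if (fstat(fd, &st)) {
        return -errno;
    }
    if (S_ISBLK(st.st_mode)) {
        unsigned short max_sectors = 0;   /* block devices: sectors in a short */
        if (ioctl(fd, BLKSECTGET, &max_sectors) == 0) {
            return max_sectors * 512;     /* convert sectors to bytes */
        }
    } else {
        int max_bytes = 0;                /* /dev/sgN: bytes in an int */
        if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) {
            return max_bytes;
        }
    }
    return -errno;
}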
8ad5ab6148dca8aad297c134c09c84b0b92d45ed
https://github.com/qemu/qemu
not_vulnerable (class 1)
file-posix: fix max_iov for /dev/sg devices Even though it was only called for devices that have bs->sg set (which must be character devices), sg_get_max_segments looked at /sys/dev/block which only works for block devices. On Linux the sg driver has its own way to provide the maximum number of iovecs in a scatter/gather list, so add support for it. The block device path is kept because it will be reinstated in the next patches. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Reviewed-by: Max Reitz <mreitz@redhat.com>
static int sg_get_max_segments(int fd) { #ifdef CONFIG_LINUX char buf[32]; const char *end; char *sysfspath = NULL; int ret; int sysfd = -1; long max_segments; struct stat st; if (fstat(fd, &st)) { ret = -errno; goto out; } if (S_ISCHR(st.st_mode)) { if (ioctl(fd, SG_GET_SG_TABLESIZE, &ret) == 0) { return ret; } return -ENOTSUP; } if (!S_ISBLK(st.st_mode)) { return -ENOTSUP; } sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments", major(st.st_rdev), minor(st.st_rdev)); sysfd = open(sysfspath, O_RDONLY); if (sysfd == -1) { ret = -errno; goto out; } do { ret = read(sysfd, buf, sizeof(buf) - 1); } while (ret == -1 && errno == EINTR); if (ret < 0) { ret = -errno; goto out; } else if (ret == 0) { ret = -EIO; goto out; } buf[ret] = 0; /* The file is ended with '\n', pass 'end' to accept that. */ ret = qemu_strtol(buf, &end, 10, &max_segments); if (ret == 0 && end && *end == '\n') { ret = max_segments; } out: if (sysfd != -1) { close(sysfd); } g_free(sysfspath); return ret; #else return -ENOTSUP; #endif }
05d9d0359e6da7dc8255712d745d079a04fa5ae5
https://github.com/qemu/qemu
not_vulnerable (class 1)
target/mips: Do not abort on invalid instruction On real hardware an invalid instruction doesn't halt the world, but usually triggers a RESERVED INSTRUCTION exception. TCG guest code shouldn't abort QEMU anyway. Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210617174323.2900831-2-f4bug@amsat.org>
static void gen_branch(DisasContext *ctx, int insn_bytes) { if (ctx->hflags & MIPS_HFLAG_BMASK) { int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK; /* Branches completion */ clear_branch_hflags(ctx); ctx->base.is_jmp = DISAS_NORETURN; /* FIXME: Need to clear can_do_io. */ switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) { case MIPS_HFLAG_FBNSLOT: gen_goto_tb(ctx, 0, ctx->base.pc_next + insn_bytes); break; case MIPS_HFLAG_B: /* unconditional branch */ if (proc_hflags & MIPS_HFLAG_BX) { tcg_gen_xori_i32(hflags, hflags, MIPS_HFLAG_M16); } gen_goto_tb(ctx, 0, ctx->btarget); break; case MIPS_HFLAG_BL: /* blikely taken case */ gen_goto_tb(ctx, 0, ctx->btarget); break; case MIPS_HFLAG_BC: /* Conditional branch */ { TCGLabel *l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1); gen_goto_tb(ctx, 1, ctx->base.pc_next + insn_bytes); gen_set_label(l1); gen_goto_tb(ctx, 0, ctx->btarget); } break; case MIPS_HFLAG_BR: /* unconditional branch to register */ if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { TCGv t0 = tcg_temp_new(); TCGv_i32 t1 = tcg_temp_new_i32(); tcg_gen_andi_tl(t0, btarget, 0x1); tcg_gen_trunc_tl_i32(t1, t0); tcg_temp_free(t0); tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16); tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT); tcg_gen_or_i32(hflags, hflags, t1); tcg_temp_free_i32(t1); tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1); } else { tcg_gen_mov_tl(cpu_PC, btarget); } if (ctx->base.singlestep_enabled) { save_cpu_state(ctx, 0); gen_helper_raise_exception_debug(cpu_env); } tcg_gen_lookup_and_goto_ptr(); break; default: LOG_DISAS("unknown branch 0x%x\n", proc_hflags); gen_reserved_instruction(ctx); } } }
2838b1d6356044eb240edd4e1b9b5ab5946c5b28
https://github.com/qemu/qemu
not_vulnerable (class 1)
target/mips: Fix potential integer overflow (CID 1452921) Use the BIT_ULL() macro to ensure we use 64-bit arithmetic. This fixes the following Coverity issue (OVERFLOW_BEFORE_WIDEN): CID 1452921: Integer handling issues: Potentially overflowing expression "1 << w" with type "int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "uint64_t" (64 bits, unsigned). Fixes: 074cfcb4dae ("target/mips: Implement hardware page table walker") Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210505215119.1517465-1-f4bug@amsat.org>
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, int directory_index, bool *huge_page, bool *hgpg_directory_hit, uint64_t *pw_entrylo0, uint64_t *pw_entrylo1) { int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1; int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F; int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F; int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3; int directory_shift = (ptew > 1) ? -1 : (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift; int leaf_shift = (ptew > 1) ? -1 : (ptew == 1) ? native_shift + 1 : native_shift; uint32_t direntry_size = 1 << (directory_shift + 3); uint32_t leafentry_size = 1 << (leaf_shift + 3); uint64_t entry; uint64_t paddr; int prot; uint64_t lsb = 0; uint64_t w = 0; if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD, cpu_mmu_index(env, false)) != TLBRET_MATCH) { /* wrong base address */ return 0; } if (!get_pte(env, *vaddr, direntry_size, &entry)) { return 0; } if ((entry & (1 << psn)) && hugepg) { *huge_page = true; *hgpg_directory_hit = true; entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); w = directory_index - 1; if (directory_index & 0x1) { /* Generate adjacent page from same PTE for odd TLB page */ lsb = BIT_ULL(w) >> 6; *pw_entrylo0 = entry & ~lsb; /* even page */ *pw_entrylo1 = entry | lsb; /* odd page */ } else if (dph) { int oddpagebit = 1 << leaf_shift; uint64_t vaddr2 = *vaddr ^ oddpagebit; if (*vaddr & oddpagebit) { *pw_entrylo1 = entry; } else { *pw_entrylo0 = entry; } if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD, cpu_mmu_index(env, false)) != TLBRET_MATCH) { return 0; } if (!get_pte(env, vaddr2, leafentry_size, &entry)) { return 0; } entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); if (*vaddr & oddpagebit) { *pw_entrylo0 = entry; } else { *pw_entrylo1 = entry; } } else { return 0; } return 1; } else { *vaddr = entry; return 2; } }
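A short illustration of the overflow class Coverity flagged, mirroring the kernel-style BIT_ULL() macro used in the fix (wrapper function hypothetical):

#include <stdint.h>

#define BIT_ULL(nr) (1ULL << (nr))

static uint64_t directory_bit(int w)
{
    /* uint64_t lsb = 1 << w;  -- with w >= 31 this shifts a 32-bit int,
     * overflowing before the result is widened to 64 bits */
    return BIT_ULL(w);          /* 1ULL << w: 64-bit arithmetic throughout */
}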
79a412891f0cb6bbffd8fd9e13608066234e56c1
https://github.com/qemu/qemu
not_vulnerable (class 1)
target/riscv: gdbstub: Fix dynamic CSR XML generation Since commit 605def6eeee5 ("target/riscv: Use the RISCVException enum for CSR operations") the CSR predicate() function was changed to return RISCV_EXCP_NONE instead of 0 for a valid CSR, but it forgot to update the dynamic CSR XML generation codes in gdbstub. Fixes: 605def6eeee5 ("target/riscv: Use the RISCVException enum for CSR operations") Reported-by: Xuzhou Cheng <xuzhou.cheng@windriver.com> Signed-off-by: Bin Meng <bin.meng@windriver.com> Tested-by: Xuzhou Cheng <xuzhou.cheng@windriver.com> Reviewed-by: Alistair Francis <alistair.francis@wdc.com> Message-id: 20210615085133.389887-1-bmeng.cn@gmail.com Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; GString *s = g_string_new(NULL); riscv_csr_predicate_fn predicate; int bitsize = riscv_cpu_is_32bit(env) ? 32 : 64; int i; g_string_printf(s, "<?xml version=\"1.0\"?>"); g_string_append_printf(s, "<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"); g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">"); for (i = 0; i < CSR_TABLE_SIZE; i++) { predicate = csr_ops[i].predicate; if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) { if (csr_ops[i].name) { g_string_append_printf(s, "<reg name=\"%s\"", csr_ops[i].name); } else { g_string_append_printf(s, "<reg name=\"csr%03x\"", i); } g_string_append_printf(s, " bitsize=\"%d\"", bitsize); g_string_append_printf(s, " regnum=\"%d\"/>", base_reg + i); } } g_string_append_printf(s, "</feature>"); cpu->dyn_csr_xml = g_string_free(s, false); return CSR_TABLE_SIZE; }
3c11c2ebb062ffb5d7dcad44ab0fb60505ad5cac
https://github.com/qemu/qemu
not_vulnerable (class 1)
target/s390x: Do not modify cpu state in s390_cpu_get_psw_mask We want to use this function for debugging, and debug should not modify cpu state (even non-architectural cpu state) lest we introduce heisenbugs. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: David Hildenbrand <david@redhat.com> Tested-by: jonathan.albrecht <jonathan.albrecht@linux.vnet.ibm.com> Tested-by: <ruixin.bao@ibm.com> Message-Id: <20210615030744.1252385-3-richard.henderson@linaro.org> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
uint64_t s390_cpu_get_psw_mask(CPUS390XState *env) { uint64_t r = env->psw.mask; if (tcg_enabled()) { uint64_t cc = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr); assert(cc <= 3); r &= ~PSW_MASK_CC; r |= cc << 44; } return r; }
96ff758c6e9cd5a01443ee15afbd0df4f00c37a8
https://github.com/qemu/qemu
not_vulnerable (class 1)
linux-user: Use public sigev_notify_thread_id member if available _sigev_un._tid is an internal glibc field and is not available on musl libc. The sigevent(7) man page and Linux UAPI headers both use sigev_notify_thread_id as a public way to access this field. musl libc supports this field since 1.2.2[0], and glibc plans to add support as well[1][2]. If sigev_notify_thread_id is not available, fall back to _sigev_un._tid as before. [0] http://git.musl-libc.org/cgit/musl/commit/?id=7c71792e87691451f2a6b76348e83ad1889f1dcb [1] https://www.openwall.com/lists/musl/2019/08/01/5 [2] https://sourceware.org/bugzilla/show_bug.cgi?id=27417 Signed-off-by: Michael Forney <mforney@mforney.org> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <20210526035556.7931-1-mforney@mforney.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, abi_ulong target_addr) { struct target_sigevent *target_sevp; if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { return -TARGET_EFAULT; } /* This union is awkward on 64 bit systems because it has a 32 bit * integer and a pointer in it; we follow the conversion approach * used for handling sigval types in signal.c so the guest should get * the correct value back even if we did a 64 bit byteswap and it's * using the 32 bit integer. */ host_sevp->sigev_value.sival_ptr = (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); host_sevp->sigev_signo = target_to_host_signal(tswap32(target_sevp->sigev_signo)); host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid); unlock_user_struct(target_sevp, target_addr, 1); return 0; }
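A sketch of the compile-time fallback the commit message describes; the HAVE_SIGEV_NOTIFY_THREAD_ID guard stands in for an assumed configure-time member check:

/* Defined when the host libc declares the public member
 * (musl >= 1.2.2, future glibc). */
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
/* Fall back to glibc's internal field so the assignment in the
 * function above still compiles on older glibc. */
#define sigev_notify_thread_id  _sigev_un._tid
#endif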
1c9638667b7068539dc5783c9428d588b14162ea
https://github.com/qemu/qemu
not_vulnerable (class 1)
util/oslib-win32: Fix fatal assertion in qemu_try_memalign The function is called with alignment == 0, which triggered the assertion. Use the code from oslib-posix.c to fix that regression. Fixes: ed6f53f9ca9 Signed-off-by: Stefan Weil <sw@weilnetz.de> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Message-Id: <20210611105846.347954-1-sw@weilnetz.de> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
void *qemu_try_memalign(size_t alignment, size_t size) { void *ptr; g_assert(size != 0); if (alignment < sizeof(void *)) { alignment = sizeof(void *); } else { g_assert(is_power_of_2(alignment)); } ptr = _aligned_malloc(size, alignment); trace_qemu_memalign(alignment, size, ptr); return ptr; }
2a25def4be09714c543713f111813b521b2356ee
https://github.com/qemu/qemu
not_vulnerable (class 1)
block/nbd: nbd_client_handshake(): fix leak of s->ioc Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Reviewed-by: Roman Kagan <rvkagan@yandex-team.ru> Message-Id: <20210610100802.5888-9-vsementsov@virtuozzo.com> Signed-off-by: Eric Blake <eblake@redhat.com>
static int nbd_client_handshake(BlockDriverState *bs, Error **errp);
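Only the prototype is stored with this record; a sketch of the leak fix the message implies, on the handshake's failure path (excerpt, surrounding code assumed):

/* On negotiation failure, drop the reference instead of merely
 * returning, so the QIOChannel is not leaked. */
if (ret < 0) {
    object_unref(OBJECT(s->ioc));
    s->ioc = NULL;
    return ret;
}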
22fca190e25b10761925bb1eeadeda07aabf3c26
https://github.com/qemu/qemu
not_vulnerable (class 1)
vfio: Fix unregister SaveVMHandler in vfio_migration_finalize In vfio_migration_init(), the SaveVMHandler is registered for the VFIO device, but the corresponding 'unregister' operation is missing. This leads to 'Segmentation fault (core dumped)' in qemu_savevm_state_setup() when performing live migration after a VFIO device is hot-deleted. Fixes: 7c2f5f75f94 (vfio: Register SaveVMHandlers for VFIO device) Reported-by: Qixin Gan <ganqixin@huawei.com> Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com> Message-Id: <20210527123101.289-1-jiangkunkun@huawei.com> Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com> Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
void vfio_migration_finalize(VFIODevice *vbasedev) { if (vbasedev->migration) { VFIOMigration *migration = vbasedev->migration; remove_migration_state_change_notifier(&migration->migration_state); qemu_del_vm_change_state_handler(migration->vm_state); unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev); vfio_migration_exit(vbasedev); } if (vbasedev->migration_blocker) { migrate_del_blocker(vbasedev->migration_blocker); error_free(vbasedev->migration_blocker); vbasedev->migration_blocker = NULL; } }
986bdbc6a29c4d7ef125299c5013783e30dc2cae
https://github.com/qemu/qemu
not_vulnerable (class 1)
coreaudio: Fix output stream format settings Before commit 7d6948cd98cf5ad8a3458a4ce7fdbcb79bcd1212, the code retrieved the initial output stream format settings, modified the frame rate, and set them again. However, I removed the frame rate modification code by mistake in that commit. It also assumed the initial output stream format is consistent with what QEMU expects, but that expectation is not encoded anywhere, which makes the code harder to understand and will lead to breakage if the initial settings change. This change explicitly sets all of the output stream settings to solve these problems. Signed-off-by: Akihiko Odaki <akihiko.odaki@gmail.com> Message-Id: <20210616141721.54091-1-akihiko.odaki@gmail.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static OSStatus coreaudio_set_framesize(AudioDeviceID id, UInt32 *framesize) { UInt32 size = sizeof(*framesize); AudioObjectPropertyAddress addr = { kAudioDevicePropertyBufferFrameSize, kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMaster }; return AudioObjectSetPropertyData(id, &addr, 0, NULL, size, framesize); }
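The stored function only sets the buffer frame size; a sketch of explicitly filling every output stream format field as the message describes (helper name and field values are assumptions):

static OSStatus coreaudio_set_streamformat(AudioDeviceID id,
                                           AudioStreamBasicDescription *d)
{
    AudioObjectPropertyAddress addr = {
        kAudioDevicePropertyStreamFormat,
        kAudioDevicePropertyScopeOutput,
        kAudioObjectPropertyElementMaster
    };

    return AudioObjectSetPropertyData(id, &addr, 0, NULL, sizeof(*d), d);
}

/* The caller fills in every field instead of patching only the rate: */
static const AudioStreamBasicDescription example_format = {
    .mSampleRate       = 44100,                   /* assumed rate */
    .mFormatID         = kAudioFormatLinearPCM,
    .mFormatFlags      = kAudioFormatFlagIsFloat,
    .mBitsPerChannel   = 32,
    .mChannelsPerFrame = 2,
    .mFramesPerPacket  = 1,
    .mBytesPerFrame    = 8,                       /* 2 channels x 4 bytes */
    .mBytesPerPacket   = 8,
};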
2833d697b9a418e2b9735e38ad4b33ae86f84739
https://github.com/qemu/qemu
not_vulnerable (class 1)
jackaudio: avoid that the client name contains the word (NULL) Currently with jackaudio client name and qemu guest name unset, the JACK client names are out-(NULL) and in-(NULL). These names are user visible in the patch bay. Replace the function call to qemu_get_vm_name() with a call to audio_application_name() which replaces NULL with "qemu" to have more descriptive names. Signed-off-by: Volker Rümelin <vr_qemu@t-online.de> Message-Id: <20210517194604.2545-4-vr_qemu@t-online.de> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static int qjack_client_init(QJackClient *c);
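Only the prototype is stored here; the change the message describes amounts to a one-call swap when building the client name (sketch, surrounding names assumed):

/* before: yields "out-(NULL)" / "in-(NULL)" when no VM name is set */
snprintf(client_name, sizeof(client_name), "%s-%s",
         c->out ? "out" : "in", qemu_get_vm_name());

/* after: audio_application_name() substitutes "qemu" for NULL */
snprintf(client_name, sizeof(client_name), "%s-%s",
         c->out ? "out" : "in", audio_application_name());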
cdfa56c551bb48f286cfe1f2daa1083d333ee45d
https://github.com/qemu/qemu
not_vulnerable (class 1)
softmmu/physmem: Fix ram_block_discard_range() to handle shared anonymous memory We can create shared anonymous memory via "-object memory-backend-ram,share=on,..." which is, for example, required by PVRDMA for mremap() to work. Shared anonymous memory is weird, though. Instead of MADV_DONTNEED, we have to use MADV_REMOVE: MADV_DONTNEED will only remove / zap all relevant page table entries of the current process, the backend storage will not get removed, resulting in no reduced memory consumption and a repopulation of previous content on next access. Shared anonymous memory is internally really just shmem, but without a fd exposed. As we cannot use fallocate() without the fd to discard the backing storage, MADV_REMOVE gets the same job done without a fd as documented in "man 2 madvise". Removing backing storage implicitly invalidates all page table entries with relevant mappings - an additional MADV_DONTNEED is not required. Fixes: 06329ccecfa0 ("mem: add share parameter to memory-backend-ram") Reviewed-by: Peter Xu <peterx@redhat.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: David Hildenbrand <david@redhat.com> Message-Id: <20210406080126.24010-3-david@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) { int ret = -1; uint8_t *host_startaddr = rb->host + start; if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { error_report("ram_block_discard_range: Unaligned start address: %p", host_startaddr); goto err; } if ((start + length) <= rb->max_length) { bool need_madvise, need_fallocate; if (!QEMU_IS_ALIGNED(length, rb->page_size)) { error_report("ram_block_discard_range: Unaligned length: %zx", length); goto err; } errno = ENOTSUP; /* If we are missing MADVISE etc */ /* The logic here is messy; * madvise DONTNEED fails for hugepages * fallocate works on hugepages and shmem * shared anonymous memory requires madvise REMOVE */ need_madvise = (rb->page_size == qemu_host_page_size); need_fallocate = rb->fd != -1; if (need_fallocate) { /* For a file, this causes the area of the file to be zero'd * if read, and for hugetlbfs also causes it to be unmapped * so a userfault will trigger. */ #ifdef CONFIG_FALLOCATE_PUNCH_HOLE ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, start, length); if (ret) { ret = -errno; error_report("ram_block_discard_range: Failed to fallocate " "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; error_report("ram_block_discard_range: fallocate not available/file" "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; #endif } if (need_madvise) { /* For normal RAM this causes it to be unmapped, * for shared memory it causes the local mapping to disappear * and to fall back on the file contents (which we just * fallocate'd away). */ #if defined(CONFIG_MADVISE) if (qemu_ram_is_shared(rb) && rb->fd < 0) { ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE); } else { ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED); } if (ret) { ret = -errno; error_report("ram_block_discard_range: Failed to discard range " "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; error_report("ram_block_discard_range: MADVISE not available" "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; #endif } trace_ram_block_discard_range(rb->idstr, host_startaddr, length, need_madvise, need_fallocate, ret); } else { error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", rb->idstr, start, length, rb->max_length); } err: return ret; }
cd39e773e00bf98ab41e2ffaaeab7a00a3f68bd1
https://github.com/qemu/qemu
not_vulnerable (class 1)
target/arm: Diagnose UNALLOCATED in disas_simd_two_reg_misc_fp16 This fprintf+assert has been in place since the beginning. It is prior to the fp_access_check, so we're still good to raise sigill here. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/381 Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-id: 20210604183506.916654-2-richard.henderson@linaro.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) { int fpop, opcode, a, u; int rn, rd; bool is_q; bool is_scalar; bool only_in_vector = false; int pass; TCGv_i32 tcg_rmode = NULL; TCGv_ptr tcg_fpstatus = NULL; bool need_rmode = false; bool need_fpst = true; int rmode; if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } rd = extract32(insn, 0, 5); rn = extract32(insn, 5, 5); a = extract32(insn, 23, 1); u = extract32(insn, 29, 1); is_scalar = extract32(insn, 28, 1); is_q = extract32(insn, 30, 1); opcode = extract32(insn, 12, 5); fpop = deposit32(opcode, 5, 1, a); fpop = deposit32(fpop, 6, 1, u); switch (fpop) { case 0x1d: /* SCVTF */ case 0x5d: /* UCVTF */ { int elements; if (is_scalar) { elements = 1; } else { elements = (is_q ? 8 : 4); } if (!fp_access_check(s)) { return; } handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16); return; } break; case 0x2c: /* FCMGT (zero) */ case 0x2d: /* FCMEQ (zero) */ case 0x2e: /* FCMLT (zero) */ case 0x6c: /* FCMGE (zero) */ case 0x6d: /* FCMLE (zero) */ handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd); return; case 0x3d: /* FRECPE */ case 0x3f: /* FRECPX */ break; case 0x18: /* FRINTN */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_TIEEVEN; break; case 0x19: /* FRINTM */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_NEGINF; break; case 0x38: /* FRINTP */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_POSINF; break; case 0x39: /* FRINTZ */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_ZERO; break; case 0x58: /* FRINTA */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_TIEAWAY; break; case 0x59: /* FRINTX */ case 0x79: /* FRINTI */ only_in_vector = true; /* current rounding mode */ break; case 0x1a: /* FCVTNS */ need_rmode = true; rmode = FPROUNDING_TIEEVEN; break; case 0x1b: /* FCVTMS */ need_rmode = true; rmode = FPROUNDING_NEGINF; break; case 0x1c: /* FCVTAS */ need_rmode = true; rmode = FPROUNDING_TIEAWAY; break; case 0x3a: /* FCVTPS */ need_rmode = true; rmode = FPROUNDING_POSINF; break; case 0x3b: /* FCVTZS */ need_rmode = true; rmode = FPROUNDING_ZERO; break; case 0x5a: /* FCVTNU */ need_rmode = true; rmode = FPROUNDING_TIEEVEN; break; case 0x5b: /* FCVTMU */ need_rmode = true; rmode = FPROUNDING_NEGINF; break; case 0x5c: /* FCVTAU */ need_rmode = true; rmode = FPROUNDING_TIEAWAY; break; case 0x7a: /* FCVTPU */ need_rmode = true; rmode = FPROUNDING_POSINF; break; case 0x7b: /* FCVTZU */ need_rmode = true; rmode = FPROUNDING_ZERO; break; case 0x2f: /* FABS */ case 0x6f: /* FNEG */ need_fpst = false; break; case 0x7d: /* FRSQRTE */ case 0x7f: /* FSQRT (vector) */ break; default: unallocated_encoding(s); return; } /* Check additional constraints for the scalar encoding */ if (is_scalar) { if (!is_q) { unallocated_encoding(s); return; } /* FRINTxx is only in the vector form */ if (only_in_vector) { unallocated_encoding(s); return; } } if (!fp_access_check(s)) { return; } if (need_rmode || need_fpst) { tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16); } if (need_rmode) { tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); } if (is_scalar) { TCGv_i32 tcg_op = read_fp_hreg(s, rn); TCGv_i32 tcg_res = tcg_temp_new_i32(); switch (fpop) { case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x3d: /* FRECPE */ 
gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus); break; case 0x3f: /* FRECPX */ gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus); break; case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x6f: /* FNEG */ tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000); break; case 0x7d: /* FRSQRTE */ gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus); break; default: g_assert_not_reached(); } /* limit any sign extension going on */ tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff); write_fp_sreg(s, rd, tcg_res); tcg_temp_free_i32(tcg_res); tcg_temp_free_i32(tcg_op); } else { for (pass = 0; pass < (is_q ? 8 : 4); pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(); TCGv_i32 tcg_res = tcg_temp_new_i32(); read_vec_element_i32(s, tcg_op, rn, pass, MO_16); switch (fpop) { case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x3d: /* FRECPE */ gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus); break; case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x18: /* FRINTN */ case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ case 0x39: /* FRINTZ */ case 0x58: /* FRINTA */ case 0x79: /* FRINTI */ gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus); break; case 0x59: /* FRINTX */ gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus); break; case 0x2f: /* FABS */ tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff); break; case 0x6f: /* FNEG */ tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000); break; case 0x7d: /* FRSQRTE */ gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus); break; case 0x7f: /* FSQRT */ gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus); break; default: g_assert_not_reached(); } write_vec_element_i32(s, tcg_res, rd, pass, MO_16); tcg_temp_free_i32(tcg_res); tcg_temp_free_i32(tcg_op); } clear_vec_high(s, is_q, rd); } if (tcg_rmode) { gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_rmode); } if (tcg_fpstatus) { tcg_temp_free_ptr(tcg_fpstatus); } }
96a664d05c238ea1b64af2394b58e956fe0afe26
https://github.com/qemu/qemu
1not_vulnerable
hw/intc/arm_gicv3_cpuif: Tolerate spurious EOIR writes Commit 382c7160d1cd ("hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic") added an assert_not_reached() if the guest writes the EOIR register while no interrupt is active. It turns out some software does this: EDK2, in GicV3ExitBootServicesEvent(), unconditionally writes EOIR for all interrupts that it manages. This now causes QEMU to abort when running UEFI on a VM with GICv3. Although it is UNPREDICTABLE behavior and EDK2 does need fixing, the punishment seems a little harsh, especially since icc_eoir_write() already tolerates writes of nonexistent interrupt numbers. Display a guest error and tolerate spurious EOIR writes. Fixes: 382c7160d1cd ("hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic") Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Tested-by: Alex Bennée <alex.bennee@linaro.org> Message-id: 20210604130352.1887560-1-jean-philippe@linaro.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* End of Interrupt */ GICv3CPUState *cs = icc_cs_from_env(env); int irq = value & 0xffffff; int grp; bool is_eoir0 = ri->crm == 8; if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) { icv_eoir_write(env, ri, value); return; } trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1, gicv3_redist_affid(cs), value); if (irq >= cs->gic->num_irq) { /* This handles two cases: * 1. If software writes the ID of a spurious interrupt [ie 1020-1023] * to the GICC_EOIR, the GIC ignores that write. * 2. If software writes the number of a non-existent interrupt * this must be a subcase of "value written does not match the last * valid interrupt value read from the Interrupt Acknowledge * register" and so this is UNPREDICTABLE. We choose to ignore it. */ return; } grp = icc_highest_active_group(cs); switch (grp) { case GICV3_G0: if (!is_eoir0) { return; } if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) { return; } break; case GICV3_G1: if (is_eoir0) { return; } if (!arm_is_secure(env)) { return; } break; case GICV3_G1NS: if (is_eoir0) { return; } if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) { return; } break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: IRQ %d isn't active\n", __func__, irq); return; } icc_drop_prio(cs, grp); if (!icc_eoi_split(env, cs)) { /* Priority drop and deactivate not split: deactivate irq now */ icc_deactivate_irq(cs, irq); } }
0bcd5a18940e1c1e3350b93cfadcdc6b58ca1c0e
https://github.com/qemu/qemu
1not_vulnerable
esp: fix migration version check in esp_is_version_5() Commit 4e78f3bf35 "esp: defer command completion interrupt on incoming data transfers" added a version check for use with VMSTATE_*_TEST macros to allow migration from older QEMU versions. Unfortunately the version check fails to work in its current form since if the VMStateDescription version_id is incremented, the test returns false and so the fields are not included in the outgoing migration stream. Change the version check to use >= rather than == to ensure that migration works correctly when the ESPState VMStateDescription has version_id > 5. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Fixes: 4e78f3bf35 ("esp: defer command completion interrupt on incoming data transfers") Message-Id: <20210613102614.5438-1-mark.cave-ayland@ilande.co.uk> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static bool esp_is_version_5(void *opaque, int version_id) { ESPState *s = ESP(opaque); version_id = MIN(version_id, s->mig_version_id); return version_id >= 5; }
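A note on why >= matters here: the predicate is used as a field-existence test via the VMSTATE_*_TEST macros, gating whether a field is put on the wire at all. Below is a minimal sketch of such wiring; the field name (ti_cmd) and the exact layout are assumptions for illustration, not the real ESPState description.

/* Illustrative only: once version_id is bumped to 6, an "== 5" test would
 * return false and silently drop this field from the outgoing stream;
 * ">= 5" keeps emitting it for version 5 and all later versions. */
static const VMStateDescription vmstate_esp_example = {
    .name = "esp-example",
    .version_id = 6,                 /* bumped past 5 */
    .minimum_version_id = 5,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_END_OF_LIST()
    },
};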
c348458f357784629c36a6eb1493c0c0c33b74e7
https://github.com/qemu/qemu
1not_vulnerable
esp: correctly accumulate extended messages for PDMA Commit 799d90d818 "esp: transition to message out phase after SATN and stop command" added logic to correctly handle extended messages for DMA requests but not for PDMA requests. Apply the same logic from esp_do_dma() to do_dma_pdma_cb() so that extended messages terminated with a PDMA request are accumulated correctly. This allows the ESP device to respond correctly to the SDTR negotiation initiated by the NetBSD ESP driver without causing errors and timeouts on boot. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Message-Id: <20210519100803.10293-6-mark.cave-ayland@ilande.co.uk> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void do_dma_pdma_cb(ESPState *s) { int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); int len; uint32_t n; if (s->do_cmd) { /* Ensure we have received complete command after SATN and stop */ if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) { return; } s->ti_size = 0; if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) { /* No command received */ if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) { return; } /* Command has been received */ s->do_cmd = 0; do_cmd(s); } else { /* * Extra message out bytes received: update cmdfifo_cdb_offset * and then switch to command phase */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; s->rregs[ESP_RSEQ] = SEQ_CD; s->rregs[ESP_RINTR] |= INTR_BS; esp_raise_irq(s); } return; } if (!s->current_req) { return; } if (to_device) { /* Copy FIFO data to device */ len = MIN(s->async_len, ESP_FIFO_SZ); len = MIN(len, fifo8_num_used(&s->fifo)); n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len); s->async_buf += n; s->async_len -= n; s->ti_size += n; if (n < len) { /* Unaligned accesses can cause FIFO wraparound */ len = len - n; n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len); s->async_buf += n; s->async_len -= n; s->ti_size += n; } if (s->async_len == 0) { scsi_req_continue(s->current_req); return; } if (esp_get_tc(s) == 0) { esp_lower_drq(s); esp_dma_done(s); } return; } else { if (s->async_len == 0) { /* Defer until the scsi layer has completed */ scsi_req_continue(s->current_req); s->data_in_ready = false; return; } if (esp_get_tc(s) != 0) { /* Copy device data to FIFO */ len = MIN(s->async_len, esp_get_tc(s)); len = MIN(len, fifo8_num_free(&s->fifo)); fifo8_push_all(&s->fifo, s->async_buf, len); s->async_buf += len; s->async_len -= len; s->ti_size -= len; esp_set_tc(s, esp_get_tc(s) - len); if (esp_get_tc(s) == 0) { /* Indicate transfer to FIFO is complete */ s->rregs[ESP_RSTAT] |= STAT_TC; } return; } /* Partially filled a scsi buffer. Complete immediately. */ esp_lower_drq(s); esp_dma_done(s); } }
35579b523cf8f441da12f968ce5dcf6ae0bfbfea
https://github.com/qemu/qemu
1not_vulnerable
esp: revert 75ef849696 "esp: correctly fill bus id with requested lun" This commit from nearly 10 years ago is now broken due to the improvements in esp emulation (or perhaps was never correct). It shows up as a bug in detecting the CDROM drive under MacOS. The error is caused by the MacOS CDROM driver sending this CDB with an "S without ATN" command and without DMA: 0x12 0x00 0x00 0x00 0x05 0x00 (INQUIRY) This is a valid INQUIRY command, however with this logic present the 3rd byte (0x0) is copied over the 1st byte (0x12) which silently converts the INQUIRY command to a TEST UNIT READY command before passing it to the QEMU SCSI layer. Since the TEST UNIT READY command has a zero length response the MacOS CDROM driver never receives a response and assumes the CDROM is not present. The logic was to ignore the IDENTIFY byte and copy the LUN over from the CDB, which did store the LUN in bits 5-7 of the second byte in olden times. This however is all obsolete, so just drop the code. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Message-Id: <20210519100803.10293-5-mark.cave-ayland@ilande.co.uk> [Tweaked commit message. - Paolo] Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static uint32_t get_cmd(ESPState *s, uint32_t maxlen) { uint8_t buf[ESP_CMDFIFO_SZ]; uint32_t dmalen, n; int target; target = s->wregs[ESP_WBUSID] & BUSID_DID; if (s->dma) { dmalen = MIN(esp_get_tc(s), maxlen); if (dmalen == 0) { return 0; } if (s->dma_memory_read) { s->dma_memory_read(s->dma_opaque, buf, dmalen); dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen); fifo8_push_all(&s->cmdfifo, buf, dmalen); } else { if (esp_select(s) < 0) { fifo8_reset(&s->cmdfifo); return -1; } esp_raise_drq(s); fifo8_reset(&s->cmdfifo); return 0; } } else { dmalen = MIN(fifo8_num_used(&s->fifo), maxlen); if (dmalen == 0) { return 0; } n = esp_fifo_pop_buf(&s->fifo, buf, dmalen); n = MIN(fifo8_num_free(&s->cmdfifo), n); fifo8_push_all(&s->cmdfifo, buf, n); } trace_esp_get_cmd(dmalen, target); if (esp_select(s) < 0) { fifo8_reset(&s->cmdfifo); return -1; } return dmalen; }
cf1a7a9b3721544aaa3e43d111eb383c30d71a62
https://github.com/qemu/qemu
1not_vulnerable
esp: only assert INTR_DC interrupt flag if selection fails The datasheet sequence tables confirm that when a target selection fails, only the INTR_DC interrupt flag should be asserted. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Fixes: cf47a41e05 ("esp: latch individual bits in ESP_RINTR register") Message-Id: <20210518212511.21688-2-mark.cave-ayland@ilande.co.uk> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static int esp_select(ESPState *s) { int target; target = s->wregs[ESP_WBUSID] & BUSID_DID; s->ti_size = 0; fifo8_reset(&s->fifo); if (s->current_req) { /* Started a new command before the old one finished. Cancel it. */ scsi_req_cancel(s->current_req); } s->current_dev = scsi_device_find(&s->bus, 0, target, 0); if (!s->current_dev) { /* No such drive */ s->rregs[ESP_RSTAT] = 0; s->rregs[ESP_RINTR] = INTR_DC; s->rregs[ESP_RSEQ] = SEQ_0; esp_raise_irq(s); return -1; } /* * Note that we deliberately don't raise the IRQ here: this will be done * either in do_busid_cmd() for DATA OUT transfers or by the deferred * IRQ mechanism in esp_transfer_data() for DATA IN transfers */ s->rregs[ESP_RINTR] |= INTR_FC; s->rregs[ESP_RSEQ] = SEQ_CD; return 0; }
6e1da3d305499d3907f3c7f6638243e2e09b5085
https://github.com/qemu/qemu
1not_vulnerable
runstate: Initialize Error * to NULL Based on the description of error_setg(), the local variable err in qemu_init_subsystems() should be initialized to NULL. Fixes: efd7ab22fb ("vl: extract qemu_init_subsystems") Cc: qemu-stable@nongnu.org Signed-off-by: Peng Liang <liangpeng10@huawei.com> Message-Id: <20210610131729.3906565-1-liangpeng10@huawei.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void qemu_run_exit_notifiers(void) { notifier_list_notify(&exit_notifiers, NULL); }
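The function stored with this record is only qemu_run_exit_notifiers(); the one-line fix itself lives in qemu_init_subsystems(). A hedged sketch of the pattern being corrected follows; the call site shown is illustrative, only the initialization matters.

static void qemu_init_subsystems_sketch(void)
{
    Error *err = NULL;   /* must start NULL: error_setg(&err, ...) asserts *errp == NULL */

    if (qemu_init_main_loop(&err)) {
        error_report_err(err);
        exit(1);
    }
}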
38f71349c7c4969bc14da4da1c70b8cc4078d596
https://github.com/qemu/qemu
1not_vulnerable
vl: Fix an assert failure in error path Based on the description of error_setg(), the local variable err in qemu_maybe_daemonize() should be initialized to NULL. Without the fix, the uninitialized *errp triggers an assert failure which doesn't show much valuable information. Before the fix: qemu-system-x86_64: ../util/error.c:59: error_setv: Assertion `*errp == NULL' failed. After the fix: qemu-system-x86_64: cannot create PID file: Cannot open pid file: Permission denied Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com> Message-Id: <20210610084741.456260-1-zhenzhong.duan@intel.com> Cc: qemu-stable@nongnu.org Fixes: 0546c0609c ("vl: split various early command line options to a separate function", 2020-12-10) Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void qemu_process_help_options(void) { /* * Check for -cpu help and -device help before we call select_machine(), * which will return an error if the architecture has no default machine * type and the user did not specify one, so that the user doesn't need * to say '-cpu help -machine something'. */ if (cpu_option && is_help_option(cpu_option)) { list_cpus(cpu_option); exit(0); } if (qemu_opts_foreach(qemu_find_opts("device"), device_help_func, NULL, NULL)) { exit(0); } /* -L help lists the data directories and exits. */ if (list_data_dirs) { qemu_list_data_dirs(); exit(0); } }
144bff0304b8f93cf0eb9ed432434644302dc6d5
https://github.com/qemu/qemu
1not_vulnerable
linux-user: Disable static assert involving __SIGRTMAX if it is missing This check is to ensure that the loop in signal_table_init() from SIGRTMIN to SIGRTMAX falls within the bounds of host_to_target_signal_table (_NSIG). However, it is not critical, since _NSIG is already defined to be the one larger than the largest signal supported by the system (as specified in the upcoming POSIX revision[0]). musl libc does not define __SIGRTMAX, so disabling this check when it is missing fixes one of the last remaining errors when building qemu. [0] https://www.austingroupbugs.net/view.php?id=741 Signed-off-by: Michael Forney <mforney@mforney.org> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <20210526190203.4255-1-mforney@mforney.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static void host_signal_handler(int host_signum, siginfo_t *info, void *puc);
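The record above only carries the signal-handler prototype; the actual change guards a compile-time check. A sketch of that guard as described in the commit message, assuming QEMU's QEMU_BUILD_BUG_ON macro and the usual relationship between __SIGRTMAX and _NSIG:

#ifdef __SIGRTMAX
/* Only meaningful on libcs (e.g. glibc) that define __SIGRTMAX; musl does
 * not, and _NSIG already bounds the loop in signal_table_init() anyway. */
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif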
25b2ef2e8ee23109b0c3ce9ea71330bf8a7d12bd
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: reorder free calls. Free in the correct order to avoid use-after-free. Resolves: CID 1453812 Signed-off-by: Gerd Hoffmann <kraxel@redhat.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Reviewed-by: Li Qiang <liq3ea@gmail.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Message-Id: <20210604103714.1237414-1-kraxel@redhat.com>
static void vg_resource_create_2d(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { pixman_format_code_t pformat; struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_create_2d c2d; VUGPU_FILL_CMD(c2d); virtio_gpu_bswap_32(&c2d, sizeof(c2d)); if (c2d.resource_id == 0) { g_critical("%s: resource id 0 is not allowed", __func__); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } res = virtio_gpu_find_resource(g, c2d.resource_id); if (res) { g_critical("%s: resource already exists %d", __func__, c2d.resource_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } res = g_new0(struct virtio_gpu_simple_resource, 1); res->width = c2d.width; res->height = c2d.height; res->format = c2d.format; res->resource_id = c2d.resource_id; pformat = virtio_gpu_get_pixman_format(c2d.format); if (!pformat) { g_critical("%s: host couldn't handle guest format %d", __func__, c2d.format); g_free(res); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height); res->image = pixman_image_create_bits(pformat, c2d.width, c2d.height, (uint32_t *)res->buffer.mmap, res->buffer.stride); if (!res->image) { g_critical("%s: resource creation failed %d %d %d", __func__, c2d.resource_id, c2d.width, c2d.height); vugbm_buffer_destroy(&res->buffer); g_free(res); cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; return; } QTAILQ_INSERT_HEAD(&g->reslist, res, next); }
c7ddc8821d88d958bb6d4ef1279ec3609b17ffda
https://github.com/qemu/qemu
1not_vulnerable
block: preserve errno from fdatasync failures When fdatasync() fails on a file backend we set a flag that short-circuits any future attempts to call fdatasync(). The first failure returns the true errno, but the later short-circuited calls return a generic EIO. The latter is unhelpful because fdatasync() can return a variety of errnos, including EACCES. Reviewed-by: Connor Kuehl <ckuehl@redhat.com> Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
static int handle_aiocb_flush(void *opaque) { RawPosixAIOData *aiocb = opaque; BDRVRawState *s = aiocb->bs->opaque; int ret; if (s->page_cache_inconsistent) { return -s->page_cache_inconsistent; } ret = qemu_fdatasync(aiocb->aio_fildes); if (ret == -1) { /* There is no clear definition of the semantics of a failing fsync(), * so we may have to assume the worst. The sad truth is that this * assumption is correct for Linux. Some pages are now probably marked * clean in the page cache even though they are inconsistent with the * on-disk contents. The next fdatasync() call would succeed, but no * further writeback attempt will be made. We can't get back to a state * in which we know what is on disk (we would have to rewrite * everything that was touched since the last fdatasync() at least), so * make bdrv_flush() fail permanently. Given that the behaviour isn't * really defined, I have little hope that other OSes are doing better. * * Obviously, this doesn't affect O_DIRECT, which bypasses the page * cache. */ if ((s->open_flags & O_DIRECT) == 0) { s->page_cache_inconsistent = errno; } return -errno; } return 0; }
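The same latch-the-first-errno idea, reduced to a self-contained POSIX C sketch (generic code, not QEMU's): the first failure records its errno, and every later call replays that exact value instead of a generic EIO.

#include <errno.h>
#include <unistd.h>

static int first_errno;                  /* 0 = no failure latched yet */

static int flush_fd(int fd)
{
    if (first_errno) {
        return -first_errno;             /* replay e.g. -EACCES, not -EIO */
    }
    if (fdatasync(fd) == -1) {
        first_errno = errno;             /* latch the real cause */
        return -errno;
    }
    return 0;
}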
f291f45f4ef445ccc1aaf7b5bc595dab17d89e8d
https://github.com/qemu/qemu
1not_vulnerable
softfloat: Fix tp init in float32_exp2 Typo in the conversion to FloatParts64. Fixes: 572c4d862ff2 Fixes: Coverity CID 1457457 Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Message-Id: <20210607223812.110596-1-richard.henderson@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
float32 float32_exp2(float32 a, float_status *status) { FloatParts64 xp, xnp, tp, rp; int i; float32_unpack_canonical(&xp, a, status); if (unlikely(xp.cls != float_class_normal)) { switch (xp.cls) { case float_class_snan: case float_class_qnan: parts_return_nan(&xp, status); return float32_round_pack_canonical(&xp, status); case float_class_inf: return xp.sign ? float32_zero : a; case float_class_zero: return float32_one; default: break; } g_assert_not_reached(); } float_raise(float_flag_inexact, status); float64_unpack_canonical(&tp, float64_ln2, status); xp = *parts_mul(&xp, &tp, status); xnp = xp; float64_unpack_canonical(&rp, float64_one, status); for (i = 0 ; i < 15 ; i++) { float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status); rp = *parts_muladd(&tp, &xp, &rp, 0, status); xnp = *parts_mul(&xnp, &xp, status); } return float32_round_pack_canonical(&rp, status); }
5a2d9929ac1f01a1e8ef2a3f56f69e6069863dad
https://github.com/qemu/qemu
1not_vulnerable
Fixed calculation error of pkt->header_size in fill_pkt_tcp_info() The data pointer has already skipped vnet_hdr_len in parse_packet_early(). So, we cannot subtract vnet_hdr_len again when calculating pkt->header_size in fill_pkt_tcp_info(). Otherwise, it will cause network packet comparison errors and greatly increase the frequency of checkpoints. Signed-off-by: Lei Rao <lei.rao@intel.com> Signed-off-by: Zhang Chen <chen.zhang@intel.com> Reviewed-by: Li Zhijian <lizhijian@fujitsu.com> Reviewed-by: Zhang Chen <chen.zhang@intel.com> Reviewed-by: Lukas Straub <lukasstraub2@web.de> Tested-by: Lukas Straub <lukasstraub2@web.de> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void fill_pkt_tcp_info(void *data, uint32_t *max_ack) { Packet *pkt = data; struct tcp_hdr *tcphd; tcphd = (struct tcp_hdr *)pkt->transport_header; pkt->tcp_seq = ntohl(tcphd->th_seq); pkt->tcp_ack = ntohl(tcphd->th_ack); *max_ack = *max_ack > pkt->tcp_ack ? *max_ack : pkt->tcp_ack; pkt->header_size = pkt->transport_header - (uint8_t *)pkt->data + (tcphd->th_off << 2); pkt->payload_size = pkt->size - pkt->header_size; pkt->seq_end = pkt->tcp_seq + pkt->payload_size; pkt->flags = tcphd->th_flags; }
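A worked instance of the corrected arithmetic, with assumed offsets: pkt->data already points past the vnet header, so the header size is just the distance to the transport header plus the TCP data offset (th_off, counted in 32-bit words).

#include <stdint.h>

int main(void)
{
    uint8_t buf[128] = { 0 };       /* stand-in for the packet buffer */
    uint8_t *data = buf;            /* pkt->data: vnet header already skipped */
    uint8_t *transport = buf + 54;  /* pkt->transport_header (e.g. Eth 14 + IPv6 40) */
    int th_off = 8;                 /* TCP data offset: 8 * 4 = 32 bytes */
    int header_size = (int)(transport - data) + (th_off << 2);  /* 54 + 32 = 86 */
    /* Subtracting vnet_hdr_len again here would undercount the header and
     * misalign the payload comparison between primary and secondary. */
    return header_size == 86 ? 0 : 1;
}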
c33f23a419f95da16ab4faaf08be635c89b96ff0
https://github.com/qemu/qemu
1not_vulnerable
vhost-vdpa: don't initialize backend_features We used to initialize backend_features during vhost_vdpa_init() regardless of whether or not it was supported by vhost. This will lead unsupported features like VIRTIO_F_IN_ORDER to be included and set on vhost-vdpa during vhost_dev_start. Because VIRTIO_F_IN_ORDER is not supported by vhost-vdpa, it won't be advertised to the guest, which will break the datapath. Fix this by not initializing the backend_features, so the acked_features could be built only from guest features via vhost_net_ack_features(). Fixes: 108a64818e69b ("vhost-vdpa: introduce vhost-vdpa backend") Cc: qemu-stable@nongnu.org Cc: Gautam Dawar <gdawar@xilinx.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) { uint8_t s; trace_vhost_vdpa_add_status(dev, status); if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) { return; } s |= status; vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s); }
d80f54ce53167e38623b8aafe8317458a6d7a6cd
https://github.com/qemu/qemu
1not_vulnerable
channel-socket: Only set CLOEXEC if we have space for fds MSG_CMSG_CLOEXEC cleans up received fd's; it's really only for Unix sockets, but currently we enable it for everything; some socket types (IP_MPTCP) don't like this. Only enable it when we're giving the recvmsg room to receive fd's anyway. Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Message-Id: <20210421112834.107651-2-dgilbert@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
static ssize_t qio_channel_socket_readv(QIOChannel *ioc, const struct iovec *iov, size_t niov, int **fds, size_t *nfds, Error **errp) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); ssize_t ret; struct msghdr msg = { NULL, }; char control[CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)]; int sflags = 0; memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)); msg.msg_iov = (struct iovec *)iov; msg.msg_iovlen = niov; if (fds && nfds) { msg.msg_control = control; msg.msg_controllen = sizeof(control); #ifdef MSG_CMSG_CLOEXEC sflags |= MSG_CMSG_CLOEXEC; #endif } retry: ret = recvmsg(sioc->fd, &msg, sflags); if (ret < 0) { if (errno == EAGAIN) { return QIO_CHANNEL_ERR_BLOCK; } if (errno == EINTR) { goto retry; } error_setg_errno(errp, errno, "Unable to read from socket"); return -1; } if (fds && nfds) { qio_channel_socket_copy_fds(&msg, fds, nfds); } return ret; }
787a4baf91fa2ff36b901c0b31ea73f3f0739415
https://github.com/qemu/qemu
1not_vulnerable
target/riscv/pmp: Add assert for ePMP operations Although we construct epmp_operation in such a way that it can only be between 0 and 15, Coverity complains that we don't handle the other possible cases. To fix Coverity and make it easier for humans to read, add a default case to the switch statement that calls g_assert_not_reached(). Fixes: CID 1453108 Signed-off-by: Alistair Francis <alistair.francis@wdc.com> Reviewed-by: Bin Meng <bmeng.cn@gmail.com> Reviewed-by: LIU Zhiwei <zhiwei_liu@c-sky.com> Message-id: ec5f225928eec448278c82fcb1f6805ee61dde82.1621550996.git.alistair.francis@wdc.com
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, target_ulong mode) { int i = 0; int ret = -1; int pmp_size = 0; target_ulong s = 0; target_ulong e = 0; /* Short cut if no rules */ if (0 == pmp_get_num_rules(env)) { return pmp_hart_has_privs_default(env, addr, size, privs, allowed_privs, mode); } if (size == 0) { if (riscv_feature(env, RISCV_FEATURE_MMU)) { /* * If size is unknown (0), assume that all bytes * from addr to the end of the page will be accessed. */ pmp_size = -(addr | TARGET_PAGE_MASK); } else { pmp_size = sizeof(target_ulong); } } else { pmp_size = size; } /* 1.10 draft priv spec states there is an implicit order from low to high */ for (i = 0; i < MAX_RISCV_PMPS; i++) { s = pmp_is_in_range(env, i, addr); e = pmp_is_in_range(env, i, addr + pmp_size - 1); /* partially inside */ if ((s + e) == 1) { qemu_log_mask(LOG_GUEST_ERROR, "pmp violation - access is partially inside\n"); ret = 0; break; } /* fully inside */ const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); /* * Convert the PMP permissions to match the truth table in the * ePMP spec. */ const uint8_t epmp_operation = ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) | ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) | (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) | ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2); if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) { /* * If the PMP entry is not off and the address is in range, * do the priv check */ if (!MSECCFG_MML_ISSET(env)) { /* * If mseccfg.MML Bit is not set, do pmp priv check * This will always apply to regular PMP. */ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; if ((mode != PRV_M) || pmp_is_locked(env, i)) { *allowed_privs &= env->pmp_state.pmp[i].cfg_reg; } } else { /* * If mseccfg.MML Bit set, do the enhanced pmp priv check */ if (mode == PRV_M) { switch (epmp_operation) { case 0: case 1: case 4: case 5: case 6: case 7: case 8: *allowed_privs = 0; break; case 2: case 3: case 14: *allowed_privs = PMP_READ | PMP_WRITE; break; case 9: case 10: *allowed_privs = PMP_EXEC; break; case 11: case 13: *allowed_privs = PMP_READ | PMP_EXEC; break; case 12: case 15: *allowed_privs = PMP_READ; break; default: g_assert_not_reached(); } } else { switch (epmp_operation) { case 0: case 8: case 9: case 12: case 13: case 14: *allowed_privs = 0; break; case 1: case 10: case 11: *allowed_privs = PMP_EXEC; break; case 2: case 4: case 15: *allowed_privs = PMP_READ; break; case 3: case 6: *allowed_privs = PMP_READ | PMP_WRITE; break; case 5: *allowed_privs = PMP_READ | PMP_EXEC; break; case 7: *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; break; default: g_assert_not_reached(); } } } ret = ((privs & *allowed_privs) == privs); break; } } /* No rule matched */ if (ret == -1) { return pmp_hart_has_privs_default(env, addr, size, privs, allowed_privs, mode); } return ret == 1 ? true : false; }
dd6921894905c8ce0664a77f9dac78408bc3b52d
https://github.com/qemu/qemu
1not_vulnerable
target/nios2: fix page-fit instruction count This patch fixes the calculation of the number of instructions that fit in the current page. It prevents creation of translation blocks that cross page boundaries. It is required for deterministic exception generation in icount mode. Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <162072241046.823357.10485774346114851009.stgit@pasha-ThinkPad-X280> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) { CPUNios2State *env = cs->env_ptr; DisasContext dc1, *dc = &dc1; int num_insns; /* Initialize DC */ dc->cpu_env = cpu_env; dc->cpu_R = cpu_R; dc->is_jmp = DISAS_NEXT; dc->pc = tb->pc; dc->tb = tb; dc->mem_idx = cpu_mmu_index(env, false); dc->singlestep_enabled = cs->singlestep_enabled; /* Set up instruction counts */ num_insns = 0; if (max_insns > 1) { int page_insns = (TARGET_PAGE_SIZE - (tb->pc & ~TARGET_PAGE_MASK)) / 4; if (max_insns > page_insns) { max_insns = page_insns; } } gen_tb_start(tb); do { tcg_gen_insn_start(dc->pc); num_insns++; if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) { gen_exception(dc, EXCP_DEBUG); /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ dc->pc += 4; break; } if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) { gen_io_start(); } /* Decode an instruction */ handle_instruction(dc, env); dc->pc += 4; /* Translation stops when a conditional branch is encountered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ } while (!dc->is_jmp && !tcg_op_buf_full() && num_insns < max_insns); /* Indicate where the next block should start */ switch (dc->is_jmp) { case DISAS_NEXT: case DISAS_UPDATE: /* Save the current PC back into the CPU register */ tcg_gen_movi_tl(cpu_R[R_PC], dc->pc); tcg_gen_exit_tb(NULL, 0); break; default: case DISAS_JUMP: /* The jump will already have updated the PC register */ tcg_gen_exit_tb(NULL, 0); break; case DISAS_NORETURN: case DISAS_TB_JUMP: /* nothing more to generate */ break; } /* End off the block */ gen_tb_end(tb, num_insns); /* Mark instruction starts for the final generated instruction */ tb->size = dc->pc - tb->pc; tb->icount = num_insns; #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) && qemu_log_in_addr_range(tb->pc)) { FILE *logfile = qemu_log_lock(); qemu_log("IN: %s\n", lookup_symbol(tb->pc)); log_target_disas(cs, tb->pc, dc->pc - tb->pc); qemu_log("\n"); qemu_log_unlock(logfile); } #endif }
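A worked instance of the new cap, with assumed values (4 KiB target pages, a translation block starting 8 bytes before a page boundary):

static int page_fit_example(void)
{
    const unsigned long pc = 0x1ff8;           /* assumed TB start address */
    const unsigned long page_size = 0x1000;    /* 4 KiB pages */
    /* (0x1000 - 0xff8) / 4 == 2: only two 4-byte Nios II instructions fit
     * before the boundary, so max_insns is clamped to 2 and the TB can
     * no longer straddle the page. */
    return (int)((page_size - (pc & (page_size - 1))) / 4);
}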
29c3d213f4ad69688638330728cff1a8769d7415
https://github.com/qemu/qemu
1not_vulnerable
oslib-posix: Remove OpenBSD workaround for fcntl("/dev/null", F_SETFL, O_NONBLOCK) failure OpenBSD prior to 6.3 required a workaround to utilize fcntl(F_SETFL) on memory devices. Since the only versions of OpenBSD that are officially supported and buildable are modern ones that do not have this issue, I am garbage collecting this workaround. Signed-off-by: Brad Smith <brad@comstyle.com> Message-Id: <YGYECGXQhdamEJgC@humpty.home.comstyle.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
int qemu_try_set_nonblock(int fd) { int f; f = fcntl(fd, F_GETFL); if (f == -1) { return -errno; } if (fcntl(fd, F_SETFL, f | O_NONBLOCK) == -1) { return -errno; } return 0; }
6e0c60a2be30c333b06d3558a62b0f177199cbfb
https://github.com/qemu/qemu
1not_vulnerable
target/arm: fix missing exception class The DAIF and PAC checks used raise_exception_ra to raise an exception and unwind CPU state but raise_exception_ra is currently designed for handling data aborts as the syndrome is partially precomputed and encoded in the TB and then merged in merge_syn_data_abort when handling the data abort. Using raise_exception_ra for DAIF and PAC checks results in an empty syndrome being retrieved from data[2] in restore_state_to_opc and setting ESR to 0. This manifested as: kvm [571]: Unknown exception class: esr: 0x000000 – Unknown/Uncategorized when launching a KVM guest when the host qemu used a CPU supporting EL2+pointer authentication and enabling pointer authentication in the guest. Rework raise_exception_ra such that the state is restored before raising the exception so that the exception is not clobbered by restore_state_to_opc. Fixes: 0d43e1a2d29a ("target/arm: Add PAuth helpers") Cc: Richard Henderson <richard.henderson@linaro.org> Cc: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Jamie Iles <jamie@nuviainc.com> [PMM: added comment] Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
void raise_exception(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el) { CPUState *cs = do_raise_exception(env, excp, syndrome, target_el); cpu_loop_exit(cs); }
0711a634355a68cd83966872e387402a8b4b048a
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Mark LDS{MIN,MAX} as signed operations The operands to tcg_gen_atomic_fetch_s{min,max}_i64 must be signed, so that the inputs are properly extended. Zero extend the result afterward, as needed. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/364 Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Message-id: 20210602020720.47679-1-richard.henderson@linaro.org Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void disas_ldst_atomic(DisasContext *s, uint32_t insn, int size, int rt, bool is_vector) { int rs = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int o3_opc = extract32(insn, 12, 4); bool r = extract32(insn, 22, 1); bool a = extract32(insn, 23, 1); TCGv_i64 tcg_rs, tcg_rt, clean_addr; AtomicThreeOpFn *fn = NULL; MemOp mop = s->be_data | size | MO_ALIGN; if (is_vector || !dc_isar_feature(aa64_atomics, s)) { unallocated_encoding(s); return; } switch (o3_opc) { case 000: /* LDADD */ fn = tcg_gen_atomic_fetch_add_i64; break; case 001: /* LDCLR */ fn = tcg_gen_atomic_fetch_and_i64; break; case 002: /* LDEOR */ fn = tcg_gen_atomic_fetch_xor_i64; break; case 003: /* LDSET */ fn = tcg_gen_atomic_fetch_or_i64; break; case 004: /* LDSMAX */ fn = tcg_gen_atomic_fetch_smax_i64; mop |= MO_SIGN; break; case 005: /* LDSMIN */ fn = tcg_gen_atomic_fetch_smin_i64; mop |= MO_SIGN; break; case 006: /* LDUMAX */ fn = tcg_gen_atomic_fetch_umax_i64; break; case 007: /* LDUMIN */ fn = tcg_gen_atomic_fetch_umin_i64; break; case 010: /* SWP */ fn = tcg_gen_atomic_xchg_i64; break; case 014: /* LDAPR, LDAPRH, LDAPRB */ if (!dc_isar_feature(aa64_rcpc_8_3, s) || rs != 31 || a != 1 || r != 0) { unallocated_encoding(s); return; } break; default: unallocated_encoding(s); return; } if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size); if (o3_opc == 014) { /* * LDAPR* are a special case because they are a simple load, not a * fetch-and-do-something op. * The architectural consistency requirements here are weaker than * full load-acquire (we only need "load-acquire processor consistent"), * but we choose to implement them as full LDAQ. */ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt, disas_ldst_compute_iss_sf(size, false, 0), true); tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); return; } tcg_rs = read_cpu_reg(s, rs, true); tcg_rt = cpu_reg(s, rt); if (o3_opc == 1) { /* LDCLR */ tcg_gen_not_i64(tcg_rs, tcg_rs); } /* The tcg atomic primitives are all full barriers. Therefore we * can ignore the Acquire and Release bits of this instruction. */ fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop); if ((mop & MO_SIGN) && size != MO_64) { tcg_gen_ext32u_i64(tcg_rt, tcg_rt); } }
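A concrete case showing why sign extension matters, using assumed values for an 8-bit LDSMAX (memory byte 0xff, Rs = 1):

#include <stdint.h>

int main(void)
{
    int8_t mem = (int8_t)0xff;      /* -1 when sign-extended, 255 when zero-extended */
    int64_t rs = 1;
    int64_t good = (int64_t)mem > rs ? (int64_t)mem : rs;   /* signed max: 1 (correct) */
    int64_t bad = (uint8_t)mem > rs ? (uint8_t)mem : rs;    /* unsigned max: 255 (the bug) */
    return (good == 1 && bad == 255) ? 0 : 1;
}

The zero-extension added after the atomic op (tcg_gen_ext32u_i64) then ensures the sign-extended result does not leak set high bits into the 32-bit register view.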
300137965dbacec02eb2e26b3c6763b491d1f1b2
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Fix return values in fp_sysreg_checks() The fp_sysreg_checks() function is supposed to be returning an FPSysRegCheckResult, which is an enum with three possible values. However, three places in the function "return false" (a hangover from a previous iteration of the design where the function just returned a bool). Make these return FPSysRegCheckFailed instead (for no functional change, since both false and FPSysRegCheckFailed are zero). Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210520152840.24453-6-peter.maydell@linaro.org
static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno) { if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) { return FPSysRegCheckFailed; } switch (regno) { case ARM_VFP_FPSCR: case QEMU_VFP_FPSCR_NZCV: break; case ARM_VFP_FPSCR_NZCVQC: if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { return FPSysRegCheckFailed; } break; case ARM_VFP_FPCXT_S: case ARM_VFP_FPCXT_NS: if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { return FPSysRegCheckFailed; } if (!s->v8m_secure) { return FPSysRegCheckFailed; } break; default: return FPSysRegCheckFailed; } /* * FPCXT_NS is a special case: it has specific handling for * "current FP state is inactive", and must do the PreserveFPState() * but not the usual full set of actions done by ExecuteFPCheck(). * So we don't call vfp_access_check() and the callers must handle this. */ if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) { return FPSysRegCheckDone; } return FPSysRegCheckContinue; }
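For reference, the three-valued result type used above; since C enum members start at 0, FPSysRegCheckFailed compares equal to false, which is why the stale "return false" statements happened to behave. The declaration below follows the names in the function body; the exact source layout is assumed.

typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed,     /* == 0 == false: access is invalid, UNDEF */
    FPSysRegCheckDone,       /* access fully handled, no further codegen */
    FPSysRegCheckContinue,   /* caller should emit the register access */
} FPSysRegCheckResult;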
b873ed83311d96644b544b10f6869a430660585a
https://github.com/qemu/qemu
1not_vulnerable
ppc/pef.c: initialize cgs->ready in kvmppc_svm_init() QEMU is failing to launch a CGS pSeries guest in a host that has PEF support: qemu-system-ppc64: ../softmmu/vl.c:2585: qemu_machine_creation_done: Assertion `machine->cgs->ready' failed. Aborted This is happening because we're not setting the cgs->ready flag that is asserted in qemu_machine_creation_done() during machine start. cgs->ready is set in s390_pv_kvm_init() and sev_kvm_init(). Let's set it in kvmppc_svm_init() as well. Reported-by: Ram Pai <linuxram@us.ibm.com> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com> Message-Id: <20210528201619.52363-1-danielhb413@gmail.com> Acked-by: Ram Pai <linuxram@us.ibm.com> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
int pef_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) { if (!object_dynamic_cast(OBJECT(cgs), TYPE_PEF_GUEST)) { return 0; } if (!kvm_enabled()) { error_setg(errp, "PEF requires KVM"); return -1; } return kvmppc_svm_init(cgs, errp); }
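The stored function is only the pef_kvm_init() entry point; the fix itself lands in kvmppc_svm_init(). A heavily hedged sketch of its shape follows: the capability check and error paths are assumptions, only the cgs->ready assignment is the point of the commit.

static int kvmppc_svm_init_sketch(ConfidentialGuestSupport *cgs, Error **errp)
{
    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_SECURE_GUEST)) {
        error_setg(errp, "KVM implementation does not support secure guests");
        return -1;
    }
    cgs->ready = true;   /* the missing line: asserted later by qemu_machine_creation_done() */
    return 0;
}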
52e9612ee94b58a1bc57242427b4dbe6c766d8f3
https://github.com/qemu/qemu
1not_vulnerable
target/ppc: used ternary operator when registering MAS The write callback decision when registering the MAS SPR has been turned into a ternary operation, rather than an if-then-else block. This was done because when building without TCG, even though the compiler will optimize away the pointers to spr_write_generic*, it doesn't optimize away the decision and assignment to the local pointer, creating compiler errors. This cleanup looked better than using ifdefs, so we decided to go with it. Signed-off-by: Bruno Larsen (billionai) <bruno.larsen@eldorado.org.br> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210525115355.8254-2-bruno.larsen@eldorado.org.br> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask, uint32_t *tlbncfg, uint32_t mmucfg) { #if !defined(CONFIG_USER_ONLY) const char *mas_names[8] = { "MAS0", "MAS1", "MAS2", "MAS3", "MAS4", "MAS5", "MAS6", "MAS7", }; int mas_sprn[8] = { SPR_BOOKE_MAS0, SPR_BOOKE_MAS1, SPR_BOOKE_MAS2, SPR_BOOKE_MAS3, SPR_BOOKE_MAS4, SPR_BOOKE_MAS5, SPR_BOOKE_MAS6, SPR_BOOKE_MAS7, }; int i; /* TLB assist registers */ /* XXX : not implemented */ for (i = 0; i < 8; i++) { if (mas_mask & (1 << i)) { spr_register(env, mas_sprn[i], mas_names[i], SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, (i == 2 && (env->insns_flags & PPC_64B)) ? &spr_write_generic : &spr_write_generic32, 0x00000000); } } if (env->nb_pids > 1) { /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID1, "PID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, 0x00000000); } if (env->nb_pids > 2) { /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID2, "PID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, 0x00000000); } spr_register(env, SPR_BOOKE_EPLC, "EPLC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_eplc, 0x00000000); spr_register(env, SPR_BOOKE_EPSC, "EPSC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_epsc, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MMUCFG, "MMUCFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, mmucfg); switch (env->nb_ways) { case 4: spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[3]); /* Fallthru */ case 3: spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[2]); /* Fallthru */ case 2: spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[1]); /* Fallthru */ case 1: spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[0]); /* Fallthru */ case 0: default: break; } #endif register_usprgh_sprs(env); }
ac559ecbea2649819e7b3fdd09f4e0243e0128db
https://github.com/qemu/qemu
1not_vulnerable
spapr: Set LPCR to current AIL mode when starting a new CPU TCG does not keep track of AIL mode in a central place, it's based on the current LPCR[AIL] bits. Synchronize the new CPU's LPCR to the current LPCR in rtas_start_cpu(), similarly to the way the ILE bit is synchronized. Open-code the ILE setting as well now that the caller's LPCR is available directly, there is no need for the indirection. Without this, under both TCG and KVM, adding a POWER8/9/10 class CPU with a new core ID after a modern Linux has booted results in the new CPU's LPCR missing the LPCR[AIL]=0b11 setting that the other CPUs have. This can cause crashes and unexpected behaviour. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Message-Id: <20210526091626.3388262-3-npiggin@gmail.com> Reviewed-by: Cédric Le Goater <clg@kaod.org> Reviewed-by: Greg Kurz <groug@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
static void rtas_start_cpu(PowerPCCPU *callcpu, SpaprMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { target_ulong id, start, r3; PowerPCCPU *newcpu; CPUPPCState *env; target_ulong lpcr; target_ulong caller_lpcr; if (nargs != 3 || nret != 1) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } id = rtas_ld(args, 0); start = rtas_ld(args, 1); r3 = rtas_ld(args, 2); newcpu = spapr_find_cpu(id); if (!newcpu) { /* Didn't find a matching cpu */ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } env = &newcpu->env; if (!CPU(newcpu)->halted) { rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } cpu_synchronize_state(CPU(newcpu)); env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME); hreg_compute_hflags(env); caller_lpcr = callcpu->env.spr[SPR_LPCR]; lpcr = env->spr[SPR_LPCR]; /* Set ILE the same way */ lpcr = (lpcr & ~LPCR_ILE) | (caller_lpcr & LPCR_ILE); /* Set AIL the same way */ lpcr = (lpcr & ~LPCR_AIL) | (caller_lpcr & LPCR_AIL); if (env->mmu_model == POWERPC_MMU_3_00) { /* * New cpus are expected to start in the same radix/hash mode * as the existing CPUs */ if (ppc64_v3_radix(callcpu)) { lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; } else { lpcr &= ~(LPCR_UPRT | LPCR_GTSE | LPCR_HR); } env->spr[SPR_PSSCR] &= ~PSSCR_EC; } ppc_store_lpcr(newcpu, lpcr); /* * Set the timebase offset of the new CPU to that of the invoking * CPU. This helps hotplugged CPU to have the correct timebase * offset. */ newcpu->env.tb_env->tb_offset = callcpu->env.tb_env->tb_offset; spapr_cpu_set_entry_state(newcpu, start, 0, r3, 0); qemu_cpu_kick(CPU(newcpu)); rtas_st(rets, 0, RTAS_OUT_SUCCESS); }
9f9f82dacebbb816c62d730658f14a615c3ea003
https://github.com/qemu/qemu
1not_vulnerable
spapr: nvdimm: Fix the persistent-memory root node name in device tree The FDT code is adding the pmem root node by the name "persistent-memory" when it should have been "ibm,persistent-memory". Linux fetches the device tree nodes by type, and it has been working correctly as the type is correct. If someone searched by the node's intended name it would fail, so fix that. Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com> Message-Id: <162204278956.219.9061511386011411578.stgit@cc493db1e665> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, void *fdt, int *fdt_start_offset, Error **errp) { NVDIMMDevice *nvdimm = NVDIMM(drc->dev); *fdt_start_offset = spapr_dt_nvdimm(spapr, fdt, 0, nvdimm); return 0; }
3bf0844f3be77b24cc8f56fc8df9ff199f8324cb
https://github.com/qemu/qemu
1not_vulnerable
spapr: Don't hijack current_machine->boot_order QEMU 6.0 moved all the -boot variables to the machine. In particular, the removal of the boot_order static changed the handling of '-boot once' from: if (boot_once) { qemu_boot_set(boot_once, &error_fatal); qemu_register_reset(restore_boot_order, g_strdup(boot_order)); } to if (current_machine->boot_once) { qemu_boot_set(current_machine->boot_once, &error_fatal); qemu_register_reset(restore_boot_order, g_strdup(current_machine->boot_order)); } This means that we now register as the subsequent boot order a copy of current_machine->boot_once that was just set with the previous call to qemu_boot_set(), i.e. we never transition away from the once boot order. It is certainly fragile^Wwrong for the spapr code to hijack a field of the base machine type object like that. The boot order rework simply turned this software boundary violation into an actual bug. Have the spapr code handle that with its own field in SpaprMachineState. Also kfree() the initial boot device string when "once" was used. Fixes: 4b7acd2ac821 ("vl: clean up -boot variables") Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1960119 Cc: pbonzini@redhat.com Signed-off-by: Greg Kurz <groug@kaod.org> Message-Id: <20210521160735.1901914-1-groug@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) { MachineState *machine = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); int chosen; _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); if (reset) { const char *boot_device = spapr->boot_device; char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); size_t cb = 0; char *bootlist = get_boot_devices_list(&cb); if (machine->kernel_cmdline && machine->kernel_cmdline[0]) { _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline)); } if (spapr->initrd_size) { _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start", spapr->initrd_base)); _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end", spapr->initrd_base + spapr->initrd_size)); } if (spapr->kernel_size) { uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr), cpu_to_be64(spapr->kernel_size) }; _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel", &kprop, sizeof(kprop))); if (spapr->kernel_le) { _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0)); } } if (boot_menu) { _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu))); } _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width)); _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height)); _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth)); if (cb && bootlist) { int i; for (i = 0; i < cb; i++) { if (bootlist[i] == '\n') { bootlist[i] = ' '; } } _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist)); } if (boot_device && strlen(boot_device)) { _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device)); } if (!spapr->has_graphics && stdout_path) { /* * "linux,stdout-path" and "stdout" properties are * deprecated by linux kernel. New platforms should only * use the "stdout-path" property. Set the new property * and continue using older property to remain compatible * with the existing firmware. */ _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path)); _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path)); } /* * We can deal with BAR reallocation just fine, advertise it * to the guest */ if (smc->linux_pci_probe) { _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0)); } spapr_dt_ov5_platform_support(spapr, fdt, chosen); g_free(stdout_path); g_free(bootlist); } _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5")); }
8146b357d0cb3a3f5d500a1536f9f0e1ff3302cc
https://github.com/qemu/qemu
1not_vulnerable
block-copy: fix block_copy_task_entry() progress update Don't report successful progress on failure, when call_state->ret is set. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210528141628.44287-2-vsementsov@virtuozzo.com> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static coroutine_fn int block_copy_task_entry(AioTask *task);
8081f064e404dd524b3c43248b2084dee9d32d7c
https://github.com/qemu/qemu
1not_vulnerable
block/vvfat: inherit child_vvfat_qcow from child_of_bds Recently we've fixed a crash by adding a .get_parent_aio_context handler to child_vvfat_qcow. Now we want it to support .get_parent_desc as well. child_vvfat_qcow wants to implement its own .inherit_options; that's not bad. But omitting all the other handlers is a bad idea. Let's inherit the class from child_of_bds instead, similar to chain_child_class and detach_by_driver_cb_class in test-bdrv-drain.c. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210601075218.79249-5-vsementsov@virtuozzo.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static void vvfat_qcow_options(BdrvChildRole role, bool parent_is_format, int *child_flags, QDict *child_options, int parent_flags, QDict *parent_options) { qdict_set_default_str(child_options, BDRV_OPT_READ_ONLY, "off"); qdict_set_default_str(child_options, BDRV_OPT_AUTO_READ_ONLY, "off"); qdict_set_default_str(child_options, BDRV_OPT_CACHE_NO_FLUSH, "on"); }
fa95e9fbab2c19fc07ba82988b1690f8a6ff171b
https://github.com/qemu/qemu
1not_vulnerable
block/file-posix: Try other fallbacks after invalid FALLOC_FL_ZERO_RANGE If fallocate(... FALLOC_FL_ZERO_RANGE ...) returns EINVAL, it's likely an indication that the file system is buggy and does not implement unaligned accesses right. We still might be lucky with the other fallback fallocate() calls later in this function, though, so we should not return immediately and try the others first. Since FALLOC_FL_ZERO_RANGE could also return EINVAL if the file descriptor is not a regular file, we ignore this filesystem bug silently, without printing an error message for the user. Signed-off-by: Thomas Huth <thuth@redhat.com> Message-Id: <20210527172020.847617-3-thuth@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static int handle_aiocb_write_zeroes(void *opaque) { RawPosixAIOData *aiocb = opaque; #ifdef CONFIG_FALLOCATE BDRVRawState *s = aiocb->bs->opaque; int64_t len; #endif if (aiocb->aio_type & QEMU_AIO_BLKDEV) { return handle_aiocb_write_zeroes_block(aiocb); } #ifdef CONFIG_FALLOCATE_ZERO_RANGE if (s->has_write_zeroes) { int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == -ENOTSUP) { s->has_write_zeroes = false; } else if (ret == 0 || ret != -EINVAL) { return ret; } /* * Note: Some file systems do not like unaligned byte ranges, and * return EINVAL in such a case, though they should not do it according * to the man-page of fallocate(). Thus we simply ignore this return * value and try the other fallbacks instead. */ } #endif #ifdef CONFIG_FALLOCATE_PUNCH_HOLE if (s->has_discard && s->has_fallocate) { int ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0) { ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_fallocate = false; } else if (ret == -EINVAL) { /* * Some file systems like older versions of GPFS do not like un- * aligned byte ranges, and return EINVAL in such a case, though * they should not do it according to the man-page of fallocate(). * Warn about the bad filesystem and try the final fallback instead. */ warn_report_once("Your file system is misbehaving: " "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. " "Please report this bug to your file system " "vendor."); } else if (ret != -ENOTSUP) { return ret; } else { s->has_discard = false; } } #endif #ifdef CONFIG_FALLOCATE /* Last resort: we are trying to extend the file with zeroed data. This * can be done via fallocate(fd, 0) */ len = bdrv_getlength(aiocb->bs); if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) { int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_fallocate = false; } #endif return -ENOTSUP; }
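Both versions of this write-zeroes path (here and in the next record) funnel through a small do_fallocate() wrapper. A minimal sketch of what such a helper plausibly looks like, assuming it follows file-posix.c's usual conventions (EINTR retry loop, negative-errno return; fallocate() is Linux-specific and needs _GNU_SOURCE):

#include <errno.h>
#include <fcntl.h>

static int do_fallocate_sketch(int fd, int mode, off_t offset, off_t len)
{
    do {
        if (fallocate(fd, mode, offset, len) == 0) {
            return 0;
        }
    } while (errno == EINTR);   /* retry if interrupted by a signal */
    return -errno;
}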
73ebf29729d1a40feaa9f8ab8951b6ee6dbfbede
https://github.com/qemu/qemu
1not_vulnerable
block/file-posix: Fix problem with fallocate(PUNCH_HOLE) on GPFS A customer reported that running qemu-img convert -t none -O qcow2 -f qcow2 input.qcow2 output.qcow2 fails for them with the following error message when the images are stored on a GPFS file system: qemu-img: error while writing sector 0: Invalid argument After analyzing the strace output, it seems like the problem is in handle_aiocb_write_zeroes(): The call to fallocate(FALLOC_FL_PUNCH_HOLE) returns EINVAL, which can apparently happen if the file system has a different idea of the granularity of the operation. It's arguably a bug in GPFS, since the PUNCH_HOLE mode should not result in EINVAL according to the man-page of fallocate(), but the file system is out there in production and so we have to deal with it. In commit 294682cc3a ("block: workaround for unaligned byte range in fallocate()") we already applied a work-around for the same problem to the earlier fallocate(FALLOC_FL_ZERO_RANGE) call, so do the same now for the PUNCH_HOLE call. But instead of silently catching and returning -ENOTSUP (which causes the caller to fall back to writing zeroes), let's rather inform the user once about the buggy file system and try the other fallback instead. Signed-off-by: Thomas Huth <thuth@redhat.com> Message-Id: <20210527172020.847617-2-thuth@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static int handle_aiocb_write_zeroes(void *opaque) { RawPosixAIOData *aiocb = opaque; #ifdef CONFIG_FALLOCATE BDRVRawState *s = aiocb->bs->opaque; int64_t len; #endif if (aiocb->aio_type & QEMU_AIO_BLKDEV) { return handle_aiocb_write_zeroes_block(aiocb); } #ifdef CONFIG_FALLOCATE_ZERO_RANGE if (s->has_write_zeroes) { int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == -EINVAL) { /* * Allow falling back to pwrite for file systems that * do not support fallocate() for an unaligned byte range. */ return -ENOTSUP; } if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_write_zeroes = false; } #endif #ifdef CONFIG_FALLOCATE_PUNCH_HOLE if (s->has_discard && s->has_fallocate) { int ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0) { ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_fallocate = false; } else if (ret == -EINVAL) { /* * Some file systems like older versions of GPFS do not like un- * aligned byte ranges, and return EINVAL in such a case, though * they should not do it according to the man-page of fallocate(). * Warn about the bad filesystem and try the final fallback instead. */ warn_report_once("Your file system is misbehaving: " "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. " "Please report this bug to your file system " "vendor."); } else if (ret != -ENOTSUP) { return ret; } else { s->has_discard = false; } } #endif #ifdef CONFIG_FALLOCATE /* Last resort: we are trying to extend the file with zeroed data. This * can be done via fallocate(fd, 0) */ len = bdrv_getlength(aiocb->bs); if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) { int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); if (ret == 0 || ret != -ENOTSUP) { return ret; } s->has_fallocate = false; } #endif return -ENOTSUP; }
39df2c6d57b9eaa30d37a34b5a20cbc0474725c0
https://github.com/qemu/qemu
1not_vulnerable
block/vvfat: fix vvfat_child_perm crash It's wrong to rely on s->qcow in vvfat_child_perm, as on a permission update during the bdrv_open_child() call this field is not set yet. Still, prior to aa5a04c7db27eea6b36de32f241b155f0d9ce34d, it didn't crash, as bdrv_open_child passed NULL as child to bdrv_child_perm(), and NULL was equal to NULL in the assertion (still, it was a bad guarantee that child was s->qcow, not backing :). Since aa5a04c7db27eea6b36de32f241b155f0d9ce34d "add bdrv_attach_child_noperm", bdrv_refresh_perms is called on the parent node when attaching a child, and the new correct child pointer is passed to .bdrv_child_perm. Still, s->qcow is NULL at that point. Let's rely only on role instead. Without that fix, ./build/qemu-system-x86_64 -usb -device usb-storage,drive=fat16 \ -drive \ file=fat:rw:fat-type=16:"<path of a host folder>",id=fat16,format=raw,if=none crashes: (gdb) bt 0 raise () at /lib64/libc.so.6 1 abort () at /lib64/libc.so.6 2 _nl_load_domain.cold () at /lib64/libc.so.6 3 annobin_assert.c_end () at /lib64/libc.so.6 4 vvfat_child_perm (bs=0x559186f3d690, c=0x559186f1ed20, role=3, reopen_queue=0x0, perm=0, shared=31, nperm=0x7ffe56f28298, nshared=0x7ffe56f282a0) at ../block/vvfat.c:3214 5 bdrv_child_perm (bs=0x559186f3d690, child_bs=0x559186f60190, c=0x559186f1ed20, role=3, reopen_queue=0x0, parent_perm=0, parent_shared=31, nperm=0x7ffe56f28298, nshared=0x7ffe56f282a0) at ../block.c:2094 6 bdrv_node_refresh_perm (bs=0x559186f3d690, q=0x0, tran=0x559186f65850, errp=0x7ffe56f28530) at ../block.c:2336 7 bdrv_list_refresh_perms (list=0x559186db5b90 = {...}, q=0x0, tran=0x559186f65850, errp=0x7ffe56f28530) at ../block.c:2358 8 bdrv_refresh_perms (bs=0x559186f3d690, errp=0x7ffe56f28530) at ../block.c:2419 9 bdrv_attach_child (parent_bs=0x559186f3d690, child_bs=0x559186f60190, child_name=0x559184d83e3d "write-target", child_class=0x5591852f3b00 <child_vvfat_qcow>, child_role=3, errp=0x7ffe56f28530) at ../block.c:2959 10 bdrv_open_child (filename=0x559186f5cb80 "/var/tmp/vl.7WYmFU", options=0x559186f66c20, bdref_key=0x559184d83e3d "write-target", parent=0x559186f3d690, child_class=0x5591852f3b00 <child_vvfat_qcow>, child_role=3, allow_none=false, errp=0x7ffe56f28530) at ../block.c:3351 11 enable_write_target (bs=0x559186f3d690, errp=0x7ffe56f28530) at ../block/vvfat.c:3177 12 vvfat_open (bs=0x559186f3d690, options=0x559186f42db0, flags=155650, errp=0x7ffe56f28530) at ../block/vvfat.c:1236 13 bdrv_open_driver (bs=0x559186f3d690, drv=0x5591853d97e0 <bdrv_vvfat>, node_name=0x0, options=0x559186f42db0, open_flags=155650, errp=0x7ffe56f28640) at ../block.c:1557 14 bdrv_open_common (bs=0x559186f3d690, file=0x0, options=0x559186f42db0, errp=0x7ffe56f28640) at ../block.c:1833 ...
(gdb) fr 4 #4 vvfat_child_perm (bs=0x559186f3d690, c=0x559186f1ed20, role=3, reopen_queue=0x0, perm=0, shared=31, nperm=0x7ffe56f28298, nshared=0x7ffe56f282a0) at ../block/vvfat.c:3214 3214 assert(c == s->qcow || (role & BDRV_CHILD_COW)); (gdb) p role $1 = 3 # BDRV_CHILD_DATA | BDRV_CHILD_METADATA (gdb) p *c $2 = {bs = 0x559186f60190, name = 0x559186f669d0 "write-target", klass = 0x5591852f3b00 <child_vvfat_qcow>, role = 3, opaque = 0x559186f3d690, perm = 3, shared_perm = 4, frozen = false, parent_quiesce_counter = 0, next = {le_next = 0x0, le_prev = 0x559186f41818}, next_parent = {le_next = 0x0, le_prev = 0x559186f64320}} (gdb) p s->qcow $3 = (BdrvChild *) 0x0 Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210524101257.119377-3-vsementsov@virtuozzo.com> Tested-by: John Arbuckle <programmingkidx@gmail.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static void vvfat_child_perm(BlockDriverState *bs, BdrvChild *c, BdrvChildRole role, BlockReopenQueue *reopen_queue, uint64_t perm, uint64_t shared, uint64_t *nperm, uint64_t *nshared) { if (role & BDRV_CHILD_DATA) { /* This is a private node, nobody should try to attach to it */ *nperm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE; *nshared = BLK_PERM_WRITE_UNCHANGED; } else { assert(role & BDRV_CHILD_COW); /* The backing file is there so 'commit' can use it. vvfat doesn't * access it in any way. */ *nperm = 0; *nshared = BLK_PERM_ALL; } }
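The principle behind the fix generalizes: a .bdrv_child_perm-style callback can run before driver state such as s->qcow is populated, so it must key off the stable role bits alone. A hedged standalone sketch of that idea; the role bit and permission values are illustrative, not QEMU's:

#include <stdint.h>

enum { CHILD_COW = 1 << 2 };    /* illustrative stand-in for BDRV_CHILD_COW */

static void child_perm(unsigned role, uint64_t *nperm, uint64_t *nshared)
{
    if (role & CHILD_COW) {     /* backing child: take nothing, share all */
        *nperm = 0;
        *nshared = ~UINT64_C(0);
    } else {                    /* private data child: exclusive read/write */
        *nperm = 3;
        *nshared = 0;
    }
}

int main(void)
{
    uint64_t perm, shared;
    child_perm(CHILD_COW, &perm, &shared);
    return perm == 0 ? 0 : 1;
}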
8eaf10187a2fd25aa27cb81b602815b07f9a7f89
https://github.com/qemu/qemu
1not_vulnerable
qemu-io-cmds: assert that we don't have .perm requested in no-blk case Coverity thinks blk may be NULL. It's a false positive, as described in a new comment. Fixes: Coverity CID 1453194 Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210519090532.3753-1-vsementsov@virtuozzo.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static int command(BlockBackend *blk, const cmdinfo_t *ct, int argc, char **argv) { char *cmd = argv[0]; if (!init_check_command(blk, ct)) { return -EINVAL; } if (argc - 1 < ct->argmin || (ct->argmax != -1 && argc - 1 > ct->argmax)) { if (ct->argmax == -1) { fprintf(stderr, "bad argument count %d to %s, expected at least %d arguments\n", argc-1, cmd, ct->argmin); } else if (ct->argmin == ct->argmax) { fprintf(stderr, "bad argument count %d to %s, expected %d arguments\n", argc-1, cmd, ct->argmin); } else { fprintf(stderr, "bad argument count %d to %s, expected between %d and %d arguments\n", argc-1, cmd, ct->argmin, ct->argmax); } return -EINVAL; } /* * Request additional permissions if necessary for this command. The caller * is responsible for restoring the original permissions afterwards if this * is what it wants. * * Coverity thinks that blk may be NULL in the following if condition. It's * not so: in init_check_command() we fail if blk is NULL for a command with * both CMD_FLAG_GLOBAL and CMD_NOFILE_OK flags unset. And in * qemuio_add_command() we assert that a command with a non-zero .perm field * doesn't set these flags. So, the following assertion is to silence * Coverity: */ assert(blk || !ct->perm); if (ct->perm && blk_is_available(blk)) { uint64_t orig_perm, orig_shared_perm; blk_get_perm(blk, &orig_perm, &orig_shared_perm); if (ct->perm & ~orig_perm) { uint64_t new_perm; Error *local_err = NULL; int ret; new_perm = orig_perm | ct->perm; ret = blk_set_perm(blk, new_perm, orig_shared_perm, &local_err); if (ret < 0) { error_report_err(local_err); return ret; } } } qemu_reset_optind(); return ct->cfunc(blk, argc, argv); }
ce7015d9e8669e2a45aba7a95fe6ef8a8f55bfe0
https://github.com/qemu/qemu
1not_vulnerable
hw/display/qxl: Set pci rom address aligned with page size On some MIPS systems the page size is 16K, and the qxl vga device can be used for a VM in kvm mode. The qxl pci rom size is fixed at 8K; when that is smaller than the 16K page size on the host system, it fails to be added into memslots in kvm mode, where memory_size and GPA are required to align with the page size. This patch fixes this issue. Signed-off-by: Bibo Mao <maobibo@loongson.cn> Message-Id: <1621340448-31617-1-git-send-email-maobibo@loongson.cn> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static ram_addr_t qxl_rom_size(void) { #define QXL_REQUIRED_SZ (sizeof(QXLRom) + sizeof(QXLModes) + sizeof(qxl_modes)) #define QXL_ROM_SZ 8192 QEMU_BUILD_BUG_ON(QXL_REQUIRED_SZ > QXL_ROM_SZ); return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size); }
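The alignment arithmetic is easy to check in isolation; a sketch with a local copy of the round-up formula (not QEMU's header macro):

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(n, m) (((n) + (m) - 1) / (m) * (m))

int main(void)
{
    uint64_t rom_sz = 8192;                    /* fixed QXL ROM size */
    assert(ALIGN_UP(rom_sz, 4096) == 8192);    /* 4K host pages: unchanged */
    assert(ALIGN_UP(rom_sz, 16384) == 16384);  /* 16K host pages: rounded up */
    return 0;
}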
9f22893adcb02580aee5968f32baa2cd109b3ec2
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix OOB write in 'virgl_cmd_get_capset' (CVE-2021-3546) If 'virgl_cmd_get_capset' gets a 'max_size' of 0, 'virgl_renderer_fill_caps' will write data past the end of 'resp'. This patch avoids this by checking the returned 'max_size'. virtio-gpu fix: abd7f08b23 ("display: virtio-gpu-3d: check virgl capabilities max_size") Fixes: CVE-2021-3546 Reported-by: Li Qiang <liq3ea@163.com> Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org> Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-8-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void virgl_cmd_get_capset(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_get_capset gc; struct virtio_gpu_resp_capset *resp; uint32_t max_ver, max_size; VUGPU_FILL_CMD(gc); virgl_renderer_get_cap_set(gc.capset_id, &max_ver, &max_size); if (!max_size) { cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } resp = g_malloc0(sizeof(*resp) + max_size); resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET; virgl_renderer_fill_caps(gc.capset_id, gc.capset_version, (void *)resp->capset_data); vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size); g_free(resp); }
63736af5a6571d9def93769431e0d7e38c6677bf
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory leak in 'virgl_resource_attach_backing' (CVE-2021-3544) If 'virgl_renderer_resource_attach_iov' fails, 'res_iovs' will be leaked. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 33243031da ("virtio-gpu-3d: fix memory leak in resource attach backing") Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-7-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void virgl_resource_attach_backing(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_resource_attach_backing att_rb; struct iovec *res_iovs; int ret; VUGPU_FILL_CMD(att_rb); ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs); if (ret != 0) { cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; return; } ret = virgl_renderer_resource_attach_iov(att_rb.resource_id, res_iovs, att_rb.nr_entries); if (ret != 0) { g_free(res_iovs); } }
f6091d86ba9ea05f4e111b9b42ee0005c37a6779
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory leak in 'virgl_cmd_resource_unref' (CVE-2021-3544) 'res->iov' will be leaked if the guest triggers the following sequence: virgl_cmd_create_resource_2d virgl_resource_attach_backing virgl_cmd_resource_unref This patch fixes this. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 5e8e3c4c75 ("virtio-gpu: fix resource leak in virgl_cmd_resource_unref") Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-6-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void virgl_cmd_resource_unref(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_resource_unref unref; struct iovec *res_iovs = NULL; int num_iovs = 0; VUGPU_FILL_CMD(unref); virgl_renderer_resource_detach_iov(unref.resource_id, &res_iovs, &num_iovs); g_free(res_iovs); virgl_renderer_resource_unref(unref.resource_id); }
b7afebcf9e6ecf3cf9b5a9b9b731ed04bca6aa3e
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory leak while calling 'vg_resource_unref' (CVE-2021-3544) If the guest triggers the following sequence, the attach_backing will be leaked: vg_resource_create_2d vg_resource_attach_backing vg_resource_unref This patch fixes this by freeing 'res->iov' in vg_resource_destroy. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 5e8e3c4c75 ("virtio-gpu: fix resource leak in virgl_cmd_resource_unref") Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org> Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-5-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void vg_resource_destroy(VuGpu *g, struct virtio_gpu_simple_resource *res) { int i; if (res->scanout_bitmask) { for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) { if (res->scanout_bitmask & (1 << i)) { vg_disable_scanout(g, i); } } } vugbm_buffer_destroy(&res->buffer); g_free(res->iov); pixman_image_unref(res->image); QTAILQ_REMOVE(&g->reslist, res, next); g_free(res); }
b9f79858a614d95f5de875d0ca31096eaab72c3b
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory leak in vg_resource_attach_backing (CVE-2021-3544) Check whether 'res' already has a backing attached, to avoid a memory leak. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 204f01b309 ("virtio-gpu: fix memory leak in resource attach backing") Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-4-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void vg_resource_attach_backing(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_attach_backing ab; int ret; VUGPU_FILL_CMD(ab); virtio_gpu_bswap_32(&ab, sizeof(ab)); res = virtio_gpu_find_resource(g, ab.resource_id); if (!res) { g_critical("%s: illegal resource specified %d", __func__, ab.resource_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } if (res->iov) { cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; return; } ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov); if (ret != 0) { cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; return; } res->iov_cnt = ab.nr_entries; }
86dd8fac2acc366930a5dc08d3fb1b1e816f4e1e
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix resource leak in 'vg_resource_create_2d' (CVE-2021-3544) Call 'vugbm_buffer_destroy' in the error path to avoid a resource leak. Fixes: CVE-2021-3544 Reported-by: Li Qiang <liq3ea@163.com> Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org> Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-3-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void vg_resource_create_2d(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { pixman_format_code_t pformat; struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_create_2d c2d; VUGPU_FILL_CMD(c2d); virtio_gpu_bswap_32(&c2d, sizeof(c2d)); if (c2d.resource_id == 0) { g_critical("%s: resource id 0 is not allowed", __func__); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } res = virtio_gpu_find_resource(g, c2d.resource_id); if (res) { g_critical("%s: resource already exists %d", __func__, c2d.resource_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } res = g_new0(struct virtio_gpu_simple_resource, 1); res->width = c2d.width; res->height = c2d.height; res->format = c2d.format; res->resource_id = c2d.resource_id; pformat = virtio_gpu_get_pixman_format(c2d.format); if (!pformat) { g_critical("%s: host couldn't handle guest format %d", __func__, c2d.format); g_free(res); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height); res->image = pixman_image_create_bits(pformat, c2d.width, c2d.height, (uint32_t *)res->buffer.mmap, res->buffer.stride); if (!res->image) { g_critical("%s: resource creation failed %d %d %d", __func__, c2d.resource_id, c2d.width, c2d.height); g_free(res); vugbm_buffer_destroy(&res->buffer); cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; return; } QTAILQ_INSERT_HEAD(&g->reslist, res, next); }
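The shape of the fix is the usual acquire/unwind discipline: each failure exit releases exactly what has been acquired so far, in reverse order. A hedged standalone sketch with a hypothetical two-step construction mirroring the buffer-then-image order above:

#include <stdlib.h>

static int create(void **buf_out, void **img_out)
{
    void *buf = malloc(64);         /* step 1: buffer */
    if (!buf) {
        return -1;                  /* nothing acquired yet */
    }
    void *img = calloc(1, 128);     /* step 2: image */
    if (!img) {
        free(buf);                  /* undo step 1 before failing */
        return -1;
    }
    *buf_out = buf;
    *img_out = img;
    return 0;
}

int main(void)
{
    void *buf, *img;
    if (create(&buf, &img) == 0) {
        free(img);
        free(buf);
    }
    return 0;
}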
121841b25d72d13f8cad554363138c360f1250ea
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-gpu: fix memory disclosure in virgl_cmd_get_capset_info (CVE-2021-3545) Otherwise part of 'resp' will be leaked to the guest. Fixes: CVE-2021-3545 Reported-by: Li Qiang <liq3ea@163.com> virtio-gpu fix: 42a8dadc74 ("virtio-gpu: fix information leak in getting capset info dispatch") Signed-off-by: Li Qiang <liq3ea@163.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210516030403.107723-2-liq3ea@163.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void virgl_cmd_get_capset_info(VuGpu *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_get_capset_info info; struct virtio_gpu_resp_capset_info resp; VUGPU_FILL_CMD(info); memset(&resp, 0, sizeof(resp)); if (info.capset_index == 0) { resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL; virgl_renderer_get_cap_set(resp.capset_id, &resp.capset_max_version, &resp.capset_max_size); } else if (info.capset_index == 1) { resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2; virgl_renderer_get_cap_set(resp.capset_id, &resp.capset_max_version, &resp.capset_max_size); } else { resp.capset_max_version = 0; resp.capset_max_size = 0; } resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO; vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp)); }
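The underlying rule is to zero the whole response struct before filling in the fields that apply, so unset members (and any padding) never carry host stack bytes to the guest. A standalone sketch with an illustrative struct layout:

#include <string.h>
#include <stdint.h>

struct capset_resp {
    uint32_t id;
    uint32_t max_version;
    uint32_t max_size;          /* stays 0 for unknown indexes */
};

static void fill_resp(struct capset_resp *r, uint32_t index)
{
    memset(r, 0, sizeof(*r));   /* every byte defined before copy-out */
    if (index == 0) {
        r->id = 1;              /* illustrative values */
        r->max_version = 1;
        r->max_size = 64;
    }
    /* unknown indexes: an all-zero response, nothing leaks */
}

int main(void)
{
    struct capset_resp r;
    fill_resp(&r, 2);
    return r.max_size;          /* 0 */
}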
1e157667d7657418b68fadb5cc016c6804e17501
https://github.com/qemu/qemu
1not_vulnerable
virtio-net: failover: add missing remove_migration_state_change_notifier() In the failover configuration, virtio_net_device_realize() uses add_migration_state_change_notifier() to add a state notifier, but this notifier is not removed by the unrealize function when the virtio-net card is unplugged. If the card is unplugged and a migration is started, the notifier is called, and since it is no longer valid, QEMU crashes. This patch fixes the problem by adding the remove_migration_state_change_notifier() in virtio_net_device_unrealize(). The problem can be reproduced with: $ qemu-system-x86_64 -enable-kvm -m 1g -M q35 \ -device pcie-root-port,slot=4,id=root1 \ -device pcie-root-port,slot=5,id=root2 \ -device virtio-net-pci,id=net1,mac=52:54:00:6f:55:cc,failover=on,bus=root1 \ -monitor stdio disk.qcow2 (qemu) device_del net1 (qemu) migrate "exec:gzip -c > STATEFILE.gz" Thread 1 "qemu-system-x86" received signal SIGSEGV, Segmentation fault. 0x0000000000000000 in ?? () (gdb) bt #0 0x0000000000000000 in () #1 0x0000555555d726d7 in notifier_list_notify (...) at .../util/notify.c:39 #2 0x0000555555842c1a in migrate_fd_connect (...) at .../migration/migration.c:3975 #3 0x0000555555950f7d in migration_channel_connect (...) error@entry=0x0) at .../migration/channel.c:107 #4 0x0000555555910922 in exec_start_outgoing_migration (...) at .../migration/exec.c:42 Reported-by: Igor Mammedov <imammedo@redhat.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Laurent Vivier <lvivier@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void virtio_net_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIONet *n = VIRTIO_NET(dev); int i, max_queues; /* This will stop vhost backend if appropriate. */ virtio_net_set_status(vdev, 0); g_free(n->netclient_name); n->netclient_name = NULL; g_free(n->netclient_type); n->netclient_type = NULL; g_free(n->mac_table.macs); g_free(n->vlans); if (n->failover) { device_listener_unregister(&n->primary_listener); remove_migration_state_change_notifier(&n->migration_state); } max_queues = n->multiqueue ? n->max_queues : 1; for (i = 0; i < max_queues; i++) { virtio_net_del_queue(n, i); } /* delete also control vq */ virtio_del_queue(vdev, max_queues * 2); qemu_announce_timer_del(&n->announce_timer, false); g_free(n->vqs); qemu_del_nic(n->nic); virtio_net_rsc_cleanup(n); g_free(n->rss_data.indirections_table); net_rx_pkt_uninit(n->rx_pkt); virtio_cleanup(vdev); }
4e812d2338acb354b969b59f792f413f567c0ace
https://github.com/qemu/qemu
1not_vulnerable
migration/rdma: cleanup rdma in rdma_start_incoming_migration error path The error path after calling qemu_rdma_dest_init() should do RDMA cleanup. Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com> Message-Id: <20210520081148.17001-1-lizhijian@cn.fujitsu.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
void rdma_start_incoming_migration(const char *host_port, Error **errp) { int ret; RDMAContext *rdma, *rdma_return_path = NULL; Error *local_err = NULL; trace_rdma_start_incoming_migration(); /* Avoid ram_block_discard_disable(), cannot change during migration. */ if (ram_block_discard_is_required()) { error_setg(errp, "RDMA: cannot disable RAM discard"); return; } rdma = qemu_rdma_data_init(host_port, &local_err); if (rdma == NULL) { goto err; } ret = qemu_rdma_dest_init(rdma, &local_err); if (ret) { goto err; } trace_rdma_start_incoming_migration_after_dest_init(); ret = rdma_listen(rdma->listen_id, 5); if (ret) { ERROR(errp, "listening on socket!"); goto cleanup_rdma; } trace_rdma_start_incoming_migration_after_rdma_listen(); /* initialize the RDMAContext for return path */ if (migrate_postcopy()) { rdma_return_path = qemu_rdma_data_init(host_port, &local_err); if (rdma_return_path == NULL) { goto cleanup_rdma; } qemu_rdma_return_path_dest_init(rdma_return_path, rdma); } qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration, NULL, (void *)(intptr_t)rdma); return; cleanup_rdma: qemu_rdma_cleanup(rdma); err: error_propagate(errp, local_err); if (rdma) { g_free(rdma->host); } g_free(rdma); g_free(rdma_return_path); }
efb208dc9c3f1e881aecff21fb1c7a7b6b869480
https://github.com/qemu/qemu
1not_vulnerable
migration/rdma: Fix cm_event used before being initialized A segmentation fault was triggered when I tried to abort a postcopy + rdma migration, since rdma_ack_cm_event releases an uninitialized cm_event in this case, like below: 2496 ret = rdma_get_cm_event(rdma->channel, &cm_event); 2497 if (ret) { 2498 perror("rdma_get_cm_event after rdma_connect"); 2499 ERROR(errp, "connecting to destination!"); 2500 rdma_ack_cm_event(cm_event); <<<< causes segmentation fault 2501 goto err_rdma_source_connect; 2502 } Referring to the rdma_get_cm_event() code, cm_event will be updated/changed only if rdma_get_cm_event() returns 0, so it's okay to remove the ack in the error path. Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com> Message-Id: <20210519064740.10828-1-lizhijian@cn.fujitsu.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
static int qemu_rdma_connect(RDMAContext *rdma, Error **errp) { RDMACapabilities cap = { .version = RDMA_CONTROL_VERSION_CURRENT, .flags = 0, }; struct rdma_conn_param conn_param = { .initiator_depth = 2, .retry_count = 5, .private_data = &cap, .private_data_len = sizeof(cap), }; struct rdma_cm_event *cm_event; int ret; /* * Only negotiate the capability with destination if the user * on the source first requested the capability. */ if (rdma->pin_all) { trace_qemu_rdma_connect_pin_all_requested(); cap.flags |= RDMA_CAPABILITY_PIN_ALL; } caps_to_network(&cap); ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); if (ret) { ERROR(errp, "posting second control recv"); goto err_rdma_source_connect; } ret = rdma_connect(rdma->cm_id, &conn_param); if (ret) { perror("rdma_connect"); ERROR(errp, "connecting to destination!"); goto err_rdma_source_connect; } ret = rdma_get_cm_event(rdma->channel, &cm_event); if (ret) { perror("rdma_get_cm_event after rdma_connect"); ERROR(errp, "connecting to destination!"); goto err_rdma_source_connect; } if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) { perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect"); ERROR(errp, "connecting to destination!"); rdma_ack_cm_event(cm_event); goto err_rdma_source_connect; } rdma->connected = true; memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap)); network_to_caps(&cap); /* * Verify that the *requested* capabilities are supported by the destination * and disable them otherwise. */ if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) { ERROR(errp, "Server cannot support pinning all memory. " "Will register memory dynamically."); rdma->pin_all = false; } trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all); rdma_ack_cm_event(cm_event); rdma->control_ready_expected = 1; rdma->nb_sent = 0; return 0; err_rdma_source_connect: qemu_rdma_cleanup(rdma); return -1; }
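Why the removed ack was unsafe is easiest to see with the out-parameter contract spelled out: rdma_get_cm_event() writes its out-pointer only on success, so on failure the caller's variable still holds indeterminate stack contents. A hedged sketch with stand-in names:

struct cm_event;

/* Stand-in for rdma_get_cm_event(): writes *ev only when returning 0. */
static int get_event(struct cm_event **ev, int fail)
{
    if (fail) {
        return -1;              /* *ev deliberately left untouched */
    }
    *ev = (struct cm_event *)0; /* the real code stores the event here */
    return 0;
}

static void caller(int fail)
{
    struct cm_event *ev;        /* uninitialized on purpose */
    if (get_event(&ev, fail)) {
        /* acking ev here would use indeterminate memory; just bail */
        return;
    }
    /* ev is valid: safe to use and then ack */
}

int main(void)
{
    caller(1);
    caller(0);
    return 0;
}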
c53cd04e70641fdf9410aac40c617d074047b3e1
https://github.com/qemu/qemu
1not_vulnerable
hmp: Fix loadvm to resume the VM on success instead of failure Commit f61fe11aa6f broke hmp_loadvm() by adding an incorrect negation when converting from 0/-errno return values to a bool value. The result is that loadvm resumes the VM now if it failed and keeps it stopped if it succeeded. Fix it to restore the old behaviour and do it the other way around. Fixes: f61fe11aa6f7f8f0ffe4ddaa56a8108f3ab57854 Cc: qemu-stable@nongnu.org Reported-by: Yanhui Ma <yama@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com> Message-Id: <20210511163151.45167-1-kwolf@redhat.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
void hmp_loadvm(Monitor *mon, const QDict *qdict) { int saved_vm_running = runstate_is_running(); const char *name = qdict_get_str(qdict, "name"); Error *err = NULL; vm_stop(RUN_STATE_RESTORE_VM); if (load_snapshot(name, NULL, false, NULL, &err) && saved_vm_running) { vm_start(); } hmp_handle_error(mon, err); }
b802d14dc6f3fba988baa9804af8f4cf837c6886
https://github.com/qemu/qemu
1not_vulnerable
hw/scsi: Fix sector translation bug in scsi_unmap_complete_noio check_lba_range expects sectors to be expressed in the original qdev blocksize, but scsi_unmap_complete_noio was translating them to 512-byte sectors, which caused sense errors for larger LBAs on devices using a 4k block size. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/345 Signed-off-by: Kit Westneat <kit.westneat@gmail.com> Message-Id: <20210521142829.326217-1-kit.westneat@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) { SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); assert(r->req.aiocb == NULL); if (data->count > 0) { uint64_t sector_num = ldq_be_p(&data->inbuf[0]); uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE); r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); if (!check_lba_range(s, sector_num, nb_sectors)) { block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); goto done; } block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, r->sector_count * BDRV_SECTOR_SIZE, BLOCK_ACCT_UNMAP); r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, r->sector * BDRV_SECTOR_SIZE, r->sector_count * BDRV_SECTOR_SIZE, scsi_unmap_complete, data); data->count--; data->inbuf += 16; return; } scsi_req_complete(&r->req, GOOD); done: scsi_req_unref(&r->req); g_free(data); }
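Concrete numbers make the unit mix-up obvious; a standalone sketch assuming a 4096-byte logical block size:

#include <assert.h>
#include <stdint.h>

#define BDRV_SECTOR_SIZE 512ULL

int main(void)
{
    uint32_t blocksize = 4096;       /* qdev block size */
    uint64_t lba = 1000, count = 8;  /* UNMAP descriptor, in 4K blocks */

    /* The block layer wants 512-byte sectors: scale by 4096/512 = 8. */
    uint64_t sector = lba * (blocksize / BDRV_SECTOR_SIZE);
    uint64_t sectors = count * (blocksize / BDRV_SECTOR_SIZE);
    assert(sector == 8000 && sectors == 64);

    /* check_lba_range() must still see (1000, 8) in 4K units; feeding
     * it the scaled values rejects perfectly valid high LBAs. */
    return 0;
}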
d349f92f78d26db2805ca39a7745cc70affea021
https://github.com/qemu/qemu
1not_vulnerable
vl: allow not specifying size in -m when using -M memory-backend Starting in QEMU 6.0's commit f5c9fcb82d ("vl: separate qemu_create_machine", 2020-12-10), a function have_custom_ram_size() replaced the return value of set_memory_options(). The purpose of the return value was to record the presence of "-m size", and if it was not there, change the default RAM size to the size of the memory backend passed with "-M memory-backend". With that commit, however, have_custom_ram_size() is now queried only after set_memory_options has stored the fixed-up RAM size in QemuOpts for "future use". This was actually the only future use of the fixed-up RAM size, so remove that code and fix the bug. Cc: qemu-stable@nongnu.org Fixes: f5c9fcb82d ("vl: separate qemu_create_machine", 2020-12-10) Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void set_memory_options(MachineClass *mc) { uint64_t sz; const char *mem_str; const ram_addr_t default_ram_size = mc->default_ram_size; QemuOpts *opts = qemu_find_opts_singleton("memory"); Location loc; loc_push_none(&loc); qemu_opts_loc_restore(opts); sz = 0; mem_str = qemu_opt_get(opts, "size"); if (mem_str) { if (!*mem_str) { error_report("missing 'size' option value"); exit(EXIT_FAILURE); } sz = qemu_opt_get_size(opts, "size", ram_size); /* Fix up legacy suffix-less format */ if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) { uint64_t overflow_check = sz; sz *= MiB; if (sz / MiB != overflow_check) { error_report("too large 'size' option value"); exit(EXIT_FAILURE); } } } /* backward compatibility behaviour for case "-m 0" */ if (sz == 0) { sz = default_ram_size; } sz = QEMU_ALIGN_UP(sz, 8192); if (mc->fixup_ram_size) { sz = mc->fixup_ram_size(sz); } ram_size = sz; if (ram_size != sz) { error_report("ram size too large"); exit(EXIT_FAILURE); } maxram_size = ram_size; if (qemu_opt_get(opts, "maxmem")) { uint64_t slots; sz = qemu_opt_get_size(opts, "maxmem", 0); slots = qemu_opt_get_number(opts, "slots", 0); if (sz < ram_size) { error_report("invalid value of -m option maxmem: " "maximum memory size (0x%" PRIx64 ") must be at least " "the initial memory size (0x" RAM_ADDR_FMT ")", sz, ram_size); exit(EXIT_FAILURE); } else if (slots && sz == ram_size) { error_report("invalid value of -m option maxmem: " "memory slots were specified but maximum memory size " "(0x%" PRIx64 ") is equal to the initial memory size " "(0x" RAM_ADDR_FMT ")", sz, ram_size); exit(EXIT_FAILURE); } maxram_size = sz; ram_slots = slots; } else if (qemu_opt_get(opts, "slots")) { error_report("invalid -m option value: missing 'maxmem' option"); exit(EXIT_FAILURE); } loc_pop(&loc); }
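The divide-back overflow check in the suffix-less path deserves a worked example; a standalone sketch of the same arithmetic:

#include <assert.h>
#include <stdint.h>

#define MiB (1024ULL * 1024ULL)

int main(void)
{
    uint64_t sz = 4096, check = sz;      /* "-m 4096" means 4096 MiB */
    sz *= MiB;
    assert(sz / MiB == check);           /* no wrap: product is valid */

    uint64_t huge = UINT64_MAX / 1024, check2 = huge;
    huge *= MiB;                         /* wraps modulo 2^64 */
    assert(huge / MiB != check2);        /* divide-back exposes the wrap */
    return 0;
}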
a81a592698c5cf3d84486b00c84b7979dfa0a3da
https://github.com/qemu/qemu
1not_vulnerable
KVM: Disable manual dirty log when dirty ring enabled KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is for KVM_CLEAR_DIRTY_LOG, which is only useful for KVM_GET_DIRTY_LOG. Skip enabling it for kvm dirty ring. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will not wr-protect all the pages initially, which is against how kvm dirty ring is used - there's no way for kvm dirty ring to re-protect a page before it's notified as being written first with a GFN entry in the ring! So when KVM_DIRTY_LOG_INITIALLY_SET is enabled with dirty ring, we'll see silent data loss after migration. Signed-off-by: Peter Xu <peterx@redhat.com> Message-Id: <20210506160549.130416-10-peterx@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); static const char upgrade_note[] = "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" "(see http://sourceforge.net/projects/kvm).\n"; struct { const char *name; int num; } num_cpus[] = { { "SMP", ms->smp.cpus }, { "hotpluggable", ms->smp.max_cpus }, { NULL, } }, *nc = num_cpus; int soft_vcpus_limit, hard_vcpus_limit; KVMState *s; const KVMCapabilityInfo *missing_cap; int ret; int type = 0; uint64_t dirty_log_manual_caps; qemu_mutex_init(&kml_slots_lock); s = KVM_STATE(ms->accelerator); /* * On systems where the kernel can support different base page * sizes, host page size may be different from TARGET_PAGE_SIZE, * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum * page size for the system though. */ assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size); s->sigmask_len = 8; #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif QLIST_INIT(&s->kvm_parked_vcpus); s->fd = qemu_open_old("/dev/kvm", O_RDWR); if (s->fd == -1) { fprintf(stderr, "Could not access KVM kernel module: %m\n"); ret = -errno; goto err; } ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { if (ret >= 0) { ret = -EINVAL; } fprintf(stderr, "kvm version too old\n"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, "kvm version not supported\n"); goto err; } kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT); s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); /* If unspecified, use the default value */ if (!s->nr_slots) { s->nr_slots = 32; } s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); if (s->nr_as <= 1) { s->nr_as = 1; } s->as = g_new0(struct KVMAs, s->nr_as); if (object_property_find(OBJECT(current_machine), "kvm-type")) { g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), "kvm-type", &error_abort); type = mc->kvm_type(ms, kvm_type); } else if (mc->kvm_type) { type = mc->kvm_type(ms, NULL); } do { ret = kvm_ioctl(s, KVM_CREATE_VM, type); } while (ret == -EINTR); if (ret < 0) { fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret, strerror(-ret)); #ifdef TARGET_S390X if (ret == -EINVAL) { fprintf(stderr, "Host kernel setup problem detected. 
Please verify:\n"); fprintf(stderr, "- for kernels supporting the switch_amode or" " user_mode parameters, whether\n"); fprintf(stderr, " user space is running in primary address space\n"); fprintf(stderr, "- for kernels supporting the vm.allocate_pgste sysctl, " "whether it is enabled\n"); } #endif goto err; } s->vmfd = ret; /* check the vcpu limits */ soft_vcpus_limit = kvm_recommended_vcpus(s); hard_vcpus_limit = kvm_max_vcpus(s); while (nc->name) { if (nc->num > soft_vcpus_limit) { warn_report("Number of %s cpus requested (%d) exceeds " "the recommended cpus supported by KVM (%d)", nc->name, nc->num, soft_vcpus_limit); if (nc->num > hard_vcpus_limit) { fprintf(stderr, "Number of %s cpus requested (%d) exceeds " "the maximum cpus supported by KVM (%d)\n", nc->name, nc->num, hard_vcpus_limit); exit(1); } } nc++; } missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); if (!missing_cap) { missing_cap = kvm_check_extension_list(s, kvm_arch_required_capabilities); } if (missing_cap) { ret = -EINVAL; fprintf(stderr, "kvm does not support %s\n%s", missing_cap->name, upgrade_note); goto err; } s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); s->coalesced_pio = s->coalesced_mmio && kvm_check_extension(s, KVM_CAP_COALESCED_PIO); /* * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no * page is wr-protected initially, which is against how the kvm dirty ring is * used - the kvm dirty ring requires that all pages are wr-protected at the very * beginning. Enabling this feature for dirty ring causes data corruption. */ if (!s->kvm_dirty_ring_size) { dirty_log_manual_caps = kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET); s->manual_dirty_log_protect = dirty_log_manual_caps; if (dirty_log_manual_caps) { ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, dirty_log_manual_caps); if (ret) { warn_report("Trying to enable capability %"PRIu64" of " "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. " "Falling back to the legacy mode. ", dirty_log_manual_caps); s->manual_dirty_log_protect = 0; } } } #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif s->robust_singlestep = kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); #ifdef KVM_CAP_DEBUGREGS s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); #endif s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); #ifdef KVM_CAP_IRQ_ROUTING kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); #endif s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); s->irq_set_ioctl = KVM_IRQ_LINE; if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; } kvm_readonly_mem_allowed = (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); kvm_eventfds_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); kvm_irqfds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); kvm_resamplefds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); kvm_vm_attributes_allowed = (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); kvm_ioeventfd_any_length_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0); kvm_state = s; ret = kvm_arch_init(ms, s); if (ret < 0) { goto err; } if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } qemu_register_reset(kvm_unpoison_all, NULL); if (s->kernel_irqchip_allowed) { kvm_irqchip_create(s); } if (kvm_eventfds_allowed) { s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; } s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; kvm_memory_listener_register(s, &s->memory_listener, &address_space_memory, 0); if (kvm_eventfds_allowed) { memory_listener_register(&kvm_io_listener, &address_space_io); } memory_listener_register(&kvm_coalesced_pio_listener, &address_space_io); s->many_ioeventfds = kvm_check_many_ioeventfds(); s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); if (!s->sync_mmu) { ret = ram_block_discard_disable(true); assert(!ret); } return 0; err: assert(ret < 0); if (s->vmfd >= 0) { close(s->vmfd); } if (s->fd != -1) { close(s->fd); } g_free(s->memory_listener.slots); return ret; }
7704bb02dd73070b218fb091cdda79679dab2b8f
https://github.com/qemu/qemu
1not_vulnerable
ps2: don't raise an interrupt if queue is full ps2_queue() behaves differently than the very similar functions ps2_queue_2() to ps2_queue_4(). The first one calls update_irq() even if the queue is full, the others don't. Change ps2_queue() to be consistent with the others. Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Signed-off-by: Volker Rümelin <vr_qemu@t-online.de> Message-Id: <20210525181441.27768-2-vr_qemu@t-online.de> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
void ps2_raise_irq(PS2State *s) { s->update_irq(s->update_arg, 1); }
76968101f549fb6bb51b4bdea65e8a48307c765d
https://github.com/qemu/qemu
1not_vulnerable
ps2: fix mouse stream corruption Commit 7abe7eb294 "ps2: Fix mouse stream corruption due to lost data" added code to avoid mouse stream corruptions but the calculation of the needed free queue size was wrong. Fix this. To reproduce, open a text file with the vim 7.3 32 bit for DOS executable in a FreeDOS client started with -display sdl and move the mouse around for a few seconds. You will quickly see erratic mouse movements and unexpected mouse clicks. CuteMouse (ctmouse.exe) in FreeDOS doesn't try to re-sync the mouse stream. Fixes: 7abe7eb294 ("ps2: Fix mouse stream corruption due to lost data") Signed-off-by: Volker Rümelin <vr_qemu@t-online.de> Message-Id: <20210525181441.27768-1-vr_qemu@t-online.de> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
void ps2_keyboard_set_translation(void *opaque, int mode) { PS2KbdState *s = (PS2KbdState *)opaque; trace_ps2_keyboard_set_translation(opaque, mode); s->translate = mode; }
64ea60869be0fc80e32055912fe3c1a55290231c
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Fix decode for VDOT (indexed) We were extracting the M register twice, once incorrectly as M:vm and once correctly as rm. Remove the incorrect name and remove the incorrect decode. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210525010358.152808-87-richard.henderson@linaro.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a) { gen_helper_gvec_4 *fn_gvec; int opr_sz; if (!dc_isar_feature(aa32_dp, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vn) & 0x10)) { return false; } if ((a->vd | a->vn) & a->q) { return false; } if (!vfp_access_check(s)) { return true; } fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b; opr_sz = (1 + a->q) * 8; tcg_gen_gvec_4_ool(vfp_reg_offset(1, a->vd), vfp_reg_offset(1, a->vn), vfp_reg_offset(1, a->vm), vfp_reg_offset(1, a->vd), opr_sz, opr_sz, a->index, fn_gvec); return true; }
382c7160d1cd9e815fb94d3889a5ddcf0e1845ab
https://github.com/qemu/qemu
1not_vulnerable
hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic In icc_eoir_write() we assume that we can identify the group of the IRQ being completed based purely on which register is being written to and the current CPU state, and that "CPU state matches group indicated by register" is the only necessary access check. This isn't correct: if the CPU is not in Secure state then EOIR1 will only complete Group 1 NS IRQs, but if the CPU is in EL3 it can complete both Group 1 S and Group 1 NS IRQs. (The pseudocode ICC_EOIR1_EL1 makes this clear.) We were also missing the logic to prevent EOIR0 writes completing G0 IRQs when they should not. Rearrange the logic to first identify the group of the current highest priority interrupt and then look at whether we should complete it or ignore the access based on which register was accessed and the state of the CPU. The resulting behavioural change is: * EL3 can now complete G1NS interrupts * G0 interrupt completion is now ignored if the GIC and the CPU have the security extension enabled and the CPU is not secure Reported-by: Chan Kim <ckim@etri.re.kr> Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210510150016.24910-1-peter.maydell@linaro.org
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* End of Interrupt */ GICv3CPUState *cs = icc_cs_from_env(env); int irq = value & 0xffffff; int grp; bool is_eoir0 = ri->crm == 8; if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) { icv_eoir_write(env, ri, value); return; } trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1, gicv3_redist_affid(cs), value); if (irq >= cs->gic->num_irq) { /* This handles two cases: * 1. If software writes the ID of a spurious interrupt [ie 1020-1023] * to the GICC_EOIR, the GIC ignores that write. * 2. If software writes the number of a non-existent interrupt * this must be a subcase of "value written does not match the last * valid interrupt value read from the Interrupt Acknowledge * register" and so this is UNPREDICTABLE. We choose to ignore it. */ return; } grp = icc_highest_active_group(cs); switch (grp) { case GICV3_G0: if (!is_eoir0) { return; } if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) { return; } break; case GICV3_G1: if (is_eoir0) { return; } if (!arm_is_secure(env)) { return; } break; case GICV3_G1NS: if (is_eoir0) { return; } if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) { return; } break; default: g_assert_not_reached(); } icc_drop_prio(cs, grp); if (!icc_eoi_split(env, cs)) { /* Priority drop and deactivate not split: deactivate irq now */ icc_deactivate_irq(cs, irq); } }
219729cfbf9e979020bffedac6a790144173ec62
https://github.com/qemu/qemu
1not_vulnerable
hw/arm/smmuv3: Another range invalidation fix 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range") failed to completely fix misalignment issues with range invalidation. For instance, invalidation patterns like "invalidate 32 4kB pages starting from 0xff395000" are not correctly handled, because the previous fix only made sure the number of invalidated pages was a power of 2 but did not properly handle a start address that was not aligned with the range. This can be noticed when booting a Fedora 33 with protected virtio-blk-pci. Signed-off-by: Eric Auger <eric.auger@redhat.com> Fixes: 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range") Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, uint8_t tg, uint64_t num_pages) { SMMUDevice *sdev; QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) { IOMMUMemoryRegion *mr = &sdev->iommu; IOMMUNotifier *n; trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova, tg, num_pages); IOMMU_NOTIFIER_FOREACH(n, mr) { smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages); } } }
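The invalidation-side fix itself (in smmuv3_range_inval, not part of this record) amounts to splitting an arbitrary page range into chunks that are power-of-two sized and aligned on their own size. A hedged standalone sketch of that decomposition, using the range from the commit message and a GCC/Clang builtin:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Cover [page, page + num) with chunks whose size is a power of two
 * and whose start is aligned to that size (4K page-index units). */
static void split_range(uint64_t page, uint64_t num)
{
    while (num) {
        uint64_t align = page ? (page & -page) : num;
        uint64_t max_pow2 = UINT64_C(1) << (63 - __builtin_clzll(num));
        uint64_t chunk = align < max_pow2 ? align : max_pow2;

        printf("invalidate %" PRIu64 " page(s) at 0x%" PRIx64 "000\n",
               chunk, page);
        page += chunk;
        num -= chunk;
    }
}

int main(void)
{
    split_range(0xff395, 32);  /* "32 4kB pages starting from 0xff395000" */
    return 0;
}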
fb74a286feaa4ec2cdcda61ba570244464581ca7
https://github.com/qemu/qemu
1not_vulnerable
coroutine-sleep: disallow NULL QemuCoSleepState** argument Simplify the code by removing conditionals. qemu_co_sleep_ns can simply point the argument to an on-stack temporary. Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-id: 20210517100548.28806-3-pbonzini@redhat.com Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
void qemu_co_sleep_wake(QemuCoSleepState *sleep_state) { /* Write of schedule protected by barrier write in aio_co_schedule */ const char *scheduled = qatomic_cmpxchg(&sleep_state->co->scheduled, qemu_co_sleep_ns__scheduled, NULL); assert(scheduled == qemu_co_sleep_ns__scheduled); *sleep_state->user_state_pointer = NULL; timer_del(&sleep_state->ts); aio_co_wake(sleep_state->co); }
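The simplification works because the convenience wrapper hands the core sleep function a pointer to an on-stack handle it never looks at again, so the core can dereference its argument unconditionally. A hedged sketch of the idiom with illustrative names (in the real coroutine code the sleeping frame stays alive across the yield):

#include <stdint.h>

struct sleep_state { int armed; };

/* Core: the handle location is always valid, so no NULL branches. */
static void sleep_wakeable(int64_t ns, struct sleep_state **handle)
{
    struct sleep_state st = { .armed = 1 };
    *handle = &st;
    (void)ns;   /* ... arm timer and yield; the waker clears *handle ... */
}

/* Wrapper for callers that never cancel: the handle is a throwaway. */
static void sleep_simple(int64_t ns)
{
    struct sleep_state *scratch;
    sleep_wakeable(ns, &scratch);
}

int main(void)
{
    sleep_simple(1000000);
    return 0;
}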
e932e9f327650d4a3f745539e8c4bf4f72a08015
https://github.com/qemu/qemu
1not_vulnerable
ui/spice-display: check NULL pointer in interface_release_resource() Check rext.info to avoid potential NULL pointer dereference. A similar check exists in interface_release_resource() in hw/display/qxl.c. Reported-by: Yu Lu <ini.universe@gmail.com> Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com> Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org> Message-Id: <20210520105833.183160-1-mcascell@redhat.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void interface_release_resource(QXLInstance *sin, QXLReleaseInfoExt rext) { SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl); SimpleSpiceUpdate *update; SimpleSpiceCursor *cursor; QXLCommandExt *ext; if (!rext.info) { return; } ext = (void *)(intptr_t)(rext.info->id); switch (ext->cmd.type) { case QXL_CMD_DRAW: update = container_of(ext, SimpleSpiceUpdate, ext); qemu_spice_destroy_update(ssd, update); break; case QXL_CMD_CURSOR: cursor = container_of(ext, SimpleSpiceCursor, ext); g_free(cursor); break; default: g_assert_not_reached(); } }
735aa900e4bf57b777ac620bed7c88234ec4b601
https://github.com/qemu/qemu
1not_vulnerable
target/xtensa: fix access ring in l32ex l32ex does memory access as all regular load/store operations at CRING level. Fix apparent pasto from l32e that caused it to use RING instead. This is a correctness issue, not a security issue, because in the worst case the privilege level of memory access may be lowered, resulting in an exception when the correct implementation would've succeeded. In no case it would allow memory access that would've raised an exception in the correct implementation. Cc: qemu-stable@nongnu.org Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_mov_i32(addr, arg[1].in); gen_load_store_alignment(dc, 2, addr, true); gen_check_exclusive(dc, addr, false); tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->cring, MO_TEUL); tcg_gen_mov_i32(cpu_exclusive_addr, addr); tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out); tcg_temp_free(addr); }
9b21049edd3c352efc615e030cd8e931e0c6f910
https://github.com/qemu/qemu
1not_vulnerable
target/i386: Make sure that vsyscall's tb->size != 0 tb_gen_code() assumes that tb->size must never be zero; otherwise it may produce spurious exceptions. For x86_64 this may happen when creating a translation block for the vsyscall page. Fix by pretending that vsyscall translation blocks have at least one instruction. Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210519045738.1335210-2-iii@linux.ibm.com> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); target_ulong pc_next; #ifdef TARGET_VSYSCALL_PAGE /* * Detect entry into the vsyscall page and invoke the syscall. */ if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) { gen_exception(dc, EXCP_VSYSCALL, dc->base.pc_next); dc->base.pc_next = dc->pc + 1; return; } #endif pc_next = disas_insn(dc, cpu); if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) { /* if single step mode, we generate only one instruction and generate an exception */ /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear the flag and abort the translation to give the irqs a chance to happen */ dc->base.is_jmp = DISAS_TOO_MANY; } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT) && ((pc_next & TARGET_PAGE_MASK) != ((pc_next + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK) || (pc_next & ~TARGET_PAGE_MASK) == 0)) { /* Do not cross the boundary of the pages in icount mode, it can cause an exception. Do it only when boundary is crossed by the first instruction in the block. If current instruction already crossed the bound - it's ok, because an exception hasn't stopped this code. */ dc->base.is_jmp = DISAS_TOO_MANY; } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) { dc->base.is_jmp = DISAS_TOO_MANY; } dc->base.pc_next = pc_next; }
0b00b0c1e05b34904635cf1b5cfdd945d1a8475e
https://github.com/qemu/qemu
1not_vulnerable
accel/tcg: Assert that tb->size != 0 after translation If arch-specific code generates a translation block of size 0, tb_gen_code() may generate a spurious exception. Add an assertion in order to catch such situations early. Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com> Reviewed-by: David Hildenbrand <david@redhat.com> Message-Id: <20210416154939.32404-5-iii@linux.ibm.com> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, int cflags) { CPUArchState *env = cpu->env_ptr; TranslationBlock *tb, *existing_tb; tb_page_addr_t phys_pc, phys_page2; target_ulong virt_page2; tcg_insn_unit *gen_code_buf; int gen_code_size, search_size, max_insns; #ifdef CONFIG_PROFILER TCGProfile *prof = &tcg_ctx->prof; int64_t ti; #endif assert_memory_lock(); qemu_thread_jit_write(); phys_pc = get_page_addr_code(env, pc); if (phys_pc == -1) { /* Generate a one-shot TB with 1 insn in it */ cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1; } max_insns = cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } if (max_insns > TCG_MAX_INSNS) { max_insns = TCG_MAX_INSNS; } if (cpu->singlestep_enabled || singlestep) { max_insns = 1; } buffer_overflow: tb = tcg_tb_alloc(tcg_ctx); if (unlikely(!tb)) { /* flush must be done */ tb_flush(cpu); mmap_unlock(); /* Make the execution loop process the flush as soon as possible. */ cpu->exception_index = EXCP_INTERRUPT; cpu_loop_exit(cpu); } gen_code_buf = tcg_ctx->code_gen_ptr; tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); tb->pc = pc; tb->cs_base = cs_base; tb->flags = flags; tb->cflags = cflags; tb->trace_vcpu_dstate = *cpu->trace_dstate; tcg_ctx->tb_cflags = cflags; tb_overflow: #ifdef CONFIG_PROFILER /* includes aborted translations because of exceptions */ qatomic_set(&prof->tb_count1, prof->tb_count1 + 1); ti = profile_getclock(); #endif gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0); if (unlikely(gen_code_size != 0)) { goto error_return; } tcg_func_start(tcg_ctx); tcg_ctx->cpu = env_cpu(env); gen_intermediate_code(cpu, tb, max_insns); assert(tb->size != 0); tcg_ctx->cpu = NULL; max_insns = tb->icount; trace_translate_block(tb, tb->pc, tb->tc.ptr); /* generate machine code */ tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; if (TCG_TARGET_HAS_direct_jump) { tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; tcg_ctx->tb_jmp_target_addr = NULL; } else { tcg_ctx->tb_jmp_insn_offset = NULL; tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; } #ifdef CONFIG_PROFILER qatomic_set(&prof->tb_count, prof->tb_count + 1); qatomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti); ti = profile_getclock(); #endif gen_code_size = tcg_gen_code(tcg_ctx, tb); if (unlikely(gen_code_size < 0)) { error_return: switch (gen_code_size) { case -1: /* * Overflow of code_gen_buffer, or the current slice of it. * * TODO: We don't need to re-do gen_intermediate_code, nor * should we re-do the tcg optimization currently hidden * inside tcg_gen_code. All that should be required is to * flush the TBs, allocate a new TB, re-initialize it per * above, and re-do the actual code generation. */ qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, "Restarting code generation for " "code_gen_buffer overflow\n"); goto buffer_overflow; case -2: /* * The code generated for the TranslationBlock is too large. * The maximum size allowed by the unwind info is 64k. * There may be stricter constraints from relocations * in the tcg backend. * * Try again with half as many insns as we attempted this time. * If a single insn overflows, there's a bug somewhere... 
*/ assert(max_insns > 1); max_insns /= 2; qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, "Restarting code generation with " "smaller translation block (max %d insns)\n", max_insns); goto tb_overflow; default: g_assert_not_reached(); } } search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); if (unlikely(search_size < 0)) { goto buffer_overflow; } tb->tc.size = gen_code_size; #ifdef CONFIG_PROFILER qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti); qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size); qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size); qatomic_set(&prof->search_out_len, prof->search_out_len + search_size); #endif #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && qemu_log_in_addr_range(tb->pc)) { FILE *logfile = qemu_log_lock(); int code_size, data_size; const tcg_target_ulong *rx_data_gen_ptr; size_t chunk_start; int insn = 0; if (tcg_ctx->data_gen_ptr) { rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr); code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr; data_size = gen_code_size - code_size; } else { rx_data_gen_ptr = 0; code_size = gen_code_size; data_size = 0; } /* Dump header and the first instruction */ qemu_log("OUT: [size=%d]\n", gen_code_size); qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", tcg_ctx->gen_insn_data[insn][0]); chunk_start = tcg_ctx->gen_insn_end_off[insn]; log_disas(tb->tc.ptr, chunk_start); /* * Dump each instruction chunk, wrapping up empty chunks into * the next instruction. The whole array is offset so the * first entry is the beginning of the 2nd instruction. */ while (insn < tb->icount) { size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; if (chunk_end > chunk_start) { qemu_log(" -- guest addr 0x" TARGET_FMT_lx "\n", tcg_ctx->gen_insn_data[insn][0]); log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start); chunk_start = chunk_end; } insn++; } if (chunk_start < code_size) { qemu_log(" -- tb slow paths + alignment\n"); log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start); } /* Finally dump any data we may have after the block */ if (data_size) { int i; qemu_log(" data: [size=%d]\n", data_size); for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) { if (sizeof(tcg_target_ulong) == 8) { qemu_log("0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n", (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); } else if (sizeof(tcg_target_ulong) == 4) { qemu_log("0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n", (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); } else { qemu_build_not_reached(); } } } qemu_log("\n"); qemu_log_flush(); qemu_log_unlock(logfile); } #endif qatomic_set(&tcg_ctx->code_gen_ptr, (void *) ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, CODE_GEN_ALIGN)); /* init jump list */ qemu_spin_init(&tb->jmp_lock); tb->jmp_list_head = (uintptr_t)NULL; tb->jmp_list_next[0] = (uintptr_t)NULL; tb->jmp_list_next[1] = (uintptr_t)NULL; tb->jmp_dest[0] = (uintptr_t)NULL; tb->jmp_dest[1] = (uintptr_t)NULL; /* init original jump addresses which have been set during tcg_gen_code() */ if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { tb_reset_jump(tb, 0); } if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { tb_reset_jump(tb, 1); } /* * If the TB is not associated with a physical RAM page then * it must be a temporary one-insn TB, and we have nothing to do * except fill in the page_addr[] fields. Return early before * attempting to link to other TBs or add to the lookup table. 
*/ if (phys_pc == -1) { tb->page_addr[0] = tb->page_addr[1] = -1; return tb; } /* check next page if needed */ virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; phys_page2 = -1; if ((pc & TARGET_PAGE_MASK) != virt_page2) { phys_page2 = get_page_addr_code(env, virt_page2); } /* * No explicit memory barrier is required -- tb_link_page() makes the * TB visible in a consistent state. */ existing_tb = tb_link_page(tb, phys_pc, phys_page2); /* if the TB already exists, discard what we just translated */ if (unlikely(existing_tb != tb)) { uintptr_t orig_aligned = (uintptr_t)gen_code_buf; orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); tb_destroy(tb); return existing_tb; } tcg_tb_insert(tb); return tb; }
86131c71b13257e095d8c4f4453d52cbc6553c07
https://github.com/qemu/qemu
1not_vulnerable
target/s390x: Fix translation exception on illegal instruction Hitting an uretprobe in a s390x TCG guest causes a SIGSEGV. What happens is: * uretprobe maps a userspace page containing an invalid instruction. * uretprobe replaces the target function's return address with the address of that page. * When tb_gen_code() is called on that page, tb->size ends up being 0 (because the page starts with the invalid instruction), which causes virt_page2 to point to the previous page. * The previous page is not mapped, so this causes a spurious translation exception. tb->size must never be 0: even if there is an illegal instruction, the instruction bytes that have been looked at must count towards tb->size. So adjust s390x's translate_one() to act this way for both illegal instructions and instructions that are known to generate exceptions. Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com> Reviewed-by: David Hildenbrand <david@redhat.com> Message-Id: <20210416154939.32404-2-iii@linux.ibm.com> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) { const DisasInsn *insn; DisasJumpType ret = DISAS_NEXT; DisasOps o = {}; bool icount = false; /* Search for the insn in the table. */ insn = extract_insn(env, s); /* Emit insn_start now that we know the ILEN. */ tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen); /* Not found means unimplemented/illegal opcode. */ if (insn == NULL) { qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n", s->fields.op, s->fields.op2); gen_illegal_opcode(s); ret = DISAS_NORETURN; goto out; } #ifndef CONFIG_USER_ONLY if (s->base.tb->flags & FLAG_MASK_PER) { TCGv_i64 addr = tcg_const_i64(s->base.pc_next); gen_helper_per_ifetch(cpu_env, addr); tcg_temp_free_i64(addr); } #endif /* process flags */ if (insn->flags) { /* privileged instruction */ if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) { gen_program_exception(s, PGM_PRIVILEGED); ret = DISAS_NORETURN; goto out; } /* if AFP is not enabled, instructions and registers are forbidden */ if (!(s->base.tb->flags & FLAG_MASK_AFP)) { uint8_t dxc = 0; if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) { dxc = 1; } if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) { dxc = 1; } if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) { dxc = 1; } if (insn->flags & IF_BFP) { dxc = 2; } if (insn->flags & IF_DFP) { dxc = 3; } if (insn->flags & IF_VEC) { dxc = 0xfe; } if (dxc) { gen_data_exception(dxc); ret = DISAS_NORETURN; goto out; } } /* if vector instructions not enabled, executing them is forbidden */ if (insn->flags & IF_VEC) { if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) { gen_data_exception(0xfe); ret = DISAS_NORETURN; goto out; } } /* input/output is the special case for icount mode */ if (unlikely(insn->flags & IF_IO)) { icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT; if (icount) { gen_io_start(); } } } /* Check for insn specification exceptions. */ if (insn->spec) { if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) || (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) || (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) || (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) || (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) { gen_program_exception(s, PGM_SPECIFICATION); ret = DISAS_NORETURN; goto out; } } /* Implement the instruction. */ if (insn->help_in1) { insn->help_in1(s, &o); } if (insn->help_in2) { insn->help_in2(s, &o); } if (insn->help_prep) { insn->help_prep(s, &o); } if (insn->help_op) { ret = insn->help_op(s, &o); } if (ret != DISAS_NORETURN) { if (insn->help_wout) { insn->help_wout(s, &o); } if (insn->help_cout) { insn->help_cout(s, &o); } } /* Free any temporaries created by the helpers. */ if (o.out && !o.g_out) { tcg_temp_free_i64(o.out); } if (o.out2 && !o.g_out2) { tcg_temp_free_i64(o.out2); } if (o.in1 && !o.g_in1) { tcg_temp_free_i64(o.in1); } if (o.in2 && !o.g_in2) { tcg_temp_free_i64(o.in2); } if (o.addr1) { tcg_temp_free_i64(o.addr1); } /* io should be the last instruction in tb when icount is enabled */ if (unlikely(icount && ret == DISAS_NEXT)) { ret = DISAS_PC_STALE; } #ifndef CONFIG_USER_ONLY if (s->base.tb->flags & FLAG_MASK_PER) { /* An exception might be triggered, save PSW if not already done. */ if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) { tcg_gen_movi_i64(psw_addr, s->pc_tmp); } /* Call the helper to check for a possible PER exception. */ gen_helper_per_check_exception(cpu_env); } #endif out: /* Advance to the next instruction. 
*/ s->base.pc_next = s->pc_tmp; return ret; }
04ceb61a4075fadbf374ef89662c41999da83489
https://github.com/qemu/qemu
1not_vulnerable
virtio: Fail if iommu_platform is requested, but unsupported Commit 2943b53f6 ('virtio: force VIRTIO_F_IOMMU_PLATFORM') made sure that vhost can't just reject VIRTIO_F_IOMMU_PLATFORM when it was requested. However, just adding it back to the negotiated flags isn't right either, because it promises the guest support that the device actually doesn't have. One example of a vhost-user device that doesn't support the flag is the vhost-user-blk export of QEMU. Instead of successfully creating a device that doesn't work, just fail to plug the device when the feature was requested but the device doesn't support it. This results in much clearer error messages. Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1935019 Signed-off-by: Kevin Wolf <kwolf@redhat.com> Reviewed-by: Raphael Norwitz <raphael.norwitz@nutanix.com> Message-Id: <20210429171316.162022-6-kwolf@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
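The shape of the check, sketched with plain integers rather than the virtio API (the bit position matches the virtio spec, but treat it as illustrative): remember whether the feature was requested before the device filters the feature bits, then refuse to plug instead of silently re-adding the bit.

    #include <stdbool.h>
    #include <stdint.h>

    #define F_IOMMU_PLATFORM (1ull << 33)   /* illustrative bit position */

    /* requested: bits asked for on the command line;
     * supported: bits the device model actually implements. */
    static bool plug_ok(uint64_t requested, uint64_t supported)
    {
        bool want_iommu = requested & F_IOMMU_PLATFORM;
        uint64_t negotiated = requested & supported;

        if (want_iommu && !(negotiated & F_IOMMU_PLATFORM)) {
            return false;   /* fail realize with a clear error instead */
        }
        return true;
    }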
void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) { DeviceState *qdev = DEVICE(vdev); BusState *qbus = BUS(qdev_get_parent_bus(qdev)); VirtioBusState *bus = VIRTIO_BUS(qbus); VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus); VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); bool has_iommu = virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); Error *local_err = NULL; DPRINTF("%s: plug device.\n", qbus->name); if (klass->pre_plugged != NULL) { klass->pre_plugged(qbus->parent, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } /* Get the features of the plugged device. */ assert(vdc->get_features != NULL); vdev->host_features = vdc->get_features(vdev, vdev->host_features, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (has_iommu && !virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { error_setg(errp, "iommu_platform=true is not supported by the device"); return; } if (klass->device_plugged != NULL) { klass->device_plugged(qbus->parent, &local_err); } if (local_err) { error_propagate(errp, local_err); return; } if (klass->get_dma_as != NULL && has_iommu) { virtio_add_feature(&vdev->host_features, VIRTIO_F_IOMMU_PLATFORM); vdev->dma_as = klass->get_dma_as(qbus->parent); } else { vdev->dma_as = &address_space_memory; } }
f26729715ef21325f972f693607580a829ad1cbb
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-blk: Make sure to set Error on realize failure We have to set errp before jumping to virtio_err, otherwise the caller (virtio_device_realize()) will take this as success and crash when it later tries to access things that we've already freed in the error path. Fixes: 77542d431491788d1e8e79d93ce10172ef207775 Signed-off-by: Kevin Wolf <kwolf@redhat.com> Message-Id: <20210429171316.162022-2-kwolf@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Eric Blake <eblake@redhat.com> Acked-by: Raphael Norwitz <raphael.norwitz@nutanix.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
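The bug class in miniature, with stand-in error plumbing rather than QEMU's Error API: a realize function that jumps to its cleanup label without setting *errp makes the caller believe realize succeeded, and the caller then touches state the error path already freed.

    typedef const char *Error;                        /* toy stand-in */

    static int connect_backend(void) { return -1; }   /* pretend failure */

    static void device_realize(Error *errp)
    {
        if (connect_backend() < 0) {
            *errp = "device: backend connection failed";  /* set BEFORE the jump */
            goto fail;
        }
        return;

    fail:
        /* free queues, inflight tracking, ... caller must see *errp != NULL */
        ;
    }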
static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserBlk *s = VHOST_USER_BLK(vdev); int i, ret; if (!s->chardev.chr) { error_setg(errp, "vhost-user-blk: chardev is mandatory"); return; } if (s->num_queues == VHOST_USER_BLK_AUTO_NUM_QUEUES) { s->num_queues = 1; } if (!s->num_queues || s->num_queues > VIRTIO_QUEUE_MAX) { error_setg(errp, "vhost-user-blk: invalid number of IO queues"); return; } if (!s->queue_size) { error_setg(errp, "vhost-user-blk: queue size must be non-zero"); return; } if (s->queue_size > VIRTQUEUE_MAX_SIZE) { error_setg(errp, "vhost-user-blk: queue size must not exceed %d", VIRTQUEUE_MAX_SIZE); return; } if (!vhost_user_init(&s->vhost_user, &s->chardev, errp)) { return; } virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, sizeof(struct virtio_blk_config)); s->virtqs = g_new(VirtQueue *, s->num_queues); for (i = 0; i < s->num_queues; i++) { s->virtqs[i] = virtio_add_queue(vdev, s->queue_size, vhost_user_blk_handle_output); } s->inflight = g_new0(struct vhost_inflight, 1); s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues); s->connected = false; qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event_realize, NULL, (void *)dev, NULL, true); reconnect: if (qemu_chr_fe_wait_connected(&s->chardev, errp) < 0) { goto virtio_err; } /* check whether vhost_user_blk_connect() failed or not */ if (!s->connected) { goto reconnect; } ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg, sizeof(struct virtio_blk_config)); if (ret < 0) { error_report("vhost-user-blk: get block config failed"); goto reconnect; } /* we're fully initialized, now we can operate, so change the handler */ qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event_oper, NULL, (void *)dev, NULL, true); return; virtio_err: g_free(s->vhost_vqs); s->vhost_vqs = NULL; g_free(s->inflight); s->inflight = NULL; for (i = 0; i < s->num_queues; i++) { virtio_delete_queue(s->virtqs[i]); } g_free(s->virtqs); virtio_cleanup(vdev); vhost_user_cleanup(&s->vhost_user); }
e3fc91aaaacbdc2a88bbb4109c8a1cded628e36f
https://github.com/qemu/qemu
1not_vulnerable
block: Fix Transaction leak in bdrv_reopen_multiple() Like other error paths, this one needs to call tran_finalize() and clean up the BlockReopenQueue, too. Fixes: CID 1452772 Fixes: 72373e40fbc7e4218061a8211384db362d3e7348 Signed-off-by: Kevin Wolf <kwolf@redhat.com> Message-Id: <20210503110555.24001-3-kwolf@redhat.com> Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
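The ownership rule the fix enforces, sketched with toy transaction helpers (the real API also records undo actions): every tran_new() must be balanced by exactly one commit/abort/finalize on every path out of the function, including early error returns.

    #include <stdlib.h>

    typedef struct Transaction { int dummy; } Transaction;
    static Transaction *tran_new(void)      { return calloc(1, sizeof(Transaction)); }
    static void tran_commit(Transaction *t) { free(t); }   /* apply + free */
    static void tran_abort(Transaction *t)  { free(t); }   /* roll back + free */

    static int reopen_all(int (*prepare)(Transaction *))
    {
        Transaction *tran = tran_new();
        int ret = prepare(tran);

        if (ret < 0) {
            tran_abort(tran);    /* the path that used to leak */
            return ret;
        }
        tran_commit(tran);
        return 0;
    }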
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) { int ret = -1; BlockReopenQueueEntry *bs_entry, *next; Transaction *tran = tran_new(); g_autoptr(GHashTable) found = NULL; g_autoptr(GSList) refresh_list = NULL; assert(bs_queue != NULL); QTAILQ_FOREACH(bs_entry, bs_queue, entry) { ret = bdrv_flush(bs_entry->state.bs); if (ret < 0) { error_setg_errno(errp, -ret, "Error flushing drive"); goto abort; } } QTAILQ_FOREACH(bs_entry, bs_queue, entry) { assert(bs_entry->state.bs->quiesce_counter > 0); ret = bdrv_reopen_prepare(&bs_entry->state, bs_queue, tran, errp); if (ret < 0) { goto abort; } bs_entry->prepared = true; } found = g_hash_table_new(NULL, NULL); QTAILQ_FOREACH(bs_entry, bs_queue, entry) { BDRVReopenState *state = &bs_entry->state; refresh_list = bdrv_topological_dfs(refresh_list, found, state->bs); if (state->old_backing_bs) { refresh_list = bdrv_topological_dfs(refresh_list, found, state->old_backing_bs); } } /* * Note that the file-posix driver relies on the permission update done * during reopen (even if no permission changed), because it wants "new" * permissions for reconfiguring the fd, and that's why it does it in * raw_check_perm(), not in raw_reopen_prepare(), which is called with "old" * permissions. */ ret = bdrv_list_refresh_perms(refresh_list, bs_queue, tran, errp); if (ret < 0) { goto abort; } /* * If we reach this point, we have success and just need to apply the * changes. * * Reverse order is used to accommodate the qcow2 driver: on commit it needs * to write the IN_USE flag to the image, to mark bitmaps in the image as * invalid. Children usually come after their parents in the reopen queue, * so go from the last to the first element. */ QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) { bdrv_reopen_commit(&bs_entry->state); } tran_commit(tran); QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) { BlockDriverState *bs = bs_entry->state.bs; if (bs->drv->bdrv_reopen_commit_post) { bs->drv->bdrv_reopen_commit_post(&bs_entry->state); } } ret = 0; goto cleanup; abort: tran_abort(tran); QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { if (bs_entry->prepared) { bdrv_reopen_abort(&bs_entry->state); } qobject_unref(bs_entry->state.explicit_options); qobject_unref(bs_entry->state.options); } cleanup: QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { g_free(bs_entry); } g_free(bs_queue); return ret; }
e878bb1293d2cd0082550b320c3ccf245d0a69d4
https://github.com/qemu/qemu
1not_vulnerable
block: Fix Transaction leak in bdrv_root_attach_child() The error path needs to call tran_finalize(), too. Fixes: CID 1452773 Fixes: 548a74c0dbc858edd1a7ee3045b5f2fe710bd8b1 Signed-off-by: Kevin Wolf <kwolf@redhat.com> Message-Id: <20210503110555.24001-2-kwolf@redhat.com> Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Reviewed-by: Max Reitz <mreitz@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name, const BdrvChildClass *child_class, BdrvChildRole child_role, uint64_t perm, uint64_t shared_perm, void *opaque, Error **errp) { int ret; BdrvChild *child = NULL; Transaction *tran = tran_new(); ret = bdrv_attach_child_common(child_bs, child_name, child_class, child_role, perm, shared_perm, opaque, &child, tran, errp); if (ret < 0) { assert(child == NULL); goto out; } ret = bdrv_refresh_perms(child_bs, errp); out: tran_finalize(tran, ret); bdrv_unref(child_bs); return child; }
8f5141a9e10c8621c902eeb969bd188d995ecc18
https://github.com/qemu/qemu
1not_vulnerable
linux-user/alpha: Fix rt sigframe return We incorrectly used the offset of the non-rt sigframe. Reviewed-by: Laurent Vivier <laurent@vivier.eu> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Message-Id: <20210422230227.314751-2-richard.henderson@linaro.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
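The bug reduced to its essence (the pre-patch line is inferred from the commit message; toy layouts, the real frames are larger): the retcode offset was taken from the non-rt frame type, so the computed return address pointed into the wrong part of the rt frame on the stack.

    #include <stddef.h>
    #include <stdint.h>

    struct target_sigframe    { uint32_t sc[11];   uint32_t retcode[3]; };
    struct target_rt_sigframe { uint32_t info[32]; uint32_t uc[80]; uint32_t retcode[3]; };

    static uint64_t retcode_addr(uint64_t frame_addr)
    {
        /* pre-fix (inferred): offsetof(struct target_sigframe, retcode) */
        return frame_addr + offsetof(struct target_rt_sigframe, retcode);
    }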
void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUAlphaState *env) { abi_ulong frame_addr, r26; struct target_rt_sigframe *frame; int i, err = 0; frame_addr = get_sigframe(ka, env, sizeof(*frame)); trace_user_setup_rt_frame(env, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { goto give_sigsegv; } tswap_siginfo(&frame->info, info); __put_user(0, &frame->uc.tuc_flags); __put_user(0, &frame->uc.tuc_link); __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); target_save_altstack(&frame->uc.tuc_stack, env); setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); for (i = 0; i < TARGET_NSIG_WORDS; ++i) { __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); } if (ka->sa_restorer) { r26 = ka->sa_restorer; } else { __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, &frame->retcode[1]); __put_user(INSN_CALLSYS, &frame->retcode[2]); /* imb(); */ r26 = frame_addr + offsetof(struct target_rt_sigframe, retcode); } if (err) { give_sigsegv: force_sigsegv(sig); return; } env->ir[IR_RA] = r26; env->ir[IR_PV] = env->pc = ka->_sa_handler; env->ir[IR_A0] = sig; env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); env->ir[IR_SP] = frame_addr; }
b4a983239343efd0a2d8a6cdf0690d0d707ec4ea
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: consider metadata read aio return value in compare Currently, the return value of the metadata blk_aio_preadv() call in the compare command is ignored. Take it into account and complete the block accounting. Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com> Fixes: 0a384f923f51 ("hw/block/nvme: add compare command") Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
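The pattern the fix applies, as a sketch with stand-in accounting helpers: an aio completion callback must honor its ret argument before using the data, and must close out the accounting on both the failure and success paths.

    struct req { int status; };

    static void acct_failed(struct req *r) { r->status = -1; }  /* stand-in */
    static void acct_done(struct req *r)   { r->status = 0;  }  /* stand-in */

    static void mdata_read_cb(void *opaque, int ret)
    {
        struct req *r = opaque;

        if (ret) {              /* the check this patch adds */
            acct_failed(r);
            return;
        }
        /* ... compare the metadata that was just read ... */
        acct_done(r);
    }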
static void nvme_compare_mdata_cb(void *opaque, int ret) { NvmeRequest *req = opaque; NvmeNamespace *ns = req->ns; NvmeCtrl *n = nvme_ctrl(req); NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; uint16_t ctrl = le16_to_cpu(rw->control); uint16_t apptag = le16_to_cpu(rw->apptag); uint16_t appmask = le16_to_cpu(rw->appmask); uint32_t reftag = le32_to_cpu(rw->reftag); struct nvme_compare_ctx *ctx = req->opaque; g_autofree uint8_t *buf = NULL; BlockBackend *blk = ns->blkconf.blk; BlockAcctCookie *acct = &req->acct; BlockAcctStats *stats = blk_get_stats(blk); uint16_t status = NVME_SUCCESS; trace_pci_nvme_compare_mdata_cb(nvme_cid(req)); if (ret) { block_acct_failed(stats, acct); nvme_aio_err(req, ret); goto out; } buf = g_malloc(ctx->mdata.iov.size); status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size, NVME_TX_DIRECTION_TO_DEVICE, req); if (status) { req->status = status; goto out; } if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { uint64_t slba = le64_to_cpu(rw->slba); uint8_t *bufp; uint8_t *mbufp = ctx->mdata.bounce; uint8_t *end = mbufp + ctx->mdata.iov.size; size_t msize = nvme_msize(ns); int16_t pil = 0; status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, ctx->mdata.bounce, ctx->mdata.iov.size, ctrl, slba, apptag, appmask, reftag); if (status) { req->status = status; goto out; } /* * When formatted with protection information, do not compare the DIF * tuple. */ if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { pil = nvme_msize(ns) - sizeof(NvmeDifTuple); } for (bufp = buf; mbufp < end; bufp += msize, mbufp += msize) { if (memcmp(bufp + pil, mbufp + pil, msize - pil)) { req->status = NVME_CMP_FAILURE; goto out; } } goto out; } if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { req->status = NVME_CMP_FAILURE; goto out; } block_acct_done(stats, acct); out: qemu_iovec_destroy(&ctx->data.iov); g_free(ctx->data.bounce); qemu_iovec_destroy(&ctx->mdata.iov); g_free(ctx->mdata.bounce); g_free(ctx); nvme_enqueue_req_completion(nvme_cq(req), req); }
57547c6023344f4b4562d7cadb1799a31c8a4549
https://github.com/qemu/qemu
1not_vulnerable
softfloat: fix return_nan vs default_nan_mode Do not call parts_silence_nan when default_nan_mode is in effect. This will avoid an assert in a later patch. Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Reviewed-by: David Hildenbrand <david@redhat.com> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
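The rule in isolation, with toy stand-ins for softfloat's internals (field and helper shapes are assumptions): when default_nan_mode is set, the result is a freshly built default NaN, so the input NaN must not be silenced at all.

    typedef struct { unsigned long long frac; } FloatParts;   /* toy */
    typedef struct { int default_nan_mode; } float_status;    /* toy */

    static FloatParts parts_default_nan(float_status *s)
    { (void)s; return (FloatParts){ 1ull << 51 }; }
    static FloatParts parts_silence_nan(FloatParts a, float_status *s)
    { (void)s; a.frac |= 1ull << 51; return a; }

    static FloatParts return_nan(FloatParts a, float_status *s)
    {
        if (s->default_nan_mode) {
            return parts_default_nan(s);   /* never call parts_silence_nan here */
        }
        return parts_silence_nan(a, s);
    }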
static float64 float64_round_pack_canonical(FloatParts p, float_status *s) { return float64_pack_raw(round_canonical(p, s, &float64_params)); }
ac1a92ec8f1328141707965bb1df4252fdb76b68
https://github.com/qemu/qemu
1not_vulnerable
linux-user/s390x: Fix frame_addr corruption in setup_frame The original value of frame_addr is still required for its use in the call to unlock_user_struct below. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: David Hildenbrand <david@redhat.com> Message-Id: <20210428193408.233706-13-richard.henderson@linaro.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
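The whole bug fits in one operator; the pre-fix line is still visible in the older revisions of this function later in this section. '+=' both computes the argument and clobbers frame_addr, which unlock_user_struct() still needs afterwards:

    /* pre-fix: side effect corrupts frame_addr */
    env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);

    /* fixed: plain addition, frame_addr stays intact for unlock_user_struct() */
    env->regs[3] = frame_addr + offsetof(typeof(*frame), sc);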
void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUS390XState *env) { sigframe *frame; abi_ulong frame_addr; frame_addr = get_sigframe(ka, env, sizeof(*frame)); trace_user_setup_frame(env, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { force_sigsegv(sig); return; } __put_user(set->sig[0], &frame->sc.oldmask[0]); save_sigregs(env, &frame->sregs); __put_user(frame_addr + offsetof(sigframe, sregs), &frame->sc.sregs); /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { env->regs[14] = ka->sa_restorer; } else { env->regs[14] = frame_addr + offsetof(sigframe, retcode); __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, &frame->retcode); } /* Set up backchain. */ __put_user(env->regs[15], (abi_ulong *) frame); /* Set up registers for signal handler */ env->regs[15] = frame_addr; /* Force default amode and default user address space control. */ env->psw.mask = PSW_MASK_64 | PSW_MASK_32 | PSW_ASC_PRIMARY | (env->psw.mask & ~PSW_MASK_ASC); env->psw.addr = ka->_sa_handler; env->regs[2] = sig; env->regs[3] = frame_addr + offsetof(typeof(*frame), sc); /* * We forgot to include these in the sigcontext. * To avoid breaking binary compatibility, they are passed as args. */ env->regs[4] = 0; /* FIXME: regs->int_code & 127 */ env->regs[5] = 0; /* FIXME: regs->int_parm_long */ env->regs[6] = 0; /* FIXME: current->thread.last_break */ /* Place signal number on stack to allow backtrace from handler. */ __put_user(env->regs[2], &frame->signo); unlock_user_struct(frame, frame_addr, 1); }
4e4a08200b6ed60055c45a0f05f4515365785a92
https://github.com/qemu/qemu
1not_vulnerable
linux-user/s390x: Fix sigcontext sregs value Using the host address of &frame->sregs is incorrect. We need the guest address. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: David Hildenbrand <david@redhat.com> Message-Id: <20210428193408.233706-7-richard.henderson@linaro.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
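The two revisions of setup_frame() shown in this section differ in exactly this store, and the contrast is the whole fix. A host pointer must never leak into guest memory:

    /* pre-fix: host address of the member */
    __put_user((abi_ulong)(unsigned long)&frame->sregs, &frame->sc.sregs);

    /* fixed: guest virtual address of the same member */
    __put_user(frame_addr + offsetof(sigframe, sregs), &frame->sc.sregs);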
void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUS390XState *env) { sigframe *frame; abi_ulong frame_addr; frame_addr = get_sigframe(ka, env, sizeof(*frame)); trace_user_setup_frame(env, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { goto give_sigsegv; } __put_user(set->sig[0], &frame->sc.oldmask[0]); save_sigregs(env, &frame->sregs); __put_user(frame_addr + offsetof(sigframe, sregs), &frame->sc.sregs); /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { env->regs[14] = ka->sa_restorer; } else { env->regs[14] = frame_addr + offsetof(sigframe, retcode); __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, &frame->retcode); } /* Set up backchain. */ __put_user(env->regs[15], (abi_ulong *) frame); /* Set up registers for signal handler */ env->regs[15] = frame_addr; env->psw.addr = ka->_sa_handler; env->regs[2] = sig; //map_signal(sig); env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); /* We forgot to include these in the sigcontext. To avoid breaking binary compatibility, they are passed as args. */ env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; /* Place signal number on stack to allow backtrace from handler. */ __put_user(env->regs[2], &frame->signo); unlock_user_struct(frame, frame_addr, 1); return; give_sigsegv: force_sigsegv(sig); }
5d79bd111ff4f9ed0b19c20f6708a770651a9048
https://github.com/qemu/qemu
1not_vulnerable
linux-user/s390x: Fix sigframe types Noticed via gitlab clang-user job: TEST signals on s390x ../linux-user/s390x/signal.c:258:9: runtime error: \ 1.84467e+19 is outside the range of representable values of \ type 'unsigned long' Which points to the fact that we were performing a double-to-uint64_t conversion while storing the fp registers, instead of just copying the data across. Turns out there are several errors: target_ulong is the size of the target register, whereas abi_ulong is the target 'unsigned long' type. Not a big deal here, since we only support 64-bit s390x, but not correct either. In target_sigcontext and target ucontext, we used a host pointer instead of a target pointer, aka abi_ulong. Fixing this allows the removal of a cast to __put_user. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: David Hildenbrand <david@redhat.com> Message-Id: <20210428193408.233706-2-richard.henderson@linaro.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
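The fp-register half of the bug, reduced to a sketch (function name hypothetical): the sanitizer complained because storing a register performed a double-to-integer value conversion, which is undefined for out-of-range values, where a raw bit copy was intended.

    #include <stdint.h>
    #include <string.h>

    static void save_fpr(uint64_t *slot, double f)
    {
        /* wrong: value conversion, UB when f exceeds the uint64_t range */
        /* *slot = (uint64_t)f; */

        memcpy(slot, &f, sizeof(f));   /* right: copy the bits */
    }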
void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUS390XState *env) { sigframe *frame; abi_ulong frame_addr; frame_addr = get_sigframe(ka, env, sizeof(*frame)); trace_user_setup_frame(env, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { goto give_sigsegv; } __put_user(set->sig[0], &frame->sc.oldmask[0]); save_sigregs(env, &frame->sregs); __put_user((abi_ulong)(unsigned long)&frame->sregs, &frame->sc.sregs); /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; } else { env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) | PSW_ADDR_AMODE; __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, (uint16_t *)(frame->retcode)); } /* Set up backchain. */ __put_user(env->regs[15], (abi_ulong *) frame); /* Set up registers for signal handler */ env->regs[15] = frame_addr; env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; env->regs[2] = sig; //map_signal(sig); env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); /* We forgot to include these in the sigcontext. To avoid breaking binary compatibility, they are passed as args. */ env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; /* Place signal number on stack to allow backtrace from handler. */ __put_user(env->regs[2], &frame->signo); unlock_user_struct(frame, frame_addr, 1); return; give_sigsegv: force_sigsegv(sig); }
f8ea624e7456b10bee8e82b788885a438af8084d
https://github.com/qemu/qemu
1not_vulnerable
linux-user/sparc: Fix the stackframe structure Move target_reg_window up and use it. Fold structptr and xxargs into xargs -- the use of a host pointer was incorrect anyway. Rename the structure to target_stackf for consistency. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210426025334.1168495-15-richard.henderson@linaro.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUSPARCState *env) { abi_ulong sf_addr; struct target_signal_frame *sf; int sigframe_size, err, i; /* 1. Make sure everything is clean */ //synchronize_user_stack(); sigframe_size = NF_ALIGNEDSZ; sf_addr = get_sigframe(ka, env, sigframe_size); trace_user_setup_frame(env, sf_addr); sf = lock_user(VERIFY_WRITE, sf_addr, sizeof(struct target_signal_frame), 0); if (!sf) { goto sigsegv; } #if 0 if (invalid_frame_pointer(sf, sigframe_size)) goto sigill_and_return; #endif /* 2. Save the current process state */ err = setup___siginfo(&sf->info, env, set->sig[0]); __put_user(0, &sf->extra_size); //save_fpu_state(regs, &sf->fpu_state); //__put_user(&sf->fpu_state, &sf->fpu_save); __put_user(set->sig[0], &sf->info.si_mask); for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { __put_user(set->sig[i + 1], &sf->extramask[i]); } for (i = 0; i < 8; i++) { __put_user(env->regwptr[i + WREG_L0], &sf->ss.win.locals[i]); } for (i = 0; i < 8; i++) { __put_user(env->regwptr[i + WREG_I0], &sf->ss.win.ins[i]); } if (err) goto sigsegv; /* 3. signal handler back-trampoline and parameters */ env->regwptr[WREG_SP] = sf_addr; env->regwptr[WREG_O0] = sig; env->regwptr[WREG_O1] = sf_addr + offsetof(struct target_signal_frame, info); env->regwptr[WREG_O2] = sf_addr + offsetof(struct target_signal_frame, info); /* 4. signal handler */ env->pc = ka->_sa_handler; env->npc = (env->pc + 4); /* 5. return to kernel instructions */ if (ka->ka_restorer) { env->regwptr[WREG_O7] = ka->ka_restorer; } else { uint32_t val32; env->regwptr[WREG_O7] = sf_addr + offsetof(struct target_signal_frame, insns) - 2 * 4; /* mov __NR_sigreturn, %g1 */ val32 = 0x821020d8; __put_user(val32, &sf->insns[0]); /* t 0x10 */ val32 = 0x91d02010; __put_user(val32, &sf->insns[1]); } unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); return; #if 0 sigill_and_return: force_sig(TARGET_SIGILL); #endif sigsegv: unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); force_sigsegv(sig); }
0a50285ee8bf471936325f5ccd870752d2a038cb
https://github.com/qemu/qemu
1not_vulnerable
linux-user/arm: Do not fill in si_code for fpa11 exceptions There is no such decoding in linux/arch/arm/nwfpe/fpmodule.c. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Message-Id: <20210423165413.338259-4-richard.henderson@linaro.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode) { TaskState *ts = env_cpu(env)->opaque; int rc = EmulateAll(opcode, &ts->fpa, env); if (rc == 0) { /* Illegal instruction */ return false; } if (rc > 0) { /* Everything ok. */ env->regs[15] += 4; return true; } /* FP exception */ int arm_fpe = 0; /* Translate softfloat flags to FPSR flags */ if (-rc & float_flag_invalid) { arm_fpe |= BIT_IOC; } if (-rc & float_flag_divbyzero) { arm_fpe |= BIT_DZC; } if (-rc & float_flag_overflow) { arm_fpe |= BIT_OFC; } if (-rc & float_flag_underflow) { arm_fpe |= BIT_UFC; } if (-rc & float_flag_inexact) { arm_fpe |= BIT_IXC; } /* Exception enabled? */ FPSR fpsr = ts->fpa.fpsr; if (fpsr & (arm_fpe << 16)) { target_siginfo_t info = { }; /* * The kernel's nwfpe emulator does not pass a real si_code. * It merely uses send_sig(SIGFPE, current, 1). */ info.si_signo = TARGET_SIGFPE; info.si_code = TARGET_SI_KERNEL; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); } else { env->regs[15] += 4; } /* Accumulate unenabled exceptions */ if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC)) { fpsr |= BIT_IXC; } if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC)) { fpsr |= BIT_UFC; } if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC)) { fpsr |= BIT_OFC; } if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC)) { fpsr |= BIT_DZC; } if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC)) { fpsr |= BIT_IOC; } ts->fpa.fpsr = fpsr; return true; }
d827f6d5fdb0826e17c80f63547c5c2dee3f0fac
https://github.com/qemu/qemu
1not_vulnerable
linux-user/arm: Do not emulate fpa11 in thumb mode These antiquated instructions are arm-mode only. Buglink: https://bugs.launchpad.net/bugs/1925512 Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Message-Id: <20210423165413.338259-3-richard.henderson@linaro.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
void cpu_loop(CPUARMState *env) { CPUState *cs = env_cpu(env); int trapnr; unsigned int n, insn; target_siginfo_t info; uint32_t addr; abi_ulong ret; for(;;) { cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); process_queued_cpu_work(cs); switch(trapnr) { case EXCP_UDEF: case EXCP_NOCP: case EXCP_INVSTATE: { uint32_t opcode; /* we handle the FPU emulation here, as Linux */ /* we get the opcode */ /* FIXME - what to do if get_user() fails? */ get_user_code_u32(opcode, env->regs[15], env); /* * The Linux kernel treats some UDF patterns specially * to use as breakpoints (instead of the architectural * bkpt insn). These should trigger a SIGTRAP rather * than SIGILL. */ if (insn_is_linux_bkpt(opcode, env->thumb)) { goto excp_debug; } if (!env->thumb && emulate_arm_fpa11(env, opcode)) { break; } info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLOPN; info._sifields._sigfault._addr = env->regs[15]; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); } break; case EXCP_SWI: { env->eabi = 1; /* system call */ if (env->thumb) { /* Thumb is always EABI style with syscall number in r7 */ n = env->regs[7]; } else { /* * Equivalent of kernel CONFIG_OABI_COMPAT: read the * Arm SVC insn to extract the immediate, which is the * syscall number in OABI. */ /* FIXME - what to do if get_user() fails? */ get_user_code_u32(insn, env->regs[15] - 4, env); n = insn & 0xffffff; if (n == 0) { /* zero immediate: EABI, syscall number in r7 */ n = env->regs[7]; } else { /* * This XOR matches the kernel code: an immediate * in the valid range (0x900000 .. 0x9fffff) is * converted into the correct EABI-style syscall * number; invalid immediates end up as values * > 0xfffff and are handled below as out-of-range. */ n ^= ARM_SYSCALL_BASE; env->eabi = 0; } } if (n > ARM_NR_BASE) { switch (n) { case ARM_NR_cacheflush: /* nop */ break; case ARM_NR_set_tls: cpu_set_tls(env, env->regs[0]); env->regs[0] = 0; break; case ARM_NR_breakpoint: env->regs[15] -= env->thumb ? 2 : 4; goto excp_debug; case ARM_NR_get_tls: env->regs[0] = cpu_get_tls(env); break; default: if (n < 0xf0800) { /* * Syscalls 0xf0000..0xf07ff (or 0x9f0000.. * 0x9f07ff in OABI numbering) are defined * to return -ENOSYS rather than raising * SIGILL. Note that we have already * removed the 0x900000 prefix. */ qemu_log_mask(LOG_UNIMP, "qemu: Unsupported ARM syscall: 0x%x\n", n); env->regs[0] = -TARGET_ENOSYS; } else { /* * Otherwise SIGILL. This includes any SWI with * immediate not originally 0x9fxxxx, because * of the earlier XOR. */ info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLTRP; info._sifields._sigfault._addr = env->regs[15]; if (env->thumb) { info._sifields._sigfault._addr -= 2; } else { info._sifields._sigfault._addr -= 4; } queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); } break; } } else { ret = do_syscall(env, n, env->regs[0], env->regs[1], env->regs[2], env->regs[3], env->regs[4], env->regs[5], 0, 0); if (ret == -TARGET_ERESTARTSYS) { env->regs[15] -= env->thumb ? 2 : 4; } else if (ret != -TARGET_QEMU_ESIGRETURN) { env->regs[0] = ret; } } } break; case EXCP_SEMIHOST: env->regs[0] = do_common_semihosting(cs); env->regs[15] += env->thumb ? 
2 : 4; break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: addr = env->exception.vaddress; { info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; /* XXX: check env->error_code */ info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = addr; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); } break; case EXCP_DEBUG: case EXCP_BKPT: excp_debug: info.si_signo = TARGET_SIGTRAP; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; case EXCP_KERNEL_TRAP: if (do_kernel_trap(env)) goto error; break; case EXCP_YIELD: /* nothing to do here for user-mode, just resume guest code */ break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; default: error: EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); abort(); } process_pending_signals(env); } }
570fe439e5d1b8626cf344c6bc97d90cfcaf0c79
https://github.com/qemu/qemu
1not_vulnerable
virtio-blk: Fix rollback path in virtio_blk_data_plane_start() When dataplane multiqueue support was added in QEMU 2.7, the path that would roll back the guest notifier assignment in case of error simply got dropped. Later on, when Error was added to blk_set_aio_context() in QEMU 4.1, another error path was introduced, but it omits rolling back both host and guest notifiers. It seems cleaner to fix the rollback path in one go. The patch is simple enough that it can be adjusted if backported to a pre-4.1 QEMU. Fixes: 51b04ac5c6a6 ("virtio-blk: dataplane multiqueue support") Cc: stefanha@redhat.com Fixes: 97896a4887a0 ("block: Add Error to blk_set_aio_context()") Cc: kwolf@redhat.com Signed-off-by: Greg Kurz <groug@kaod.org> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Message-Id: <20210407143501.244343-2-groug@kaod.org> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
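The stacked-goto unwind idiom the patch restores, sketched with stand-in setup steps: each failing step jumps to a label that tears down everything set up before it, in reverse order, so no notifier assignment is left dangling.

    static int setup_guest_notifiers(void)     { return 0; }   /* stand-ins */
    static int setup_host_notifiers(void)      { return 0; }
    static int set_aio_context(void)           { return -1; }  /* pretend failure */
    static void teardown_host_notifiers(void)  { }
    static void teardown_guest_notifiers(void) { }

    static int dataplane_start(void)
    {
        if (setup_guest_notifiers() < 0) {
            goto fail_guest_notifiers;
        }
        if (setup_host_notifiers() < 0) {
            goto fail_host_notifiers;
        }
        if (set_aio_context() < 0) {
            goto fail_aio_context;
        }
        return 0;

    fail_aio_context:
        teardown_host_notifiers();
    fail_host_notifiers:
        teardown_guest_notifiers();
    fail_guest_notifiers:
        return -1;
    }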
int virtio_blk_data_plane_start(VirtIODevice *vdev) { VirtIOBlock *vblk = VIRTIO_BLK(vdev); VirtIOBlockDataPlane *s = vblk->dataplane; BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); AioContext *old_context; unsigned i; unsigned nvqs = s->conf->num_queues; Error *local_err = NULL; int r; if (vblk->dataplane_started || s->starting) { return 0; } s->starting = true; if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { s->batch_notifications = true; } else { s->batch_notifications = false; } /* Set up guest notifier (irq) */ r = k->set_guest_notifiers(qbus->parent, nvqs, true); if (r != 0) { error_report("virtio-blk failed to set guest notifier (%d), " "ensure -accel kvm is set.", r); goto fail_guest_notifiers; } /* Set up virtqueue notify */ for (i = 0; i < nvqs; i++) { r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true); if (r != 0) { fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r); while (i--) { virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); } goto fail_host_notifiers; } } s->starting = false; vblk->dataplane_started = true; trace_virtio_blk_data_plane_start(s); old_context = blk_get_aio_context(s->conf->conf.blk); aio_context_acquire(old_context); r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err); aio_context_release(old_context); if (r < 0) { error_report_err(local_err); goto fail_aio_context; } /* Process queued requests before the ones in vring */ virtio_blk_process_queued_requests(vblk, false); /* Kick right away to begin processing requests already in vring */ for (i = 0; i < nvqs; i++) { VirtQueue *vq = virtio_get_queue(s->vdev, i); event_notifier_set(virtio_queue_get_host_notifier(vq)); } /* Get this show started by hooking up our callbacks */ aio_context_acquire(s->ctx); for (i = 0; i < nvqs; i++) { VirtQueue *vq = virtio_get_queue(s->vdev, i); virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, virtio_blk_data_plane_handle_output); } aio_context_release(s->ctx); return 0; fail_aio_context: for (i = 0; i < nvqs; i++) { virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); } fail_host_notifiers: k->set_guest_notifiers(qbus->parent, nvqs, false); fail_guest_notifiers: /* * If we failed to set up the guest notifiers queued requests will be * processed on the main context. */ virtio_blk_process_queued_requests(vblk, false); vblk->dataplane_disabled = true; s->starting = false; vblk->dataplane_started = true; return -ENOSYS; }
20868330a9d13228e59d0818d77f271cf1141280
https://github.com/qemu/qemu
1not_vulnerable
libqtest: refuse QTEST_QEMU_BINARY=qemu-kvm Some downstreams rename the QEMU binary to "qemu-kvm". This breaks qtest_get_arch(), which attempts to parse the target architecture from the QTEST_QEMU_BINARY environment variable. Print an error instead of returning the architecture "kvm". Things fail in weird ways when the architecture string is bogus. Arguably qtests should always be run in a build directory instead of against an installed QEMU. In any case, printing a clear error when this happens is helpful. Since this is an error that is triggered by the user and not a test failure, use exit(1) instead of abort(). Change the existing abort() call in qtest_get_arch() to exit(1) too for the same reason and to be consistent. Reported-by: Qin Wang <qinwang@rehdat.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> Reviewed-by: Thomas Huth <thuth@redhat.com> Cc: Emanuele Giuseppe Esposito <eesposit@redhat.com> Message-Id: <20210412143050.725918-1-stefanha@redhat.com> Signed-off-by: Thomas Huth <thuth@redhat.com>
const char *qtest_get_arch(void) { const char *qemu = qtest_qemu_binary(); const char *end = strrchr(qemu, '-'); if (!end) { fprintf(stderr, "Can't determine architecture from binary name.\n"); exit(1); } if (!strstr(qemu, "-system-")) { fprintf(stderr, "QTEST_QEMU_BINARY must end with *-system-<arch> " "where 'arch' is the target\narchitecture (x86_64, aarch64, " "etc).\n"); exit(1); } return end + 1; }
cc61c703b6a8b9dfdfb92d5f3fa1961f6d232926
https://github.com/qemu/qemu
1not_vulnerable
migration/ram: Discard RAM when growing RAM blocks after ram_postcopy_incoming_init() In case we grow our RAM after ram_postcopy_incoming_init() (e.g., when synchronizing the RAM block state with the migration source), the resized part would not get discarded. Let's perform that when being notified about a resize while postcopy has been advised, but is not listening yet. With precopy, the process is as follows: 1. VM created - RAM blocks are created 2. Incoming migration started - Postcopy is advised - All pages in RAM blocks are discarded 3. Precopy starts - RAM blocks are resized to match the size on the migration source. - RAM pages from precopy stream are loaded - Uffd handler is registered, postcopy starts listening 4. Guest started, postcopy running - Pagefaults get resolved, pages get placed Reviewed-by: Peter Xu <peterx@redhat.com> Signed-off-by: David Hildenbrand <david@redhat.com> Message-Id: <20210429112708.12291-7-david@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host, size_t old_size, size_t new_size) { PostcopyState ps = postcopy_state_get(); ram_addr_t offset; RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset); Error *err = NULL; if (ramblock_is_ignored(rb)) { return; } if (!migration_is_idle()) { /* * Precopy code on the source cannot deal with the size of RAM blocks * changing at random points in time - especially after sending the * RAM block sizes in the migration stream, they must no longer change. * Abort and indicate a proper reason. */ error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr); migrate_set_error(migrate_get_current(), err); error_free(err); migration_cancel(); } switch (ps) { case POSTCOPY_INCOMING_ADVISE: /* * Update what ram_postcopy_incoming_init()->init_range() does at the * time postcopy was advised. Syncing RAM blocks with the source will * result in RAM resizes. */ if (old_size < new_size) { if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) { error_report("RAM block '%s' discard of resized RAM failed", rb->idstr); } } break; case POSTCOPY_INCOMING_NONE: case POSTCOPY_INCOMING_RUNNING: case POSTCOPY_INCOMING_END: /* * Once our guest is running, postcopy does no longer care about * resizes. When growing, the new memory was not available on the * source, no handler needed. */ break; default: error_report("RAM block '%s' resized during postcopy state: %d", rb->idstr, ps); exit(-1); } }
dcdc460767ed0a650e06ff256fa2a52ff1b57047
https://github.com/qemu/qemu
1not_vulnerable
exec: Relax range check in ram_block_discard_range() We want to make use of ram_block_discard_range() in the RAM block resize callback when growing a RAM block, *before* used_length is changed. Let's relax the check. As RAM blocks always mmap the whole max_length area, we cannot corrupt unrelated data. Reviewed-by: Peter Xu <peterx@redhat.com> Signed-off-by: David Hildenbrand <david@redhat.com> Message-Id: <20210429112708.12291-6-david@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
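Sketch of the relaxed bound (the pre-fix comparison is inferred from the commit message, not quoted): because the full max_length area is always mmap'd, a discard between used_length and max_length cannot touch unrelated mappings.

    /* pre-fix (inferred): if ((start + length) <= rb->used_length) { ... } */
    if ((start + length) <= rb->max_length) {
        /* safe: the whole [host, host + max_length) range belongs to this block */
    }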
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) { int ret = -1; uint8_t *host_startaddr = rb->host + start; if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { error_report("ram_block_discard_range: Unaligned start address: %p", host_startaddr); goto err; } if ((start + length) <= rb->max_length) { bool need_madvise, need_fallocate; if (!QEMU_IS_ALIGNED(length, rb->page_size)) { error_report("ram_block_discard_range: Unaligned length: %zx", length); goto err; } errno = ENOTSUP; /* If we are missing MADVISE etc */ /* The logic here is messy; * madvise DONTNEED fails for hugepages * fallocate works on hugepages and shmem */ need_madvise = (rb->page_size == qemu_host_page_size); need_fallocate = rb->fd != -1; if (need_fallocate) { /* For a file, this causes the area of the file to be zero'd * if read, and for hugetlbfs also causes it to be unmapped * so a userfault will trigger. */ #ifdef CONFIG_FALLOCATE_PUNCH_HOLE ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, start, length); if (ret) { ret = -errno; error_report("ram_block_discard_range: Failed to fallocate " "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; error_report("ram_block_discard_range: fallocate not available/file" "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; #endif } if (need_madvise) { /* For normal RAM this causes it to be unmapped, * for shared memory it causes the local mapping to disappear * and to fall back on the file contents (which we just * fallocate'd away). */ #if defined(CONFIG_MADVISE) ret = madvise(host_startaddr, length, MADV_DONTNEED); if (ret) { ret = -errno; error_report("ram_block_discard_range: Failed to discard range " "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; error_report("ram_block_discard_range: MADVISE not available" "%s:%" PRIx64 " +%zx (%d)", rb->idstr, start, length, ret); goto err; #endif } trace_ram_block_discard_range(rb->idstr, host_startaddr, length, need_madvise, need_fallocate, ret); } else { error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", rb->idstr, start, length, rb->max_length); } err: return ret; }
29f9c636894c462fa54fad08049e51877905e93b
https://github.com/qemu/qemu
1not_vulnerable
target/avr: Ignore unimplemented WDR opcode Running the WDR opcode triggers a segfault: $ cat > foo.S << EOF > __start: > wdr > EOF $ avr-gcc -nostdlib -nostartfiles -mmcu=avr6 foo.S -o foo.elf $ qemu-system-avr -serial mon:stdio -nographic -no-reboot \ -M mega -bios foo.elf -d in_asm --singlestep IN: 0x00000000: WDR Segmentation fault (core dumped) (gdb) bt #0 0x00005555add0b23a in gdb_get_cpu_pid (cpu=0x5555af5a4af0) at ../gdbstub.c:718 #1 0x00005555add0b2dd in gdb_get_cpu_process (cpu=0x5555af5a4af0) at ../gdbstub.c:743 #2 0x00005555add0e477 in gdb_set_stop_cpu (cpu=0x5555af5a4af0) at ../gdbstub.c:2742 #3 0x00005555adc99b96 in cpu_handle_guest_debug (cpu=0x5555af5a4af0) at ../softmmu/cpus.c:306 #4 0x00005555adcc66ab in rr_cpu_thread_fn (arg=0x5555af5a4af0) at ../accel/tcg/tcg-accel-ops-rr.c:224 #5 0x00005555adefaf12 in qemu_thread_start (args=0x5555af5d9870) at ../util/qemu-thread-posix.c:521 #6 0x00007f692d940ea5 in start_thread () from /lib64/libpthread.so.0 #7 0x00007f692d6699fd in clone () from /lib64/libc.so.6 Since the watchdog peripheral is not implemented, simply log the opcode as unimplemented and keep going. Reported-by: Fred Konrad <konrad@adacore.com> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: KONRAD Frederic <frederic.konrad@adacore.com> Message-Id: <20210502190900.604292-1-f4bug@amsat.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
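The fix as described, sketched (helper name and exact message are assumptions based on the commit text): translate WDR into a helper that merely logs the opcode as unimplemented, instead of reaching a path that crashes.

    void helper_wdr(CPUAVRState *env)
    {
        /* Watchdog peripheral is not modeled; just note it and continue. */
        qemu_log_mask(LOG_UNIMP, "WDR opcode is not implemented\n");
    }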
void helper_break(CPUAVRState *env) { CPUState *cs = env_cpu(env); cs->exception_index = EXCP_DEBUG; cpu_loop_exit(cs); }
52a1c621f9d56d18212273c64b4119513a2db1f1
https://github.com/qemu/qemu
1not_vulnerable
target/sh4: Return error if CPUClass::get_phys_page_debug() fails If the get_physical_address() call fails, the SH4 get_phys_page_debug() handler returns an uninitialized address. Instead return -1, which corresponds to "no page found" (see cpu_get_phys_page_debug() doc string). This fixes a warning emitted when building with CFLAGS=-O3 (using GCC 10.2.1 20201125): target/sh4/helper.c: In function ‘superh_cpu_get_phys_page_debug’: target/sh4/helper.c:446:12: warning: ‘physical’ may be used uninitialized in this function [-Wmaybe-uninitialized] 446 | return physical; | ^~~~~~~~ Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Yoshinori Sato <ysato@users.sourceforge.jp> Message-Id: <20210505161046.1397608-1-f4bug@amsat.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { SuperHCPU *cpu = SUPERH_CPU(cs); target_ulong physical; int prot; if (get_physical_address(&cpu->env, &physical, &prot, addr, MMU_DATA_LOAD) == MMU_OK) { return physical; } return -1; }