Dataset columns:
sha: string (length 40)
remote_url: string (3 distinct values)
labels: class label (2 classes)
commit_msg: string (length 16 to 10.3k)
function: string (length 30 to 17.6k)
Each record below repeats these fields in this order: sha, remote_url, labels, commit_msg, function.
8dca037b484fc8caeb6d6689745bc7475ce27174
https://github.com/qemu/qemu
not_vulnerable
vfio: Do not register any IOMMU_NOTIFIER_DEVIOTLB_UNMAP notifier In an attempt to fix smmu/virtio-iommu - vhost regression, commit 958ec334bca3 ("vhost: Unbreak SMMU and virtio-iommu on dev-iotlb support") broke virtio-iommu integration. This is due to the fact VFIO registers IOMMU_NOTIFIER_ALL notifiers, which includes IOMMU_NOTIFIER_DEVIOTLB_UNMAP and this latter now is rejected by the virtio-iommu. As a consequence, the registration fails. VHOST behaves like a device with an ATC cache. The VFIO device does not support this scheme yet. Let's register only legacy MAP and UNMAP notifiers. Fixes: 958ec334bca3 ("vhost: Unbreak SMMU and virtio-iommu on dev-iotlb support") Signed-off-by: Eric Auger <eric.auger@redhat.com> Message-Id: <20210209213233.40985-2-eric.auger@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com> Acked-by: Alex Williamson <alex.williamson@redhat.com> Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
static void vfio_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { VFIOContainer *container = container_of(listener, VFIOContainer, listener); hwaddr iova, end; Int128 llend, llsize; void *vaddr; int ret; VFIOHostDMAWindow *hostwin; bool hostwin_found; Error *err = NULL; if (vfio_listener_skipped_section(section)) { trace_vfio_listener_region_add_skip( section->offset_within_address_space, section->offset_within_address_space + int128_get64(int128_sub(section->size, int128_one()))); return; } if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != (section->offset_within_region & ~TARGET_PAGE_MASK))) { error_report("%s received unaligned region", __func__); return; } iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); llend = int128_make64(section->offset_within_address_space); llend = int128_add(llend, section->size); llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); if (int128_ge(int128_make64(iova), llend)) { return; } end = int128_get64(int128_sub(llend, int128_one())); if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { hwaddr pgsize = 0; /* For now intersections are not allowed, we may relax this later */ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { if (ranges_overlap(hostwin->min_iova, hostwin->max_iova - hostwin->min_iova + 1, section->offset_within_address_space, int128_get64(section->size))) { error_setg(&err, "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing" "host DMA window [0x%"PRIx64",0x%"PRIx64"]", section->offset_within_address_space, section->offset_within_address_space + int128_get64(section->size) - 1, hostwin->min_iova, hostwin->max_iova); goto fail; } } ret = vfio_spapr_create_window(container, section, &pgsize); if (ret) { error_setg_errno(&err, -ret, "Failed to create SPAPR window"); goto fail; } vfio_host_win_add(container, section->offset_within_address_space, section->offset_within_address_space + int128_get64(section->size) - 1, pgsize); #ifdef CONFIG_KVM if (kvm_enabled()) { VFIOGroup *group; IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); struct kvm_vfio_spapr_tce param; struct kvm_device_attr attr = { .group = KVM_DEV_VFIO_GROUP, .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE, .addr = (uint64_t)(unsigned long)&param, }; if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD, &param.tablefd)) { QLIST_FOREACH(group, &container->group_list, container_next) { param.groupfd = group->fd; if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { error_report("vfio: failed to setup fd %d " "for a group with fd %d: %s", param.tablefd, param.groupfd, strerror(errno)); return; } trace_vfio_spapr_group_attach(param.groupfd, param.tablefd); } } } #endif } hostwin_found = false; QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { hostwin_found = true; break; } } if (!hostwin_found) { error_setg(&err, "Container %p can't map guest IOVA region" " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end); goto fail; } memory_region_ref(section->mr); if (memory_region_is_iommu(section->mr)) { VFIOGuestIOMMU *giommu; IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); int iommu_idx; trace_vfio_listener_region_add_iommu(iova, end); /* * FIXME: For VFIO iommu types which have KVM acceleration to * avoid bouncing all map/unmaps through qemu this way, this * would be the right place to wire that up (tell the KVM * device emulation the VFIO iommu handles to use). 
*/ giommu = g_malloc0(sizeof(*giommu)); giommu->iommu = iommu_mr; giommu->iommu_offset = section->offset_within_address_space - section->offset_within_region; giommu->container = container; llend = int128_add(int128_make64(section->offset_within_region), section->size); llend = int128_sub(llend, int128_one()); iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, MEMTXATTRS_UNSPECIFIED); iommu_notifier_init(&giommu->n, vfio_iommu_map_notify, IOMMU_NOTIFIER_IOTLB_EVENTS, section->offset_within_region, int128_get64(llend), iommu_idx); ret = memory_region_iommu_set_page_size_mask(giommu->iommu, container->pgsizes, &err); if (ret) { g_free(giommu); goto fail; } ret = memory_region_register_iommu_notifier(section->mr, &giommu->n, &err); if (ret) { g_free(giommu); goto fail; } QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); memory_region_iommu_replay(giommu->iommu, &giommu->n); return; } /* Here we assume that memory_region_is_ram(section->mr)==true */ vaddr = memory_region_get_ram_ptr(section->mr) + section->offset_within_region + (iova - section->offset_within_address_space); trace_vfio_listener_region_add_ram(iova, end, vaddr); llsize = int128_sub(llend, int128_make64(iova)); if (memory_region_is_ram_device(section->mr)) { hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { trace_vfio_listener_region_add_no_dma_map( memory_region_name(section->mr), section->offset_within_address_space, int128_getlo(section->size), pgmask + 1); return; } } ret = vfio_dma_map(container, iova, int128_get64(llsize), vaddr, section->readonly); if (ret) { error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx", %p) = %d (%m)", container, iova, int128_get64(llsize), vaddr, ret); if (memory_region_is_ram_device(section->mr)) { /* Allow unexpected mappings not to be fatal for RAM devices */ error_report_err(err); return; } goto fail; } return; fail: if (memory_region_is_ram_device(section->mr)) { error_report("failed to vfio_dma_map. pci p2p may not work"); return; } /* * On the initfn path, store the first error in the container so we * can gracefully fail. Runtime, there's not much we can do other * than throw a hardware error. */ if (!container->initialized) { if (!container->error) { error_propagate_prepend(&container->error, err, "Region %s: ", memory_region_name(section->mr)); } else { error_free(err); } } else { error_report_err(err); hw_error("vfio: DMA mapping failed, unable to continue"); } }
fca676429ca7f309b5d492c7675d35fec484197c
https://github.com/qemu/qemu
not_vulnerable
migration/tls: add error handling in multifd_tls_handshake_thread If any error happens during multifd send thread creation (e.g. the channel broke because the new domain was destroyed by the destination), multifd_tls_handshake_thread may exit silently, leaving the main migration thread hanging (ram_save_setup -> multifd_send_sync_main -> qemu_sem_wait(&p->sem_sync)). Fix that by adding error handling in multifd_tls_handshake_thread. Signed-off-by: Hao Wang <wanghao232@huawei.com> Message-Id: <20210209104237.2250941-3-wanghao232@huawei.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Reviewed-by: Chuan Zheng <zhengchuan@huawei.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
static void multifd_tls_outgoing_handshake(QIOTask *task, gpointer opaque) { MultiFDSendParams *p = opaque; QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task)); Error *err = NULL; if (qio_task_propagate_error(task, &err)) { trace_multifd_tls_outgoing_handshake_error(ioc, error_get_pretty(err)); } else { trace_multifd_tls_outgoing_handshake_complete(ioc); } if (!multifd_channel_connect(p, ioc, err)) { /* * Error happen, mark multifd_send_thread status as 'quit' although it * is not created, and then tell who pay attention to me. */ p->quit = true; qemu_sem_post(&multifd_send_state->channels_ready); qemu_sem_post(&p->sem_sync); } }
20afcc23b3212784c84fb06062f66d9d2ce6865d
https://github.com/qemu/qemu
not_vulnerable
virtiofsd: Don't allow empty paths in lookup_name() When passed an empty filename, lookup_name() returns the inode of the parent directory, unless the parent is the root in which case the st_dev doesn't match and lo_find() returns NULL. This is because lookup_name() passes AT_EMPTY_PATH down to fstatat() or statx(). This behavior doesn't quite make sense because users of lookup_name() then pass the name to unlinkat(), renameat() or renameat2(), all of which will always fail on empty names. Drop AT_EMPTY_PATH from the flags in lookup_name() so that it has the consistent behavior of "returning an existing child inode or NULL" for all directories. Signed-off-by: Greg Kurz <groug@kaod.org> Message-Id: <20210312141003.819108-2-groug@kaod.org> Reviewed-by: Connor Kuehl <ckuehl@redhat.com> Reviewed-by: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
static struct lo_inode *lookup_name(fuse_req_t req, fuse_ino_t parent, const char *name) { int res; uint64_t mnt_id; struct stat attr; struct lo_data *lo = lo_data(req); struct lo_inode *dir = lo_inode(req, parent); if (!dir) { return NULL; } res = do_statx(lo, dir->fd, name, &attr, AT_SYMLINK_NOFOLLOW, &mnt_id); lo_inode_put(lo, &dir); if (res == -1) { return NULL; } return lo_find(lo, &attr, mnt_id); }
5bb8327b655dbce10a91ef809acb0875dd0ee0ed
https://github.com/qemu/qemu
not_vulnerable
virtiofsd: Release vu_dispatch_lock when stopping queue QEMU can stop a virtqueue by sending a VHOST_USER_GET_VRING_BASE request to virtiofsd. As with all other vhost-user protocol messages, the thread that runs the main event loop in virtiofsd takes the vu_dispatch lock in write mode. This ensures that no other thread can access virtqueues or memory tables at the same time. In the case of VHOST_USER_GET_VRING_BASE, the main thread basically notifies the queue thread that it should terminate and waits for its termination: main() virtio_loop() vu_dispatch_wrlock() vu_dispatch() vu_process_message() vu_get_vring_base_exec() fv_queue_cleanup_thread() pthread_join() Unfortunately, the queue thread ends up calling virtio_send_msg() at some point, which itself needs to grab the lock: fv_queue_thread() g_list_foreach() fv_queue_worker() fuse_session_process_buf_int() do_release() lo_release() fuse_reply_err() send_reply() send_reply_iov() fuse_send_reply_iov_nofree() fuse_send_msg() virtio_send_msg() vu_dispatch_rdlock() <-- Deadlock ! Simply have the main thread to release the lock before going to sleep and take it back afterwards. A very similar patch was already sent by Vivek Goyal sometime back: https://listman.redhat.com/archives/virtio-fs/2021-January/msg00073.html The only difference here is that this done in fv_queue_set_started() because fv_queue_cleanup_thread() can also be called from virtio_loop() without the lock being held. Signed-off-by: Greg Kurz <groug@kaod.org> Reviewed-by: Vivek Goyal <vgoyal@redhat.com> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Message-Id: <20210312092212.782255-8-groug@kaod.org> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
static void fv_queue_set_started(VuDev *dev, int qidx, bool started) { struct fv_VuDev *vud = container_of(dev, struct fv_VuDev, dev); struct fv_QueueInfo *ourqi; fuse_log(FUSE_LOG_INFO, "%s: qidx=%d started=%d\n", __func__, qidx, started); assert(qidx >= 0); /* * Ignore additional request queues for now. passthrough_ll.c must be * audited for thread-safety issues first. It was written with a * well-behaved client in mind and may not protect against all types of * races yet. */ if (qidx > 1) { fuse_log(FUSE_LOG_ERR, "%s: multiple request queues not yet implemented, please only " "configure 1 request queue\n", __func__); exit(EXIT_FAILURE); } if (started) { /* Fire up a thread to watch this queue */ if (qidx >= vud->nqueues) { vud->qi = realloc(vud->qi, (qidx + 1) * sizeof(vud->qi[0])); assert(vud->qi); memset(vud->qi + vud->nqueues, 0, sizeof(vud->qi[0]) * (1 + (qidx - vud->nqueues))); vud->nqueues = qidx + 1; } if (!vud->qi[qidx]) { vud->qi[qidx] = calloc(sizeof(struct fv_QueueInfo), 1); assert(vud->qi[qidx]); vud->qi[qidx]->virtio_dev = vud; vud->qi[qidx]->qidx = qidx; } else { /* Shouldn't have been started */ assert(vud->qi[qidx]->kick_fd == -1); } ourqi = vud->qi[qidx]; ourqi->kick_fd = dev->vq[qidx].kick_fd; ourqi->kill_fd = eventfd(0, EFD_CLOEXEC | EFD_SEMAPHORE); assert(ourqi->kill_fd != -1); pthread_mutex_init(&ourqi->vq_lock, NULL); if (pthread_create(&ourqi->thread, NULL, fv_queue_thread, ourqi)) { fuse_log(FUSE_LOG_ERR, "%s: Failed to create thread for queue %d\n", __func__, qidx); assert(0); } } else { /* * Temporarily drop write-lock taken in virtio_loop() so that * the queue thread doesn't block in virtio_send_msg(). */ vu_dispatch_unlock(vud); fv_queue_cleanup_thread(vud, qidx); vu_dispatch_wrlock(vud); } }
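A minimal standalone sketch (plain pthreads, not virtiofsd code) of the lock-ordering problem the commit message describes: the main loop holds the dispatch lock in write mode while joining a queue thread that needs it in read mode, so the join can never complete unless the writer drops the lock around it. Names such as dispatch_lock and queue_thread are illustrative only.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t dispatch_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stands in for the queue thread calling virtio_send_msg(), which takes
 * the dispatch lock for reading before replying. */
static void *queue_thread(void *arg)
{
    (void)arg;
    pthread_rwlock_rdlock(&dispatch_lock);
    printf("queue thread: reply sent\n");
    pthread_rwlock_unlock(&dispatch_lock);
    return NULL;
}

int main(void)
{
    pthread_t tid;

    /* Main event loop takes the lock in write mode to process a message. */
    pthread_rwlock_wrlock(&dispatch_lock);
    pthread_create(&tid, NULL, queue_thread, NULL);

    /* Calling pthread_join() here, with the write lock still held, would
     * deadlock: the queue thread is blocked in rdlock above. The fix mirrors
     * fv_queue_set_started(): drop the lock, join, then re-take it. */
    pthread_rwlock_unlock(&dispatch_lock);
    pthread_join(tid, NULL);
    pthread_rwlock_wrlock(&dispatch_lock);

    pthread_rwlock_unlock(&dispatch_lock);
    return 0;
}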
14c235eb40eb82e0d7e89601b1a47028fe24deca
https://github.com/qemu/qemu
not_vulnerable
opengl: Do not convert format with glTexImage2D on OpenGL ES OpenGL ES does not support conversion from the given data format to the internal format with glTexImage2D. Use the given data format as the internal format, and ignore the given alpha channels with GL_TEXTURE_SWIZZLE_A in case the format contains alpha channels. Signed-off-by: Akihiko Odaki <akihiko.odaki@gmail.com> Message-Id: <20210219094803.90860-1-akihiko.odaki@gmail.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
void surface_gl_create_texture(QemuGLShader *gls, DisplaySurface *surface) { assert(gls); assert(QEMU_IS_ALIGNED(surface_stride(surface), surface_bytes_per_pixel(surface))); switch (surface->format) { case PIXMAN_BE_b8g8r8x8: case PIXMAN_BE_b8g8r8a8: surface->glformat = GL_BGRA_EXT; surface->gltype = GL_UNSIGNED_BYTE; break; case PIXMAN_BE_x8r8g8b8: case PIXMAN_BE_a8r8g8b8: surface->glformat = GL_RGBA; surface->gltype = GL_UNSIGNED_BYTE; break; case PIXMAN_r5g6b5: surface->glformat = GL_RGB; surface->gltype = GL_UNSIGNED_SHORT_5_6_5; break; default: g_assert_not_reached(); } glGenTextures(1, &surface->texture); glEnable(GL_TEXTURE_2D); glBindTexture(GL_TEXTURE_2D, surface->texture); glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, surface_stride(surface) / surface_bytes_per_pixel(surface)); if (epoxy_is_desktop_gl()) { glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, surface_width(surface), surface_height(surface), 0, surface->glformat, surface->gltype, surface_data(surface)); } else { glTexImage2D(GL_TEXTURE_2D, 0, surface->glformat, surface_width(surface), surface_height(surface), 0, surface->glformat, surface->gltype, surface_data(surface)); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, GL_ONE); } glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); }
39912c14da07a2dbc73854addcfa0a42596340ac
https://github.com/qemu/qemu
not_vulnerable
usb/storage: clear csw on reset Stale data in csw (specifically residue) can confuse the state machine and allows the guest to trigger an assert(). So clear csw on reset to avoid this happening in case the guest resets the device in the middle of a request. Buglink: https://bugs.launchpad.net/qemu/+bug/1523811 Signed-off-by: Gerd Hoffmann <kraxel@redhat.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Message-Id: <20210312094954.796799-1-kraxel@redhat.com>
void usb_msd_handle_reset(USBDevice *dev) { MSDState *s = (MSDState *)dev; trace_usb_msd_reset(); if (s->req) { scsi_req_cancel(s->req); } assert(s->req == NULL); if (s->packet) { s->packet->status = USB_RET_STALL; usb_msd_packet_complete(s); } memset(&s->csw, 0, sizeof(s->csw)); s->mode = USB_MSDM_CBW; }
5793f5aafb05dae30e9dcb57d0d1c8f1a9633f6d
https://github.com/qemu/qemu
not_vulnerable
s390x/pci: Add missing initialization for g_autofree variables When declaring g_autofree variable without initialization, compiler will raise "may be used uninitialized in this function" warning due to automatic free handling. This is mentioned in docs/devel/style.rst (quote from section "Automatic memory deallocation"): * Variables declared with g_auto* MUST always be initialized, otherwise the cleanup function will use uninitialized stack memory Add initialization for these declarations to prevent the warning and comply with coding style. Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Fixes: cd7498d07fbb ("s390x/pci: Add routine to get the vfio dma available count") Fixes: 1e7552ff5c34 ("s390x/pci: get zPCI function info from host") Reviewed-by: Thomas Huth <thuth@redhat.com> Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> Message-Id: <20210315101352.152888-1-mrezanin@redhat.com> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
static void s390_pci_read_pfip(S390PCIBusDevice *pbdev, struct vfio_device_info *info) { struct vfio_info_cap_header *hdr; struct vfio_device_info_cap_zpci_pfip *cap; VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_PFIP); /* If capability not provided, just leave the defaults in place */ if (hdr == NULL) { trace_s390_pci_clp_cap(vpci->vbasedev.name, VFIO_DEVICE_INFO_CAP_ZPCI_PFIP); return; } cap = (void *) hdr; if (cap->size > CLP_PFIP_NR_SEGMENTS) { trace_s390_pci_clp_cap_size(vpci->vbasedev.name, cap->size, VFIO_DEVICE_INFO_CAP_ZPCI_PFIP); return; } memcpy(pbdev->zpci_fn.pfip, cap->pfip, CLP_PFIP_NR_SEGMENTS); }
37cee01784ff0df13e5209517e1b3594a5e792d1
https://github.com/qemu/qemu
not_vulnerable
lan9118: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com Signed-off-by: Alexander Bulekov <alxndr@bu.edu> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void do_tx_packet(lan9118_state *s) { int n; uint32_t status; /* FIXME: Honor TX disable, and allow queueing of packets. */ if (s->phy_control & 0x4000) { /* This assumes the receive routine doesn't touch the VLANClient. */ qemu_receive_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len); } else { qemu_send_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len); } s->txp->fifo_used = 0; if (s->tx_status_fifo_used == 512) { /* Status FIFO full */ return; } /* Add entry to status FIFO. */ status = s->txp->cmd_b & 0xffff0000u; DPRINTF("Sent packet tag:%04x len %d\n", status >> 16, s->txp->len); n = (s->tx_status_fifo_head + s->tx_status_fifo_used) & 511; s->tx_status_fifo[n] = status; s->tx_status_fifo_used++; if (s->tx_status_fifo_used == 512) { s->int_sts |= TSFF_INT; /* TODO: Stop transmission. */ } }
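This and several of the following records switch device loopback paths to qemu_receive_packet()/qemu_receive_packet_iov() for CVE-2021-3416. As a rough generic illustration (not QEMU's actual qemu_receive_packet() implementation), a reentrancy guard of this shape lets the receive path notice that it has been re-entered from its own delivery code and bail out instead of recursing:

#include <stdbool.h>
#include <stdio.h>

struct rx_queue {
    bool delivering;                 /* set while a packet is being delivered */
};

static void deliver(struct rx_queue *q, const unsigned char *buf, int len);

static void receive_packet(struct rx_queue *q, const unsigned char *buf, int len)
{
    if (q->delivering) {
        /* Re-entered from deliver(): drop the packet instead of recursing. */
        return;
    }
    q->delivering = true;
    deliver(q, buf, len);
    q->delivering = false;
}

static void deliver(struct rx_queue *q, const unsigned char *buf, int len)
{
    printf("delivering %d bytes\n", len);
    /* A device model in loopback mode may call back into the receive path
     * from here; the guard above turns that into a no-op. */
    receive_packet(q, buf, len);
}

int main(void)
{
    struct rx_queue q = { .delivering = false };
    unsigned char pkt[64] = { 0 };

    receive_packet(&q, pkt, (int)sizeof(pkt));
    return 0;
}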
e73adfbeec9d4e008630c814759052ed945c3fed
https://github.com/qemu/qemu
not_vulnerable
cadence_gem: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Signed-off-by: Alexander Bulekov <alxndr@bu.edu> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void gem_transmit(CadenceGEMState *s) { uint32_t desc[DESC_MAX_NUM_WORDS]; hwaddr packet_desc_addr; uint8_t *p; unsigned total_bytes; int q = 0; /* Do nothing if transmit is not enabled. */ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { return; } DB_PRINT("\n"); /* The packet we will hand off to QEMU. * Packets scattered across multiple descriptors are gathered to this * one contiguous buffer first. */ p = s->tx_packet; total_bytes = 0; for (q = s->num_priority_queues - 1; q >= 0; q--) { /* read current descriptor */ packet_desc_addr = gem_get_tx_desc_addr(s, q); DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr); address_space_read(&s->dma_as, packet_desc_addr, MEMTXATTRS_UNSPECIFIED, desc, sizeof(uint32_t) * gem_get_desc_len(s, false)); /* Handle all descriptors owned by hardware */ while (tx_desc_get_used(desc) == 0) { /* Do nothing if transmit is not enabled. */ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { return; } print_gem_tx_desc(desc, q); /* The real hardware would eat this (and possibly crash). * For QEMU let's lend a helping hand. */ if ((tx_desc_get_buffer(s, desc) == 0) || (tx_desc_get_length(desc) == 0)) { DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n", packet_desc_addr); break; } if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) - (p - s->tx_packet)) { qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \ HWADDR_PRIx " too large: size 0x%x space 0x%zx\n", packet_desc_addr, tx_desc_get_length(desc), gem_get_max_buf_len(s, true) - (p - s->tx_packet)); gem_set_isr(s, q, GEM_INT_AMBA_ERR); break; } /* Gather this fragment of the packet from "dma memory" to our * contig buffer. */ address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc), MEMTXATTRS_UNSPECIFIED, p, tx_desc_get_length(desc)); p += tx_desc_get_length(desc); total_bytes += tx_desc_get_length(desc); /* Last descriptor for this packet; hand the whole thing off */ if (tx_desc_get_last(desc)) { uint32_t desc_first[DESC_MAX_NUM_WORDS]; hwaddr desc_addr = gem_get_tx_desc_addr(s, q); /* Modify the 1st descriptor of this packet to be owned by * the processor. */ address_space_read(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED, desc_first, sizeof(desc_first)); tx_desc_set_used(desc_first); address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED, desc_first, sizeof(desc_first)); /* Advance the hardware current descriptor past this packet */ if (tx_desc_get_wrap(desc)) { s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q); } else { s->tx_desc_addr[q] = packet_desc_addr + 4 * gem_get_desc_len(s, false); } DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]); s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL; gem_set_isr(s, q, GEM_INT_TXCMPL); /* Handle interrupt consequences */ gem_update_int_status(s); /* Is checksum offload enabled? 
*/ if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) { net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL); } /* Update MAC statistics */ gem_transmit_updatestats(s, s->tx_packet, total_bytes); /* Send the packet somewhere */ if (s->phy_loop || (s->regs[GEM_NWCTRL] & GEM_NWCTRL_LOCALLOOP)) { qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet, total_bytes); } else { qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet, total_bytes); } /* Prepare for next packet */ p = s->tx_packet; total_bytes = 0; } /* read next descriptor */ if (tx_desc_get_wrap(desc)) { if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { packet_desc_addr = s->regs[GEM_TBQPH]; packet_desc_addr <<= 32; } else { packet_desc_addr = 0; } packet_desc_addr |= gem_get_tx_queue_base_addr(s, q); } else { packet_desc_addr += 4 * gem_get_desc_len(s, false); } DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr); address_space_read(&s->dma_as, packet_desc_addr, MEMTXATTRS_UNSPECIFIED, desc, sizeof(uint32_t) * gem_get_desc_len(s, false)); } if (tx_desc_get_used(desc)) { s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED; /* IRQ TXUSED is defined only for queue 0 */ if (q == 0) { gem_set_isr(s, 0, GEM_INT_TXUSED); } gem_update_int_status(s); } } }
5311fb805a4403bba024e83886fa0e7572265de4
https://github.com/qemu/qemu
not_vulnerable
rtl8139: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Buglink: https://bugs.launchpad.net/qemu/+bug/1910826 Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com Signed-off-by: Alexander Bulekov <alxndr@bu.edu> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void rtl8139_transfer_frame(RTL8139State *s, uint8_t *buf, int size, int do_interrupt, const uint8_t *dot1q_buf) { struct iovec *iov = NULL; struct iovec vlan_iov[3]; if (!size) { DPRINTF("+++ empty ethernet frame\n"); return; } if (dot1q_buf && size >= ETH_ALEN * 2) { iov = (struct iovec[3]) { { .iov_base = buf, .iov_len = ETH_ALEN * 2 }, { .iov_base = (void *) dot1q_buf, .iov_len = VLAN_HLEN }, { .iov_base = buf + ETH_ALEN * 2, .iov_len = size - ETH_ALEN * 2 }, }; memcpy(vlan_iov, iov, sizeof(vlan_iov)); iov = vlan_iov; } if (TxLoopBack == (s->TxConfig & TxLoopBack)) { size_t buf2_size; uint8_t *buf2; if (iov) { buf2_size = iov_size(iov, 3); buf2 = g_malloc(buf2_size); iov_to_buf(iov, 3, 0, buf2, buf2_size); buf = buf2; } DPRINTF("+++ transmit loopback mode\n"); qemu_receive_packet(qemu_get_queue(s->nic), buf, size); if (iov) { g_free(buf2); } } else { if (iov) { qemu_sendv_packet(qemu_get_queue(s->nic), iov, 3); } else { qemu_send_packet(qemu_get_queue(s->nic), buf, size); } } }
8c552542b81e56ff532dd27ec6e5328954bdda73
https://github.com/qemu/qemu
not_vulnerable
tx_pkt: switch to use qemu_receive_packet_iov() for loopback This patch switches to use qemu_receive_packet_iov() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt, NetClientState *nc, const struct iovec *iov, int iov_cnt) { if (pkt->is_loopback) { qemu_receive_packet_iov(nc, iov, iov_cnt); } else { qemu_sendv_packet(nc, iov, iov_cnt); } }
8c92060d3c0248bd4d515719a35922cd2391b9b4
https://github.com/qemu/qemu
not_vulnerable
sungem: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Reviewed-by: Alistair Francis <alistair.francis@wdc.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void sungem_send_packet(SunGEMState *s, const uint8_t *buf, int size) { NetClientState *nc = qemu_get_queue(s->nic); if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) { qemu_receive_packet(nc, buf, size); } else { qemu_send_packet(nc, buf, size); } }
26194a58f4eb83c5bdf4061a1628508084450ba1
https://github.com/qemu/qemu
not_vulnerable
msf2-mac: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void msf2_dma_tx(MSF2EmacState *s) { NetClientState *nc = qemu_get_queue(s->nic); hwaddr desc = s->regs[R_DMA_TX_DESC]; uint8_t buf[MAX_PKT_SIZE]; EmacDesc d; int size; uint8_t pktcnt; uint32_t status; if (!(s->regs[R_CFG1] & R_CFG1_TX_EN_MASK)) { return; } while (1) { emac_load_desc(s, &d, desc); if (d.pktsize & EMPTY_MASK) { break; } size = d.pktsize & PKT_SIZE; address_space_read(&s->dma_as, d.pktaddr, MEMTXATTRS_UNSPECIFIED, buf, size); /* * This is very basic way to send packets. Ideally there should be * a FIFO and packets should be sent out from FIFO only when * R_CFG1 bit 0 is set. */ if (s->regs[R_CFG1] & R_CFG1_LB_EN_MASK) { qemu_receive_packet(nc, buf, size); } else { qemu_send_packet(nc, buf, size); } d.pktsize |= EMPTY_MASK; emac_store_desc(s, &d, desc); /* update sent packets count */ status = s->regs[R_DMA_TX_STATUS]; pktcnt = FIELD_EX32(status, DMA_TX_STATUS, PKTCNT); pktcnt++; s->regs[R_DMA_TX_STATUS] = FIELD_DP32(status, DMA_TX_STATUS, PKTCNT, pktcnt); s->regs[R_DMA_TX_STATUS] |= R_DMA_TX_STATUS_PKT_SENT_MASK; desc = d.next; } s->regs[R_DMA_TX_STATUS] |= R_DMA_TX_STATUS_UNDERRUN_MASK; s->regs[R_DMA_TX_CTL] &= ~R_DMA_TX_CTL_EN_MASK; }
331d2ac9ea307c990dc86e6493e8f0c48d14bb33
https://github.com/qemu/qemu
not_vulnerable
dp8393x: switch to use qemu_receive_packet() for loopback packet This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com Signed-off-by: Jason Wang <jasowang@redhat.com>
static void dp8393x_do_transmit_packets(dp8393xState *s) { NetClientState *nc = qemu_get_queue(s->nic); int width, size; int tx_len, len; uint16_t i; width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1; while (1) { /* Read memory */ size = sizeof(uint16_t) * 6 * width; s->regs[SONIC_TTDA] = s->regs[SONIC_CTDA]; DPRINTF("Transmit packet at %08x\n", dp8393x_ttda(s)); address_space_read(&s->as, dp8393x_ttda(s) + sizeof(uint16_t) * width, MEMTXATTRS_UNSPECIFIED, s->data, size); tx_len = 0; /* Update registers */ s->regs[SONIC_TCR] = dp8393x_get(s, width, 0) & 0xf000; s->regs[SONIC_TPS] = dp8393x_get(s, width, 1); s->regs[SONIC_TFC] = dp8393x_get(s, width, 2); s->regs[SONIC_TSA0] = dp8393x_get(s, width, 3); s->regs[SONIC_TSA1] = dp8393x_get(s, width, 4); s->regs[SONIC_TFS] = dp8393x_get(s, width, 5); /* Handle programmable interrupt */ if (s->regs[SONIC_TCR] & SONIC_TCR_PINT) { s->regs[SONIC_ISR] |= SONIC_ISR_PINT; } else { s->regs[SONIC_ISR] &= ~SONIC_ISR_PINT; } for (i = 0; i < s->regs[SONIC_TFC]; ) { /* Append fragment */ len = s->regs[SONIC_TFS]; if (tx_len + len > sizeof(s->tx_buffer)) { len = sizeof(s->tx_buffer) - tx_len; } address_space_read(&s->as, dp8393x_tsa(s), MEMTXATTRS_UNSPECIFIED, &s->tx_buffer[tx_len], len); tx_len += len; i++; if (i != s->regs[SONIC_TFC]) { /* Read next fragment details */ size = sizeof(uint16_t) * 3 * width; address_space_read(&s->as, dp8393x_ttda(s) + sizeof(uint16_t) * width * (4 + 3 * i), MEMTXATTRS_UNSPECIFIED, s->data, size); s->regs[SONIC_TSA0] = dp8393x_get(s, width, 0); s->regs[SONIC_TSA1] = dp8393x_get(s, width, 1); s->regs[SONIC_TFS] = dp8393x_get(s, width, 2); } } /* Handle Ethernet checksum */ if (!(s->regs[SONIC_TCR] & SONIC_TCR_CRCI)) { /* Don't append FCS there, to look like slirp packets * which don't have one */ } else { /* Remove existing FCS */ tx_len -= 4; if (tx_len < 0) { SONIC_ERROR("tx_len is %d\n", tx_len); break; } } if (s->regs[SONIC_RCR] & (SONIC_RCR_LB1 | SONIC_RCR_LB0)) { /* Loopback */ s->regs[SONIC_TCR] |= SONIC_TCR_CRSL; if (nc->info->can_receive(nc)) { s->loopback_packet = 1; qemu_receive_packet(nc, s->tx_buffer, tx_len); } } else { /* Transmit packet */ qemu_send_packet(nc, s->tx_buffer, tx_len); } s->regs[SONIC_TCR] |= SONIC_TCR_PTX; /* Write status */ dp8393x_put(s, width, 0, s->regs[SONIC_TCR] & 0x0fff); /* status */ size = sizeof(uint16_t) * width; address_space_write(&s->as, dp8393x_ttda(s), MEMTXATTRS_UNSPECIFIED, s->data, size); if (!(s->regs[SONIC_CR] & SONIC_CR_HTX)) { /* Read footer of packet */ size = sizeof(uint16_t) * width; address_space_read(&s->as, dp8393x_ttda(s) + sizeof(uint16_t) * width * (4 + 3 * s->regs[SONIC_TFC]), MEMTXATTRS_UNSPECIFIED, s->data, size); s->regs[SONIC_CTDA] = dp8393x_get(s, width, 0); if (s->regs[SONIC_CTDA] & SONIC_DESC_EOL) { /* EOL detected */ break; } } } /* Done */ s->regs[SONIC_CR] &= ~SONIC_CR_TXP; s->regs[SONIC_ISR] |= SONIC_ISR_TXDN; dp8393x_update_irq(s); }
1caff0340f49c93d535c6558a5138d20d475315c
https://github.com/qemu/qemu
not_vulnerable
e1000: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com>
static void e1000_send_packet(E1000State *s, const uint8_t *buf, int size) { static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511, PTC1023, PTC1522 }; NetClientState *nc = qemu_get_queue(s->nic); if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) { qemu_receive_packet(nc, buf, size); } else { qemu_send_packet(nc, buf, size); } inc_tx_bcast_or_mcast_count(s, buf); e1000x_increase_size_stats(s->mac_reg, PTCregs, size); }
3de46e6fc489c52c9431a8a832ad8170a7569bd8
https://github.com/qemu/qemu
not_vulnerable
e1000: fail early for evil descriptor During process_tx_desc(), the driver can try to chain a data descriptor with a legacy descriptor, which leads to an underflow in the following calculation in process_tx_desc() for bytes: if (tp->size + bytes > msh) bytes = msh - tp->size; This results in an infinite loop. So check and fail early if tp->size is greater than or equal to msh. Reported-by: Alexander Bulekov <alxndr@bu.edu> Reported-by: Cheolwoo Myung <cwmyung@snu.ac.kr> Reported-by: Ruhr-University Bochum <bugs-syssec@rub.de> Cc: Prasad J Pandit <ppandit@redhat.com> Cc: qemu-stable@nongnu.org Signed-off-by: Jason Wang <jasowang@redhat.com>
static void process_tx_desc(E1000State *s, struct e1000_tx_desc *dp) { PCIDevice *d = PCI_DEVICE(s); uint32_t txd_lower = le32_to_cpu(dp->lower.data); uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D); unsigned int split_size = txd_lower & 0xffff, bytes, sz; unsigned int msh = 0xfffff; uint64_t addr; struct e1000_context_desc *xp = (struct e1000_context_desc *)dp; struct e1000_tx *tp = &s->tx; s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE); if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */ if (le32_to_cpu(xp->cmd_and_length) & E1000_TXD_CMD_TSE) { e1000x_read_tx_ctx_descr(xp, &tp->tso_props); s->use_tso_for_migration = 1; tp->tso_frames = 0; } else { e1000x_read_tx_ctx_descr(xp, &tp->props); s->use_tso_for_migration = 0; } return; } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) { // data descriptor if (tp->size == 0) { tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8; } tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0; } else { // legacy descriptor tp->cptse = 0; } if (e1000x_vlan_enabled(s->mac_reg) && e1000x_is_vlan_txd(txd_lower) && (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) { tp->vlan_needed = 1; stw_be_p(tp->vlan_header, le16_to_cpu(s->mac_reg[VET])); stw_be_p(tp->vlan_header + 2, le16_to_cpu(dp->upper.fields.special)); } addr = le64_to_cpu(dp->buffer_addr); if (tp->cptse) { msh = tp->tso_props.hdr_len + tp->tso_props.mss; do { bytes = split_size; if (tp->size >= msh) { goto eop; } if (tp->size + bytes > msh) bytes = msh - tp->size; bytes = MIN(sizeof(tp->data) - tp->size, bytes); pci_dma_read(d, addr, tp->data + tp->size, bytes); sz = tp->size + bytes; if (sz >= tp->tso_props.hdr_len && tp->size < tp->tso_props.hdr_len) { memmove(tp->header, tp->data, tp->tso_props.hdr_len); } tp->size = sz; addr += bytes; if (sz == msh) { xmit_seg(s); memmove(tp->data, tp->header, tp->tso_props.hdr_len); tp->size = tp->tso_props.hdr_len; } split_size -= bytes; } while (bytes && split_size); } else { split_size = MIN(sizeof(tp->data) - tp->size, split_size); pci_dma_read(d, addr, tp->data + tp->size, split_size); tp->size += split_size; } eop: if (!(txd_lower & E1000_TXD_CMD_EOP)) return; if (!(tp->cptse && tp->size < tp->tso_props.hdr_len)) { xmit_seg(s); } tp->tso_frames = 0; tp->sum_needed = 0; tp->vlan_needed = 0; tp->size = 0; tp->cptse = 0; }
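A minimal sketch (standalone C, not the e1000 code) of the unsigned underflow the commit message describes: once tp->size has already reached msh, the adjustment bytes = msh - tp->size wraps around to a huge value instead of going negative, which is why the fix bails out early when tp->size >= msh.

#include <stdio.h>

int main(void)
{
    unsigned int msh = 0x100;       /* hypothetical header length + MSS */
    unsigned int tp_size = 0x200;   /* more data already buffered than msh */
    unsigned int bytes = 0x40;      /* fragment size taken from the descriptor */

    if (tp_size + bytes > msh) {
        bytes = msh - tp_size;      /* wraps to 0xffffff00 on 32-bit unsigned */
    }
    printf("bytes = 0x%x\n", bytes);

    /* The fix adds "if (tp->size >= msh) goto eop;" before this point so the
     * wrapping subtraction can never be reached. */
    return 0;
}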
56b90e60c4019b08012bd8bd1459efc00b055577
https://github.com/qemu/qemu
not_vulnerable
target/avr: Fix interrupt execution Only one interrupt is in progress at a time. It is only necessary to reset interrupt_request after all interrupts have been executed. Signed-off-by: Ivanov Arkasha <ivanovrkasha@gmail.com> Message-Id: <20210312164754.18437-1-arkaisp2021@gmail.com> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { bool ret = false; CPUClass *cc = CPU_GET_CLASS(cs); AVRCPU *cpu = AVR_CPU(cs); CPUAVRState *env = &cpu->env; if (interrupt_request & CPU_INTERRUPT_RESET) { if (cpu_interrupts_enabled(env)) { cs->exception_index = EXCP_RESET; cc->tcg_ops->do_interrupt(cs); cs->interrupt_request &= ~CPU_INTERRUPT_RESET; ret = true; } } if (interrupt_request & CPU_INTERRUPT_HARD) { if (cpu_interrupts_enabled(env) && env->intsrc != 0) { int index = ctz32(env->intsrc); cs->exception_index = EXCP_INT(index); cc->tcg_ops->do_interrupt(cs); env->intsrc &= env->intsrc - 1; /* clear the interrupt */ if (!env->intsrc) { cs->interrupt_request &= ~CPU_INTERRUPT_HARD; } ret = true; } } return ret; }
a21993c7f98862823280d1eb6d3e93cf6267896f
https://github.com/qemu/qemu
not_vulnerable
target/tricore: Fix OPC2_32_RRPW_EXTR for width=0 If width was 0 we would run into the assertion: qemu-system-tricore: tcg/tcg-op.c:217: tcg_gen_sari_i32: Assertion `arg2 >= 0 && arg2 < 32' failed. The instruction manual specifies undefined behaviour for this case. So we bring this in line with the golden Infineon simulator 'tsim', which simply writes 0 to the result in case of width=0. Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Signed-off-by: Bastian Koppelmann <kbastian@mail.uni-paderborn.de>
static void decode_rrpw_extract_insert(DisasContext *ctx) { uint32_t op2; int r1, r2, r3; int32_t pos, width; TCGv temp; op2 = MASK_OP_RRPW_OP2(ctx->opcode); r1 = MASK_OP_RRPW_S1(ctx->opcode); r2 = MASK_OP_RRPW_S2(ctx->opcode); r3 = MASK_OP_RRPW_D(ctx->opcode); pos = MASK_OP_RRPW_POS(ctx->opcode); width = MASK_OP_RRPW_WIDTH(ctx->opcode); switch (op2) { case OPC2_32_RRPW_EXTR: if (width == 0) { tcg_gen_movi_tl(cpu_gpr_d[r3], 0); break; } if (pos + width <= 32) { /* optimize special cases */ if ((pos == 0) && (width == 8)) { tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); } else if ((pos == 0) && (width == 16)) { tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); } else { tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width); tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width); } } break; case OPC2_32_RRPW_EXTR_U: if (width == 0) { tcg_gen_movi_tl(cpu_gpr_d[r3], 0); } else { tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos); tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width)); } break; case OPC2_32_RRPW_IMASK: CHECK_REG_PAIR(r3); if (pos + width <= 32) { temp = tcg_temp_new(); tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos); tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos); tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp); tcg_temp_free(temp); } break; case OPC2_32_RRPW_INSERT: if (pos + width <= 32) { tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], pos, width); } break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } }
007479842b27e03173a333b8c2e0dae14be64f8d
https://github.com/qemu/qemu
not_vulnerable
target/tricore: Fix imask OPC2_32_RRPW_IMASK for r3+1 == r2 If r3+1 and r2 are the same then we would overwrite r2 with our first move and use the wrong result for the shift. Thus we store the result from the mov in a temp. Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Signed-off-by: Bastian Koppelmann <kbastian@mail.uni-paderborn.de>
static void decode_rrpw_extract_insert(DisasContext *ctx) { uint32_t op2; int r1, r2, r3; int32_t pos, width; TCGv temp; op2 = MASK_OP_RRPW_OP2(ctx->opcode); r1 = MASK_OP_RRPW_S1(ctx->opcode); r2 = MASK_OP_RRPW_S2(ctx->opcode); r3 = MASK_OP_RRPW_D(ctx->opcode); pos = MASK_OP_RRPW_POS(ctx->opcode); width = MASK_OP_RRPW_WIDTH(ctx->opcode); switch (op2) { case OPC2_32_RRPW_EXTR: if (pos + width <= 32) { /* optimize special cases */ if ((pos == 0) && (width == 8)) { tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); } else if ((pos == 0) && (width == 16)) { tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); } else { tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width); tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width); } } break; case OPC2_32_RRPW_EXTR_U: if (width == 0) { tcg_gen_movi_tl(cpu_gpr_d[r3], 0); } else { tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos); tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width)); } break; case OPC2_32_RRPW_IMASK: CHECK_REG_PAIR(r3); if (pos + width <= 32) { temp = tcg_temp_new(); tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos); tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos); tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp); tcg_temp_free(temp); } break; case OPC2_32_RRPW_INSERT: if (pos + width <= 32) { tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], pos, width); } break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } }
0266e8e3b3981b492e82be20bb97e8ed9792ed00
https://github.com/qemu/qemu
not_vulnerable
linux-user/elfload: fix address calculation in fallback scenario Previously, guest_loaddr was not taken into account when returning an address from pgb_find_hole when /proc/self/maps was unavailable which caused an improper guest_base address to be calculated. This could cause a SIGSEGV later in load_elf_image -> target_mmap for ET_EXEC type images since the mmap MAP_FIXED flag is specified which could clobber existing mappings at the address returnd by g2h(). mmap(0xd87000, 16846912, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|0x100000, -1, 0) = 0xd87000 munmap(0xd87000, 16846912) = 0 write(2, "Locating guest address space @ 0"..., 40Locating guest address space @ 0xd87000) = 40 mmap(0x1187000, 16850944, PROT_NONE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0) = 0x1187000 --- SIGSEGV {si_signo=SIGSEGV, si_code=SEGV_ACCERR, si_addr=0x2188310} --- +++ killed by SIGSEGV +++ Now, pgd_find_hole accounts for guest_loaddr in this scenario. Fixes: ad592e37dfcc ("linux-user: provide fallback pgd_find_hole for bare chroots") Signed-off-by: Vincent Fazio <vfazio@gmail.com> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <20210131061948.15990-1-vfazio@xes-inc.com> [lv: updated it to check if ret == -1] Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size, long align, uintptr_t offset) { GSList *maps, *iter; uintptr_t this_start, this_end, next_start, brk; intptr_t ret = -1; assert(QEMU_IS_ALIGNED(guest_loaddr, align)); maps = read_self_maps(); /* Read brk after we've read the maps, which will malloc. */ brk = (uintptr_t)sbrk(0); if (!maps) { ret = pgd_find_hole_fallback(guest_size, brk, align, offset); return ret == -1 ? -1 : ret - guest_loaddr; } /* The first hole is before the first map entry. */ this_start = mmap_min_addr; for (iter = maps; iter; this_start = next_start, iter = g_slist_next(iter)) { uintptr_t align_start, hole_size; this_end = ((MapInfo *)iter->data)->start; next_start = ((MapInfo *)iter->data)->end; align_start = ROUND_UP(this_start + offset, align); /* Skip holes that are too small. */ if (align_start >= this_end) { continue; } hole_size = this_end - align_start; if (hole_size < guest_size) { continue; } /* If this hole contains brk, give ourselves some room to grow. */ if (this_start <= brk && brk < this_end) { hole_size -= guest_size; if (sizeof(uintptr_t) == 8 && hole_size >= 1 * GiB) { align_start += 1 * GiB; } else if (hole_size >= 16 * MiB) { align_start += 16 * MiB; } else { align_start = (this_end - guest_size) & -align; if (align_start < this_start) { continue; } } } /* Record the lowest successful match. */ if (ret < 0) { ret = align_start - guest_loaddr; } /* If this hole contains the identity map, select it. */ if (align_start <= guest_loaddr && guest_loaddr + guest_size <= this_end) { ret = 0; } /* If this hole ends above the identity map, stop looking. */ if (this_end >= guest_loaddr) { break; } } free_self_maps(maps); return ret; }
934eed517857ce2de9a8a92c2599612581b4eb4a
https://github.com/qemu/qemu
not_vulnerable
linux-user/elfload: do not assume MAP_FIXED_NOREPLACE kernel support Previously, pgd_find_hole_fallback assumed that if the build host's libc had MAP_FIXED_NOREPLACE defined that the address returned by mmap would match the requested address. This is not a safe assumption for Linux kernels prior to 4.17 Now, we always compare mmap's resultant address with the requested address and no longer short-circuit based on MAP_FIXED_NOREPLACE. Fixes: 2667e069e7b5 ("linux-user: don't use MAP_FIXED in pgd_find_hole_fallback") Signed-off-by: Vincent Fazio <vfazio@gmail.com> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Message-Id: <20210131061930.14554-1-vfazio@xes-inc.com> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk, long align, uintptr_t offset) { uintptr_t base; /* Start (aligned) at the bottom and work our way up */ base = ROUND_UP(mmap_min_addr, align); while (true) { uintptr_t align_start, end; align_start = ROUND_UP(base, align); end = align_start + guest_size + offset; /* if brk is anywhere in the range give ourselves some room to grow. */ if (align_start <= brk && brk < end) { base = brk + (16 * MiB); continue; } else if (align_start + guest_size < align_start) { /* we have run out of space */ return -1; } else { int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE | MAP_FIXED_NOREPLACE; void * mmap_start = mmap((void *) align_start, guest_size, PROT_NONE, flags, -1, 0); if (mmap_start != MAP_FAILED) { munmap(mmap_start, guest_size); if (mmap_start == (void *) align_start) { return (uintptr_t) mmap_start + offset; } } base += qemu_host_page_size; } } }
7e588fbc57397daac02cf23677d1849aab7c7507
https://github.com/qemu/qemu
not_vulnerable
linux-user/elfload: munmap proper address in pgd_find_hole_fallback Previously, if the build host's libc did not define MAP_FIXED_NOREPLACE or if the running kernel didn't support that flag, it was possible for pgd_find_hole_fallback to munmap an incorrect address which could lead to SIGSEGV if the range happened to overlap with the mapped address of the QEMU binary. mmap(0x1000, 22261224, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0) = 0x7f889d331000 munmap(0x1000, 22261224) = 0 --- SIGSEGV {si_signo=SIGSEGV, si_code=SEGV_MAPERR, si_addr=0x84b817} --- ++ killed by SIGSEGV +++ Now, always munmap the address returned by mmap. Fixes: 2667e069e7b5 ("linux-user: don't use MAP_FIXED in pgd_find_hole_fallback") Signed-off-by: Vincent Fazio <vfazio@gmail.com> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Message-Id: <20210131061849.12615-1-vfazio@xes-inc.com> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk, long align, uintptr_t offset) { uintptr_t base; /* Start (aligned) at the bottom and work our way up */ base = ROUND_UP(mmap_min_addr, align); while (true) { uintptr_t align_start, end; align_start = ROUND_UP(base, align); end = align_start + guest_size + offset; /* if brk is anywhere in the range give ourselves some room to grow. */ if (align_start <= brk && brk < end) { base = brk + (16 * MiB); continue; } else if (align_start + guest_size < align_start) { /* we have run out of space */ return -1; } else { int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE | MAP_FIXED_NOREPLACE; void * mmap_start = mmap((void *) align_start, guest_size, PROT_NONE, flags, -1, 0); if (mmap_start != MAP_FAILED) { munmap(mmap_start, guest_size); if (MAP_FIXED_NOREPLACE != 0 || mmap_start == (void *) align_start) { return (uintptr_t) mmap_start + offset; } } base += qemu_host_page_size; } } }
08f3a96b33e7eef39b651af9edb5e6de8ff13371
https://github.com/qemu/qemu
not_vulnerable
linux-user: Fix executable page of /proc/self/maps The guest binary and libraries are not always mapped with the executable bit in the host process. The guest may read a /proc/self/maps with no executable address range. The perm fields should be based on the guest permissions inside Qemu. Signed-off-by: Nicolas Surbayrole <nsurbayrole@quarkslab.com> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Acked-by: Alex Bennée <alex.bennee@linaro.org> Message-Id: <20210308091959.986540-1-nsurbayrole@quarkslab.com> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static int open_self_maps(void *cpu_env, int fd) { CPUState *cpu = env_cpu((CPUArchState *)cpu_env); TaskState *ts = cpu->opaque; GSList *map_info = read_self_maps(); GSList *s; int count; for (s = map_info; s; s = g_slist_next(s)) { MapInfo *e = (MapInfo *) s->data; if (h2g_valid(e->start)) { unsigned long min = e->start; unsigned long max = e->end; int flags = page_get_flags(h2g(min)); const char *path; max = h2g_valid(max - 1) ? max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1; if (page_check_range(h2g(min), max - min, flags) == -1) { continue; } if (h2g(min) == ts->info->stack_limit) { path = "[stack]"; } else { path = e->path; } count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr " %c%c%c%c %08" PRIx64 " %s %"PRId64, h2g(min), h2g(max - 1) + 1, (flags & PAGE_READ) ? 'r' : '-', (flags & PAGE_WRITE_ORG) ? 'w' : '-', (flags & PAGE_EXEC) ? 'x' : '-', e->is_priv ? 'p' : '-', (uint64_t) e->offset, e->dev, e->inode); if (path) { dprintf(fd, "%*s%s\n", 73 - count, "", path); } else { dprintf(fd, "\n"); } } } free_self_maps(map_info); #ifdef TARGET_VSYSCALL_PAGE /* * We only support execution from the vsyscall page. * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3. */ count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx " --xp 00000000 00:00 0", TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE); dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]"); #endif return 0; }
516fc0a081161eab5b3a89c7f243954945ee390e
https://github.com/qemu/qemu
not_vulnerable
accel: kvm: Fix kvm_type invocation Prior to commit f2ce39b4f067 a MachineClass kvm_type method only needed to be registered to ensure it would be executed. With commit f2ce39b4f067 a kvm-type machine property must also be specified. hw/arm/virt relies on the kvm_type method to pass its selected IPA limit to KVM, but this is not exposed as a machine property. Restore the previous functionality of invoking kvm_type when it's present. Fixes: f2ce39b4f067 ("vl: make qemu_get_machine_opts static") Signed-off-by: Andrew Jones <drjones@redhat.com> Reviewed-by: Eric Auger <eric.auger@redhat.com> Message-id: 20210310135218.255205-2-drjones@redhat.com Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); static const char upgrade_note[] = "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" "(see http://sourceforge.net/projects/kvm).\n"; struct { const char *name; int num; } num_cpus[] = { { "SMP", ms->smp.cpus }, { "hotpluggable", ms->smp.max_cpus }, { NULL, } }, *nc = num_cpus; int soft_vcpus_limit, hard_vcpus_limit; KVMState *s; const KVMCapabilityInfo *missing_cap; int ret; int type = 0; uint64_t dirty_log_manual_caps; s = KVM_STATE(ms->accelerator); /* * On systems where the kernel can support different base page * sizes, host page size may be different from TARGET_PAGE_SIZE, * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum * page size for the system though. */ assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size); s->sigmask_len = 8; #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif QLIST_INIT(&s->kvm_parked_vcpus); s->vmfd = -1; s->fd = qemu_open_old("/dev/kvm", O_RDWR); if (s->fd == -1) { fprintf(stderr, "Could not access KVM kernel module: %m\n"); ret = -errno; goto err; } ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { if (ret >= 0) { ret = -EINVAL; } fprintf(stderr, "kvm version too old\n"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, "kvm version not supported\n"); goto err; } kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT); s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); /* If unspecified, use the default value */ if (!s->nr_slots) { s->nr_slots = 32; } s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); if (s->nr_as <= 1) { s->nr_as = 1; } s->as = g_new0(struct KVMAs, s->nr_as); if (object_property_find(OBJECT(current_machine), "kvm-type")) { g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), "kvm-type", &error_abort); type = mc->kvm_type(ms, kvm_type); } else if (mc->kvm_type) { type = mc->kvm_type(ms, NULL); } do { ret = kvm_ioctl(s, KVM_CREATE_VM, type); } while (ret == -EINTR); if (ret < 0) { fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret, strerror(-ret)); #ifdef TARGET_S390X if (ret == -EINVAL) { fprintf(stderr, "Host kernel setup problem detected. 
Please verify:\n"); fprintf(stderr, "- for kernels supporting the switch_amode or" " user_mode parameters, whether\n"); fprintf(stderr, " user space is running in primary address space\n"); fprintf(stderr, "- for kernels supporting the vm.allocate_pgste sysctl, " "whether it is enabled\n"); } #endif goto err; } s->vmfd = ret; /* check the vcpu limits */ soft_vcpus_limit = kvm_recommended_vcpus(s); hard_vcpus_limit = kvm_max_vcpus(s); while (nc->name) { if (nc->num > soft_vcpus_limit) { warn_report("Number of %s cpus requested (%d) exceeds " "the recommended cpus supported by KVM (%d)", nc->name, nc->num, soft_vcpus_limit); if (nc->num > hard_vcpus_limit) { fprintf(stderr, "Number of %s cpus requested (%d) exceeds " "the maximum cpus supported by KVM (%d)\n", nc->name, nc->num, hard_vcpus_limit); exit(1); } } nc++; } missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); if (!missing_cap) { missing_cap = kvm_check_extension_list(s, kvm_arch_required_capabilities); } if (missing_cap) { ret = -EINVAL; fprintf(stderr, "kvm does not support %s\n%s", missing_cap->name, upgrade_note); goto err; } s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); s->coalesced_pio = s->coalesced_mmio && kvm_check_extension(s, KVM_CAP_COALESCED_PIO); dirty_log_manual_caps = kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET); s->manual_dirty_log_protect = dirty_log_manual_caps; if (dirty_log_manual_caps) { ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, dirty_log_manual_caps); if (ret) { warn_report("Trying to enable capability %"PRIu64" of " "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. " "Falling back to the legacy mode. ", dirty_log_manual_caps); s->manual_dirty_log_protect = 0; } } #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif s->robust_singlestep = kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); #ifdef KVM_CAP_DEBUGREGS s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); #endif s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); #ifdef KVM_CAP_IRQ_ROUTING kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); #endif s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); s->irq_set_ioctl = KVM_IRQ_LINE; if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; } kvm_readonly_mem_allowed = (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); kvm_eventfds_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); kvm_irqfds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); kvm_resamplefds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); kvm_vm_attributes_allowed = (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); kvm_ioeventfd_any_length_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0); kvm_state = s; ret = kvm_arch_init(ms, s); if (ret < 0) { goto err; } if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } qemu_register_reset(kvm_unpoison_all, NULL); if (s->kernel_irqchip_allowed) { kvm_irqchip_create(s); } if (kvm_eventfds_allowed) { s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; } s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; kvm_memory_listener_register(s, &s->memory_listener, &address_space_memory, 0); if (kvm_eventfds_allowed) { memory_listener_register(&kvm_io_listener, &address_space_io); } memory_listener_register(&kvm_coalesced_pio_listener, &address_space_io); s->many_ioeventfds = kvm_check_many_ioeventfds(); s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); if (!s->sync_mmu) { ret = ram_block_discard_disable(true); assert(!ret); } return 0; err: assert(ret < 0); if (s->vmfd >= 0) { close(s->vmfd); } if (s->fd != -1) { close(s->fd); } g_free(s->memory_listener.slots); return ret; }
fc49b77fd391fdcfc9c0b61a8c301ac0d15232e9
https://github.com/qemu/qemu
1not_vulnerable
hw/timer/sse-timer: Propagate eventual error in sse_timer_realize() If the SSECounter link is absent, we set an error message in sse_timer_realize() but forgot to propagate this error. Add the missing 'return'. Fixes: CID 1450755 (Null pointer dereferences) Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-id: 20210312001845.1562670-1-f4bug@amsat.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void sse_timer_realize(DeviceState *dev, Error **errp) { SSETimer *s = SSE_TIMER(dev); if (!s->counter) { error_setg(errp, "counter property was not set"); return; } s->counter_notifier.notify = sse_timer_counter_callback; sse_counter_register_consumer(s->counter, &s->counter_notifier); timer_init_ns(&s->timer, QEMU_CLOCK_VIRTUAL, sse_timer_cb, s); }
fd911a21414b5a17663fa2b97f1059fb11cee99d
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Fix sve_punpk_p vs odd vector lengths Wrote too much with punpk1 with vl % 512 != 0. Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210309155305.11301-4-richard.henderson@linaro.org Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
uint32_t HELPER(sve_predtest1)(uint64_t d, uint64_t g) { return iter_predtest_fwd(d, g, PREDTEST_INIT); }
8e7fefed1bdcc0f7e722ccf2a2fc2b4f79fe725e
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Fix sve_zip_p vs odd vector lengths Wrote too much with low-half zip (zip1) with vl % 512 != 0. Adjust all of the x + (y << s) to x | (y << s) as a style fix. We only ever have exact overlap between D, M, and N. Therefore we only need a single temporary, and we do not need to check for partial overlap. Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210309155305.11301-3-richard.henderson@linaro.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
uint32_t HELPER(sve_predtest1)(uint64_t d, uint64_t g) { return iter_predtest_fwd(d, g, PREDTEST_INIT); }
226e6c046c0fce8da32575aad020ca56a5a8064d
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Fix sve_uzp_p vs odd vector lengths Missed out on compressing the second half of a predicate with length vl % 512 > 256. Adjust all of the x + (y << s) to x | (y << s) as a general style fix. Drop the extract64 because the input uint64_t are known to be already zero-extended from the current size of the predicate. Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210309155305.11301-2-richard.henderson@linaro.org Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
uint32_t HELPER(sve_predtest1)(uint64_t d, uint64_t g) { return iter_predtest_fwd(d, g, PREDTEST_INIT); }
dde3f08b5cab24e570fc0ccbbbab86b6b50aad23
https://github.com/qemu/qemu
1not_vulnerable
virtio-iommu: Handle non power of 2 range invalidations Unmap notifiers work with an address mask assuming an invalidation range of a power of 2. Nothing mandates this in the VIRTIO-IOMMU spec. So in case the range is not a power of 2, split it into several invalidations. Signed-off-by: Eric Auger <eric.auger@redhat.com> Reviewed-by: Peter Xu <peterx@redhat.com> Message-id: 20210309102742.30442-4-eric.auger@redhat.com Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start, hwaddr virt_end) { IOMMUTLBEvent event; uint64_t delta = virt_end - virt_start; if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) { return; } trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end); event.type = IOMMU_NOTIFIER_UNMAP; event.entry.target_as = &address_space_memory; event.entry.perm = IOMMU_NONE; event.entry.translated_addr = 0; event.entry.addr_mask = delta; event.entry.iova = virt_start; if (delta == UINT64_MAX) { memory_region_notify_iommu(mr, 0, event); } while (virt_start != virt_end + 1) { uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64); event.entry.addr_mask = mask; event.entry.iova = virt_start; memory_region_notify_iommu(mr, 0, event); virt_start += mask + 1; } }
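Editor's note: the loop above assumes a range-splitting helper (dma_aligned_pow2_mask) that is not included in this record. Below is a minimal, self-contained sketch of such a helper under the semantics the loop implies: return the largest mask M, with M+1 a power of two, such that start is (M+1)-aligned and start + M <= end. This is an illustration only, not the QEMU implementation, and it omits the max_addr_bits cap (the caller above always passes 64).

#include <stdint.h>

/* Sketch only: largest aligned power-of-two chunk mask for [start, end]. */
static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end)
{
    /* Alignment of start, expressed as a mask (start == 0: unconstrained). */
    uint64_t align_mask = start ? (start & -start) - 1 : UINT64_MAX;
    /* Remaining range length minus one, expressed as a mask. */
    uint64_t size_mask = end - start;

    if (align_mask <= size_mask) {
        return align_mask;              /* limited by the alignment of start */
    }
    if (size_mask == UINT64_MAX) {
        return UINT64_MAX;              /* whole 64-bit space */
    }
    /* Otherwise limited by the remaining size: round it down to a power of two. */
    uint64_t size = size_mask + 1;
    size |= size >> 1;  size |= size >> 2;  size |= size >> 4;
    size |= size >> 8;  size |= size >> 16; size |= size >> 32;
    return (size - (size >> 1)) - 1;
}

With this, unmapping [0x1000, 0x2fff] is split into two notifications of 0x1000 bytes each, while a power-of-two range such as [0x2000, 0x3fff] still goes out as a single notification.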
41ce9a912641cd7f820bcfccea15e30efc32104e
https://github.com/qemu/qemu
1not_vulnerable
intel_iommu: Fix mask may be uninitialized in vtd_context_device_invalidate With -Werror=maybe-uninitialized configuration we get ../hw/i386/intel_iommu.c: In function ‘vtd_context_device_invalidate’: ../hw/i386/intel_iommu.c:1888:10: error: ‘mask’ may be used uninitialized in this function [-Werror=maybe-uninitialized] 1888 | mask = ~mask; | ~~~~~^~~~~~~ Add a g_assert_not_reached() to avoid the error. Signed-off-by: Eric Auger <eric.auger@redhat.com> Reviewed-by: Peter Xu <peterx@redhat.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Message-id: 20210309102742.30442-2-eric.auger@redhat.com Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void vtd_context_device_invalidate(IntelIOMMUState *s, uint16_t source_id, uint16_t func_mask) { uint16_t mask; VTDBus *vtd_bus; VTDAddressSpace *vtd_as; uint8_t bus_n, devfn; uint16_t devfn_it; trace_vtd_inv_desc_cc_devices(source_id, func_mask); switch (func_mask & 3) { case 0: mask = 0; /* No bits in the SID field masked */ break; case 1: mask = 4; /* Mask bit 2 in the SID field */ break; case 2: mask = 6; /* Mask bit 2:1 in the SID field */ break; case 3: mask = 7; /* Mask bit 2:0 in the SID field */ break; default: g_assert_not_reached(); } mask = ~mask; bus_n = VTD_SID_TO_BUS(source_id); vtd_bus = vtd_find_as_from_bus_num(s, bus_n); if (vtd_bus) { devfn = VTD_SID_TO_DEVFN(source_id); for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { vtd_as = vtd_bus->dev_as[devfn_it]; if (vtd_as && ((devfn_it & mask) == (devfn & mask))) { trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it), VTD_PCI_FUNC(devfn_it)); vtd_iommu_lock(s); vtd_as->context_cache_entry.context_cache_gen = 0; vtd_iommu_unlock(s); /* * Do switch address space when needed, in case if the * device passthrough bit is switched. */ vtd_switch_address_space(vtd_as); /* * So a device is moving out of (or moving into) a * domain, resync the shadow page table. * This won't bring bad even if we have no such * notifier registered - the IOMMU notification * framework will skip MAP notifications if that * happened. */ vtd_sync_shadow_page_table(vtd_as); } } } }
d6cbd8f7a19e6f0fd22a598aad992c4913f481f2
https://github.com/qemu/qemu
1not_vulnerable
target/m68k: don't set SSW ATC bit for physical bus errors If a NuBus slot doesn't contain a card, the Quadra hardware generates a physical bus error if the CPU attempts to access the slot address space. Both Linux and MacOS use a separate bus error handler during NuBus accesses in order to detect and recover when addressing empty slots. According to the MC68040 users manual the ATC bit of the SSW is used to distinguish between ATC faults and physical bus errors. MacOS specifically checks the stack frame generated by a NuBus error and panics if the SSW ATC bit is set. Update m68k_cpu_transaction_failed() so that the SSW ATC bit is not set if the memory API returns MEMTX_DECODE_ERROR which will be used to indicate that an access to an empty NuBus slot occurred. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <20210308121155.2476-2-mark.cave-ayland@ilande.co.uk> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; cpu_restore_state(cs, retaddr, true); if (m68k_feature(env, M68K_FEATURE_M68040)) { env->mmu.mmusr = 0; /* * According to the MC68040 users manual the ATC bit of the SSW is * used to distinguish between ATC faults and physical bus errors. * In the case of a bus error e.g. during nubus read from an empty * slot this bit should not be set */ if (response != MEMTX_DECODE_ERROR) { env->mmu.ssw |= M68K_ATC_040; } /* FIXME: manage MMU table access error */ env->mmu.ssw &= ~M68K_TM_040; if (env->sr & SR_S) { /* SUPERVISOR */ env->mmu.ssw |= M68K_TM_040_SUPER; } if (access_type == MMU_INST_FETCH) { /* instruction or data */ env->mmu.ssw |= M68K_TM_040_CODE; } else { env->mmu.ssw |= M68K_TM_040_DATA; } env->mmu.ssw &= ~M68K_BA_SIZE_MASK; switch (size) { case 1: env->mmu.ssw |= M68K_BA_SIZE_BYTE; break; case 2: env->mmu.ssw |= M68K_BA_SIZE_WORD; break; case 4: env->mmu.ssw |= M68K_BA_SIZE_LONG; break; } if (access_type != MMU_DATA_STORE) { env->mmu.ssw |= M68K_RW_040; } env->mmu.ar = addr; cs->exception_index = EXCP_ACCESS; cpu_loop_exit(cs); } }
e251b5876383cac918b2cd03be034a5d24310b87
https://github.com/qemu/qemu
1not_vulnerable
ui/gtk: Remove NULL checks in gd_switch c821a58ee7 ("ui/console: Pass placeholder surface to display") eliminated the possibility that NULL is passed as surface to dpy_gfx_switch and removed some NULL checks from gd_switch, but the removal was not thorough. Remaining NULL checks were confusing for Coverity and probably also for humans. This change removes those NULL checks. Reported-by: Coverity (CID 1448421) Signed-off-by: Akihiko Odaki <akihiko.odaki@gmail.com> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Message-Id: <20210308140713.17901-1-akihiko.odaki@gmail.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
static void gd_switch(DisplayChangeListener *dcl, DisplaySurface *surface) { VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); bool resized = true; trace_gd_switch(vc->label, surface_width(surface), surface_height(surface)); if (vc->gfx.surface) { cairo_surface_destroy(vc->gfx.surface); vc->gfx.surface = NULL; } if (vc->gfx.convert) { pixman_image_unref(vc->gfx.convert); vc->gfx.convert = NULL; } if (vc->gfx.ds && surface_width(vc->gfx.ds) == surface_width(surface) && surface_height(vc->gfx.ds) == surface_height(surface)) { resized = false; } vc->gfx.ds = surface; if (surface->format == PIXMAN_x8r8g8b8) { /* * PIXMAN_x8r8g8b8 == CAIRO_FORMAT_RGB24 * * No need to convert, use surface directly. Should be the * common case as this is qemu_default_pixelformat(32) too. */ vc->gfx.surface = cairo_image_surface_create_for_data (surface_data(surface), CAIRO_FORMAT_RGB24, surface_width(surface), surface_height(surface), surface_stride(surface)); } else { /* Must convert surface, use pixman to do it. */ vc->gfx.convert = pixman_image_create_bits(PIXMAN_x8r8g8b8, surface_width(surface), surface_height(surface), NULL, 0); vc->gfx.surface = cairo_image_surface_create_for_data ((void *)pixman_image_get_data(vc->gfx.convert), CAIRO_FORMAT_RGB24, pixman_image_get_width(vc->gfx.convert), pixman_image_get_height(vc->gfx.convert), pixman_image_get_stride(vc->gfx.convert)); pixman_image_composite(PIXMAN_OP_SRC, vc->gfx.ds->image, NULL, vc->gfx.convert, 0, 0, 0, 0, 0, 0, pixman_image_get_width(vc->gfx.convert), pixman_image_get_height(vc->gfx.convert)); } if (resized) { gd_update_windowsize(vc); } else { gd_update_full_redraw(vc); } }
81b3ddaf8772ec6f88d372e52f9b433cfa46bc46
https://github.com/qemu/qemu
1not_vulnerable
hw/timer/renesas_tmr: Fix use of uninitialized data in read_tcnt() The read_tcnt() function calculates the TCNT register values for the two channels of the timer module; it sets these up in the local tcnt[] array, and eventually returns either one or both of them, depending on whether the access is 8 or 16 bits. However, not all of the code paths through this function set both elements of this array: if the guest has programmed the TCCR.CSS register fields to values which are either documented as not to be used or which QEMU does not implement, then the function will return uninitialized data. (This was spotted by Coverity.) Add the missing CSS cases to this code, so that we return a consistent value instead of uninitialized data, and so the code structure indicates what's happening. Fixes: CID 1429976 Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-id: 20210219223241.16344-3-peter.maydell@linaro.org
static uint16_t read_tcnt(RTMRState *tmr, unsigned size, int ch) { int64_t delta, now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); int elapsed, ovf = 0; uint16_t tcnt[2]; uint32_t ret; delta = (now - tmr->tick) * NANOSECONDS_PER_SECOND / tmr->input_freq; if (delta > 0) { tmr->tick = now; switch (FIELD_EX8(tmr->tccr[1], TCCR, CSS)) { case CSS_INTERNAL: /* timer1 count update */ elapsed = elapsed_time(tmr, 1, delta); if (elapsed >= 0x100) { ovf = elapsed >> 8; } tcnt[1] = tmr->tcnt[1] + (elapsed & 0xff); break; case CSS_INVALID: /* guest error to have set this */ case CSS_EXTERNAL: /* QEMU doesn't implement these */ case CSS_CASCADING: tcnt[1] = tmr->tcnt[1]; break; } switch (FIELD_EX8(tmr->tccr[0], TCCR, CSS)) { case CSS_INTERNAL: elapsed = elapsed_time(tmr, 0, delta); tcnt[0] = tmr->tcnt[0] + elapsed; break; case CSS_CASCADING: tcnt[0] = tmr->tcnt[0] + ovf; break; case CSS_INVALID: /* guest error to have set this */ case CSS_EXTERNAL: /* QEMU doesn't implement this */ tcnt[0] = tmr->tcnt[0]; break; } } else { tcnt[0] = tmr->tcnt[0]; tcnt[1] = tmr->tcnt[1]; } if (size == 1) { return tcnt[ch]; } else { ret = 0; ret = deposit32(ret, 0, 8, tcnt[1]); ret = deposit32(ret, 8, 8, tcnt[0]); return ret; } }
382907b10077ed4cff48d9afe219a023887c0522
https://github.com/qemu/qemu
1not_vulnerable
spapr_drc.c: do not call spapr_drc_detach() in drc_isolate_logical() drc_isolate_logical() is used to move the DRC from the "Configured" to the "Available" state, erroring out if the DRC is in the unexpected "Unisolate" state and doing nothing (with RTAS_OUT_SUCCESS) if the DRC is already in "Available" or in "Unusable" state. When moving from "Configured" to "Available", the DRC is moved to the LOGICAL_AVAILABLE state, a drc->unplug_requested check is done and, if true, spapr_drc_detach() is called. What spapr_drc_detach() does then is: - set drc->unplug_requested to true. In fact, this is the only place where unplug_request is set to true; - does nothing else if drc->state != drck->empty_state. If the DRC state is equal to drck->empty_state, spapr_drc_release() is called. For logical DRCs, drck->empty_state = LOGICAL_UNUSABLE. In short, calling spapr_drc_detach() in drc_isolate_logical() does nothing. It'll set unplug_request to true again ('again' since it was already true - otherwise the function wouldn't be called), and will return without calling spapr_drc_release() because the DRC is not in LOGICAL_UNUSABLE, since drc_isolate_logical() just moved it to LOGICAL_AVAILABLE. The only place where the logical DRC is released is when called from drc_set_unusable(), when it is moved to the "Unusable" state. As it should, according to PAPR. Even though calling spapr_drc_detach() in drc_isolate_logical() is benign, removing it will avoid further thought about the matter. So let's go ahead and do that. As a note, this logic was introduced in commit bbf5c878ab76. Since then, the DRC handling code was refactored and enhanced, and PAPR itself went through some changes in the DRC area as well. It is expected that some assumptions we had back then are now deprecated. Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com> Message-Id: <20210211225246.17315-2-danielhb413@gmail.com> Reviewed-by: Greg Kurz <groug@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
static uint32_t drc_isolate_logical(SpaprDrc *drc) { switch (drc->state) { case SPAPR_DRC_STATE_LOGICAL_AVAILABLE: case SPAPR_DRC_STATE_LOGICAL_UNUSABLE: return RTAS_OUT_SUCCESS; /* Nothing to do */ case SPAPR_DRC_STATE_LOGICAL_CONFIGURED: break; /* see below */ case SPAPR_DRC_STATE_LOGICAL_UNISOLATE: return RTAS_OUT_PARAM_ERROR; /* not allowed */ default: g_assert_not_reached(); } /* * Fail any requests to ISOLATE the LMB DRC if this LMB doesn't * belong to a DIMM device that is marked for removal. * * Currently the guest userspace tool drmgr that drives the memory * hotplug/unplug will just try to remove a set of 'removable' LMBs * in response to a hot unplug request that is based on drc-count. * If the LMB being removed doesn't belong to a DIMM device that is * actually being unplugged, fail the isolation request here. */ if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB && !drc->unplug_requested) { return RTAS_OUT_HW_ERROR; } drc->state = SPAPR_DRC_STATE_LOGICAL_AVAILABLE; return RTAS_OUT_SUCCESS; }
166a1cf404cdea4c5839e3bd3028a6d28cb25b43
https://github.com/qemu/qemu
1not_vulnerable
backends/dbus-vmstate: Fix short read error handling When dbus_vmstate_post_load() fails, it complains to stderr. Except on short read, where it checks with g_return_val_if_fail(). This fails silently if G_DISABLE_CHECKS is undefined (it should be), or else pads the short read with uninitialized bytes. Replace g_return_val_if_fail() by a proper error check. Fixes: 5010cec2bc87dafab39b3913c8ca91f88df9c540 Signed-off-by: Markus Armbruster <armbru@redhat.com> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <20210126124240.2081959-2-armbru@redhat.com> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static int dbus_vmstate_post_load(void *opaque, int version_id) { DBusVMState *self = DBUS_VMSTATE(opaque); g_autoptr(GInputStream) m = NULL; g_autoptr(GDataInputStream) s = NULL; g_autoptr(GError) err = NULL; g_autoptr(GHashTable) proxies = NULL; uint32_t nelem; trace_dbus_vmstate_post_load(version_id); proxies = dbus_get_proxies(self, &err); if (!proxies) { error_report("%s: Failed to get proxies: %s", __func__, err->message); return -1; } m = g_memory_input_stream_new_from_data(self->data, self->data_size, NULL); s = g_data_input_stream_new(m); g_data_input_stream_set_byte_order(s, G_DATA_STREAM_BYTE_ORDER_BIG_ENDIAN); nelem = g_data_input_stream_read_uint32(s, NULL, &err); if (err) { goto error; } while (nelem > 0) { GDBusProxy *proxy = NULL; uint32_t len; gsize bytes_read, avail; char id[256]; len = g_data_input_stream_read_uint32(s, NULL, &err); if (err) { goto error; } if (len >= 256) { error_report("%s: Invalid DBus vmstate proxy name %u", __func__, len); return -1; } if (!g_input_stream_read_all(G_INPUT_STREAM(s), id, len, &bytes_read, NULL, &err)) { goto error; } if (bytes_read != len) { error_report("%s: Short read", __func__); return -1; } id[len] = 0; trace_dbus_vmstate_loading(id); proxy = g_hash_table_lookup(proxies, id); if (!proxy) { error_report("%s: Failed to find proxy Id '%s'", __func__, id); return -1; } len = g_data_input_stream_read_uint32(s, NULL, &err); avail = g_buffered_input_stream_get_available( G_BUFFERED_INPUT_STREAM(s)); if (len > DBUS_VMSTATE_SIZE_LIMIT || len > avail) { error_report("%s: Invalid vmstate size: %u", __func__, len); return -1; } if (dbus_load_state_proxy(proxy, g_buffered_input_stream_peek_buffer(G_BUFFERED_INPUT_STREAM(s), NULL), len) < 0) { error_report("%s: Failed to restore Id '%s'", __func__, id); return -1; } if (!g_seekable_seek(G_SEEKABLE(s), len, G_SEEK_CUR, NULL, &err)) { goto error; } nelem -= 1; } return 0; error: error_report("%s: Failed to read from stream: %s", __func__, err->message); return -1; }
e91bae8e98a6438156752dfbe9c0e2494d4b80f6
https://github.com/qemu/qemu
1not_vulnerable
scsi: Silence gcc warning On Fedora 33, gcc 10.2.1 notes that scsi_cdb_length(buf) can set len==-1, which in turn overflows g_malloc(): [5/5] Linking target qemu-system-x86_64 In function ‘scsi_disk_new_request_dump’, inlined from ‘scsi_new_request’ at ../hw/scsi/scsi-disk.c:2608:9: ../hw/scsi/scsi-disk.c:2582:19: warning: argument 1 value ‘18446744073709551612’ exceeds maximum object size 9223372036854775807 [-Walloc-size-larger-than=] 2582 | line_buffer = g_malloc(len * 5 + 1); | ^ Silence it with a decent assertion, since we only convert a buffer to bytes when we have a valid cdb length. Signed-off-by: Eric Blake <eblake@redhat.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Message-Id: <20210209152350.207958-1-eblake@redhat.com> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) { int i; int len = scsi_cdb_length(buf); char *line_buffer, *p; assert(len > 0 && len <= 16); line_buffer = g_malloc(len * 5 + 1); for (i = 0, p = line_buffer; i < len; i++) { p += sprintf(p, " 0x%02x", buf[i]); } trace_scsi_disk_new_request(lun, tag, line_buffer); g_free(line_buffer); }
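Editor's note, a worked check of the numbers above: each CDB byte is printed as " 0x%02x" (five characters), so the buffer needs len * 5 + 1 bytes; with the new assert constraining len to 1..16 the allocation is at most 81 bytes. Without it, a malformed CDB could make scsi_cdb_length() return -1, and -1 * 5 + 1 = -4 becomes 18446744073709551612 when converted to the unsigned size argument of g_malloc(), exactly the value gcc complains about in the warning quoted in the commit message.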
bb5643ff61291deb1d198f343a03828c5ead993f
https://github.com/qemu/qemu
1not_vulnerable
Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-hex-20210306' into staging Add hexagon to include/exec/poison.h Two Coverity fixes for target/hexagon/ # gpg: Signature made Sun 07 Mar 2021 01:37:05 GMT # gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F # gpg: issuer "richard.henderson@linaro.org" # gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full] # Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F * remotes/rth-gitlab/tags/pull-hex-20210306: target/hexagon/opcodes: Add missing varargs cleanup target/hexagon: Fix shift amount check in fASHIFTL/fLSHIFTR exec: Poison Hexagon target-specific definitions Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void init_attribs(int tag, ...) { va_list ap; int attr; va_start(ap, tag); while ((attr = va_arg(ap, int)) != 0) { set_bit(attr, opcode_attribs[tag]); } va_end(ap); }
a4ea92013d265f636d71b58408b67dbecd679d1d
https://github.com/qemu/qemu
1not_vulnerable
arm/ast2600: Fix SMP booting with -kernel The ast2600 machines do not have PSCI firmware, so this property should have never been set. Removing this node fixes SMP booting Linux kernels that have PSCI enabled, as Linux fails to find PSCI in the device tree and falls back to the soc-specific method for enabling secondary CPUs. The comment is out of date as Qemu has supported -kernel booting since 9bb6d14081ce ("aspeed: Add boot stub for smp booting"), in v5.1. Fixes: f25c0ae1079d ("aspeed/soc: Add AST2600 support") Signed-off-by: Joel Stanley <joel@jms.id.au> Reviewed-by: Cédric Le Goater <clg@kaod.org> Tested-by: Cédric Le Goater <clg@kaod.org> Message-Id: <20210303010505.635621-1-joel@jms.id.au> Signed-off-by: Cédric Le Goater <clg@kaod.org>
static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) { int i; AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); Error *err = NULL; qemu_irq irq; /* IO space */ create_unimplemented_device("aspeed_soc.io", sc->memmap[ASPEED_DEV_IOMEM], ASPEED_SOC_IOMEM_SIZE); /* Video engine stub */ create_unimplemented_device("aspeed.video", sc->memmap[ASPEED_DEV_VIDEO], 0x1000); /* CPU */ for (i = 0; i < sc->num_cpus; i++) { if (sc->num_cpus > 1) { object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar", ASPEED_A7MPCORE_ADDR, &error_abort); } object_property_set_int(OBJECT(&s->cpu[i]), "mp-affinity", aspeed_calc_affinity(i), &error_abort); object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 1125000000, &error_abort); if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { return; } } /* A7MPCORE */ object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", sc->num_cpus, &error_abort); object_property_set_int(OBJECT(&s->a7mpcore), "num-irq", ASPEED_SOC_AST2600_MAX_IRQ + GIC_INTERNAL, &error_abort); sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort); sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, ASPEED_A7MPCORE_ADDR); for (i = 0; i < sc->num_cpus; i++) { SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore); DeviceState *d = DEVICE(qemu_get_cpu(i)); irq = qdev_get_gpio_in(d, ARM_CPU_IRQ); sysbus_connect_irq(sbd, i, irq); irq = qdev_get_gpio_in(d, ARM_CPU_FIQ); sysbus_connect_irq(sbd, i + sc->num_cpus, irq); irq = qdev_get_gpio_in(d, ARM_CPU_VIRQ); sysbus_connect_irq(sbd, i + 2 * sc->num_cpus, irq); irq = qdev_get_gpio_in(d, ARM_CPU_VFIQ); sysbus_connect_irq(sbd, i + 3 * sc->num_cpus, irq); } /* SRAM */ memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram", sc->sram_size, &err); if (err) { error_propagate(errp, err); return; } memory_region_add_subregion(get_system_memory(), sc->memmap[ASPEED_DEV_SRAM], &s->sram); /* SCU */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); /* RTC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); /* Timer */ object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, sc->memmap[ASPEED_DEV_TIMER1]); for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } /* UART - attach an 8250 to the IO space as our UART5 */ serial_mm_init(get_system_memory(), sc->memmap[ASPEED_DEV_UART5], 2, aspeed_soc_get_irq(s, ASPEED_DEV_UART5), 38400, serial_hd(0), DEVICE_LITTLE_ENDIAN); /* I2C */ object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), sc->irqmap[ASPEED_DEV_I2C] + i); /* * The AST2600 SoC has one IRQ per I2C bus. Skip the common * IRQ (AST2400 and AST2500) and connect all bussses. 
*/ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), i + 1, irq); } /* FMC, The number of CS is set at the board level */ object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(s->dram_mr), &error_abort); if (!object_property_set_int(OBJECT(&s->fmc), "sdram-base", sc->memmap[ASPEED_DEV_SDRAM], errp)) { return; } if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1, s->fmc.ctrl->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); /* SPI */ for (i = 0; i < sc->spis_num; i++) { object_property_set_link(OBJECT(&s->spi[i]), "dram", OBJECT(s->dram_mr), &error_abort); object_property_set_int(OBJECT(&s->spi[i]), "num-cs", 1, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI1 + i]); sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1, s->spi[i].ctrl->flash_window_base); } /* EHCI */ for (i = 0; i < sc->ehcis_num; i++) { if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, sc->memmap[ASPEED_DEV_EHCI1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); } /* SDMC - SDRAM Memory Controller */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdmc), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, sc->memmap[ASPEED_DEV_SDMC]); /* Watch dog */ for (i = 0; i < sc->wdts_num; i++) { AspeedWDTClass *awc = ASPEED_WDT_GET_CLASS(&s->wdt[i]); object_property_set_link(OBJECT(&s->wdt[i]), "scu", OBJECT(&s->scu), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, sc->memmap[ASPEED_DEV_WDT] + i * awc->offset); } /* Net */ for (i = 0; i < sc->macs_num; i++) { object_property_set_bool(OBJECT(&s->ftgmac100[i]), "aspeed", true, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->ftgmac100[i]), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, sc->memmap[ASPEED_DEV_ETH1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, aspeed_soc_get_irq(s, ASPEED_DEV_ETH1 + i)); object_property_set_link(OBJECT(&s->mii[i]), "nic", OBJECT(&s->ftgmac100[i]), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->mii[i]), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->mii[i]), 0, sc->memmap[ASPEED_DEV_MII1 + i]); } /* XDMA */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->xdma), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->xdma), 0, sc->memmap[ASPEED_DEV_XDMA]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0, aspeed_soc_get_irq(s, ASPEED_DEV_XDMA)); /* GPIO */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio_1_8v), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio_1_8v), 0, sc->memmap[ASPEED_DEV_GPIO_1_8V]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio_1_8v), 0, aspeed_soc_get_irq(s, ASPEED_DEV_GPIO_1_8V)); /* SDHCI */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdhci), 0, sc->memmap[ASPEED_DEV_SDHCI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); /* eMMC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->emmc), 
errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->emmc), 0, sc->memmap[ASPEED_DEV_EMMC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_EMMC)); }
569dbe19c415865a3b2a1ca806f780d1bd5da2db
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix strerror printing Fix missing sign inversion. Signed-off-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com> Reviewed-by: Keith Busch <kbusch@kernel.org>
static void nvme_aio_err(NvmeRequest *req, int ret) { uint16_t status = NVME_SUCCESS; Error *local_err = NULL; switch (req->cmd.opcode) { case NVME_CMD_READ: status = NVME_UNRECOVERED_READ; break; case NVME_CMD_FLUSH: case NVME_CMD_WRITE: case NVME_CMD_WRITE_ZEROES: case NVME_CMD_ZONE_APPEND: status = NVME_WRITE_FAULT; break; default: status = NVME_INTERNAL_DEV_ERROR; break; } trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status); error_setg_errno(&local_err, -ret, "aio failed"); error_report_err(local_err); /* * Set the command status code to the first encountered error but allow a * subsequent Internal Device Error to trump it. */ if (req->status && status != NVME_INTERNAL_DEV_ERROR) { return; } req->status = status; }
594a2b742b15a81e3bb41938c25ad6520c38e3cc
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: use locally assigned QEMU IEEE OUI Commit 6eb7a071292a ("hw/block/nvme: change controller pci id") changed the controller to use a Red Hat assigned PCI Device and Vendor ID, but did not change the IEEE OUI away from the Intel IEEE OUI. Fix that and use the locally assigned QEMU IEEE OUI instead if the `use-intel-id` parameter is not explicitly set. Also reverse the Intel IEEE OUI bytes. Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com> Signed-off-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) { NvmeIdCtrl *id = &n->id_ctrl; uint8_t *pci_conf = pci_dev->config; id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' '); strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); id->cntlid = cpu_to_le16(n->cntlid); id->rab = 6; if (n->params.use_intel_id) { id->ieee[0] = 0xb3; id->ieee[1] = 0x02; id->ieee[2] = 0x00; } else { id->ieee[0] = 0x00; id->ieee[1] = 0x54; id->ieee[2] = 0x52; } id->mdts = n->params.mdts; id->ver = cpu_to_le32(NVME_SPEC_VER); id->oacs = cpu_to_le16(0); id->cntrltype = 0x1; /* * Because the controller always completes the Abort command immediately, * there can never be more than one concurrently executing Abort command, * so this value is never used for anything. Note that there can easily be * many Abort commands in the queues, but they are not considered * "executing" until processed by nvme_abort. * * The specification recommends a value of 3 for Abort Command Limit (four * concurrently outstanding Abort commands), so lets use that though it is * inconsequential. */ id->acl = 3; id->aerl = n->params.aerl; id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO; id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED; /* recommended default value (~70 C) */ id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING); id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL); id->sqes = (0x6 << 4) | 0x6; id->cqes = (0x4 << 4) | 0x4; id->nn = cpu_to_le32(n->num_namespaces); id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP | NVME_ONCS_FEATURES | NVME_ONCS_DSM | NVME_ONCS_COMPARE | NVME_ONCS_COPY); id->vwc = (0x2 << 1) | 0x1; id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0); id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN | NVME_CTRL_SGLS_BITBUCKET); nvme_init_subnqn(n); id->psd[0].mp = cpu_to_le16(0x9c4); id->psd[0].enlat = cpu_to_le32(0x10); id->psd[0].exlat = cpu_to_le32(0x4); if (n->subsys) { id->cmic |= NVME_CMIC_MULTI_CTRL; } NVME_CAP_SET_MQES(n->bar.cap, 0x7ff); NVME_CAP_SET_CQR(n->bar.cap, 1); NVME_CAP_SET_TO(n->bar.cap, 0xf); NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM); NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_CSI_SUPP); NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY); NVME_CAP_SET_MPSMAX(n->bar.cap, 4); NVME_CAP_SET_CMBS(n->bar.cap, n->params.cmb_size_mb ? 1 : 0); NVME_CAP_SET_PMRS(n->bar.cap, n->pmr.dev ? 1 : 0); n->bar.vs = NVME_SPEC_VER; n->bar.intmc = n->bar.intms = 0; }
92323c8c2566b8ea4cdfe8e72a22d2651b0ee6af
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix Close Zone Implicitly and Explicitly Open zones can be closed by the Close Zone management function. This got broken by a recent commit ("hw/block/nvme: refactor zone resource management") and now such commands fail with Invalid Zone State Transition status. Modify nvme_zrm_close() function to make Close Zone work correctly. Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone) { switch (nvme_get_zone_state(zone)) { case NVME_ZONE_STATE_FULL: return NVME_SUCCESS; case NVME_ZONE_STATE_IMPLICITLY_OPEN: case NVME_ZONE_STATE_EXPLICITLY_OPEN: nvme_aor_dec_open(ns); /* fallthrough */ case NVME_ZONE_STATE_CLOSED: nvme_aor_dec_active(ns); /* fallthrough */ case NVME_ZONE_STATE_EMPTY: nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL); return NVME_SUCCESS; default: return NVME_ZONE_INVAL_TRANSITION; } }
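Editor's note: the function captured in this record is nvme_zrm_finish(), not the nvme_zrm_close() that the commit message says was modified. As a hedged sketch only, mirroring the structure of nvme_zrm_finish() above and the behaviour described in the message (not taken from the actual patch), the repaired close path plausibly looks like this:

/* Sketch, not the actual QEMU code: close open zones, accept already-closed ones. */
static uint16_t nvme_zrm_close_sketch(NvmeNamespace *ns, NvmeZone *zone)
{
    switch (nvme_get_zone_state(zone)) {
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
        nvme_aor_dec_open(ns);
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
        /* fallthrough */
    case NVME_ZONE_STATE_CLOSED:
        return NVME_SUCCESS;
    default:
        return NVME_ZONE_INVAL_TRANSITION;
    }
}

Types and helpers (NvmeNamespace, nvme_aor_dec_open, ...) are those already used by the record's own code above.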
5a11a1ca0d0ed5be52070f1da8de89ef85941183
https://github.com/qemu/qemu
1not_vulnerable
blockdev: fix drive_backup_prepare() missed error We leak local_err and don't report failure to the caller. It's definitely wrong, let's fix. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Reviewed-by: Greg Kurz <groug@kaod.org> Reviewed-by: Alberto Garcia <berto@igalia.com> Message-Id: <20210202124956.63146-5-vsementsov@virtuozzo.com> Signed-off-by: Eric Blake <eblake@redhat.com>
static void drive_backup_prepare(BlkActionState *common, Error **errp) { DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); DriveBackup *backup; BlockDriverState *bs; BlockDriverState *target_bs; BlockDriverState *source = NULL; AioContext *aio_context; AioContext *old_context; QDict *options; Error *local_err = NULL; int flags; int64_t size; bool set_backing_hd = false; int ret; assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP); backup = common->action->u.drive_backup.data; if (!backup->has_mode) { backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; } bs = bdrv_lookup_bs(backup->device, backup->device, errp); if (!bs) { return; } if (!bs->drv) { error_setg(errp, "Device has no medium"); return; } aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); /* Paired with .clean() */ bdrv_drained_begin(bs); if (!backup->has_format) { backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ? NULL : (char *) bs->drv->format_name; } /* Early check to avoid creating target */ if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) { goto out; } flags = bs->open_flags | BDRV_O_RDWR; /* * See if we have a backing HD we can use to create our new image * on top of. */ if (backup->sync == MIRROR_SYNC_MODE_TOP) { /* * Backup will not replace the source by the target, so none * of the filters skipped here will be removed (in contrast to * mirror). Therefore, we can skip all of them when looking * for the first COW relationship. */ source = bdrv_cow_bs(bdrv_skip_filters(bs)); if (!source) { backup->sync = MIRROR_SYNC_MODE_FULL; } } if (backup->sync == MIRROR_SYNC_MODE_NONE) { source = bs; flags |= BDRV_O_NO_BACKING; set_backing_hd = true; } size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, "bdrv_getlength failed"); goto out; } if (backup->mode != NEW_IMAGE_MODE_EXISTING) { assert(backup->format); if (source) { /* Implicit filters should not appear in the filename */ BlockDriverState *explicit_backing = bdrv_skip_implicit_filters(source); bdrv_refresh_filename(explicit_backing); bdrv_img_create(backup->target, backup->format, explicit_backing->filename, explicit_backing->drv->format_name, NULL, size, flags, false, &local_err); } else { bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL, size, flags, false, &local_err); } } if (local_err) { error_propagate(errp, local_err); goto out; } options = qdict_new(); qdict_put_str(options, "discard", "unmap"); qdict_put_str(options, "detect-zeroes", "unmap"); if (backup->format) { qdict_put_str(options, "driver", backup->format); } target_bs = bdrv_open(backup->target, NULL, options, flags, errp); if (!target_bs) { goto out; } /* Honor bdrv_try_set_aio_context() context acquisition requirements. */ old_context = bdrv_get_aio_context(target_bs); aio_context_release(aio_context); aio_context_acquire(old_context); ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); if (ret < 0) { bdrv_unref(target_bs); aio_context_release(old_context); return; } aio_context_release(old_context); aio_context_acquire(aio_context); if (set_backing_hd) { if (bdrv_set_backing_hd(target_bs, source, errp) < 0) { goto unref; } } state->bs = bs; state->job = do_backup_common(qapi_DriveBackup_base(backup), bs, target_bs, aio_context, common->block_job_txn, errp); unref: bdrv_unref(target_bs); out: aio_context_release(aio_context); }
6069bbc904503dd4f4c2cfd7ff883300a6bddeeb
https://github.com/qemu/qemu
1not_vulnerable
hw/misc/iotkit-sysctl: Implement SSE-200 and SSE-300 PID register values The SSE-200 and SSE-300 have different PID register values from the IoTKit for the sysctl register block. We incorrectly implemented the SSE-200 with the same PID values as IoTKit. Fix the SSE-200 bug and report these register values for SSE-300. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210219144617.4782-19-peter.maydell@linaro.org
static uint64_t iotkit_sysctl_read(void *opaque, hwaddr offset, unsigned size) { IoTKitSysCtl *s = IOTKIT_SYSCTL(opaque); uint64_t r; switch (offset) { case A_SECDBGSTAT: r = s->secure_debug; break; case A_SCSECCTRL: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: case ARMSSE_SSE300: r = s->scsecctrl; break; default: g_assert_not_reached(); } break; case A_FCLK_DIV: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: case ARMSSE_SSE300: r = s->fclk_div; break; default: g_assert_not_reached(); } break; case A_SYSCLK_DIV: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: case ARMSSE_SSE300: r = s->sysclk_div; break; default: g_assert_not_reached(); } break; case A_CLOCK_FORCE: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: case ARMSSE_SSE300: r = s->clock_force; break; default: g_assert_not_reached(); } break; case A_RESET_SYNDROME: r = s->reset_syndrome; break; case A_RESET_MASK: r = s->reset_mask; break; case A_GRETREG: r = s->gretreg; break; case A_INITSVTOR0: r = s->initsvtor0; break; case A_INITSVTOR1: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: r = s->initsvtor1; break; case ARMSSE_SSE300: goto bad_offset; default: g_assert_not_reached(); } break; case A_CPUWAIT: switch (s->sse_version) { case ARMSSE_IOTKIT: case ARMSSE_SSE200: r = s->cpuwait; break; case ARMSSE_SSE300: /* In SSE300 this is reserved (for INITSVTOR2) */ goto bad_offset; default: g_assert_not_reached(); } break; case A_NMI_ENABLE: switch (s->sse_version) { case ARMSSE_IOTKIT: /* In IoTKit this is named BUSWAIT but marked reserved, R/O, zero */ r = 0; break; case ARMSSE_SSE200: r = s->nmi_enable; break; case ARMSSE_SSE300: /* In SSE300 this is reserved (for INITSVTOR3) */ goto bad_offset; default: g_assert_not_reached(); } break; case A_WICCTRL: switch (s->sse_version) { case ARMSSE_IOTKIT: case ARMSSE_SSE200: r = s->wicctrl; break; case ARMSSE_SSE300: /* In SSE300 this offset is CPUWAIT */ r = s->cpuwait; break; default: g_assert_not_reached(); } break; case A_EWCTRL: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: r = s->ewctrl; break; case ARMSSE_SSE300: /* In SSE300 this offset is is NMI_ENABLE */ r = s->nmi_enable; break; default: g_assert_not_reached(); } break; case A_PWRCTRL: switch (s->sse_version) { case ARMSSE_IOTKIT: case ARMSSE_SSE200: goto bad_offset; case ARMSSE_SSE300: r = s->pwrctrl; break; default: g_assert_not_reached(); } break; case A_PDCM_PD_SYS_SENSE: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: case ARMSSE_SSE300: r = s->pdcm_pd_sys_sense; break; default: g_assert_not_reached(); } break; case A_PDCM_PD_CPU0_SENSE: switch (s->sse_version) { case ARMSSE_IOTKIT: case ARMSSE_SSE200: goto bad_offset; case ARMSSE_SSE300: r = s->pdcm_pd_cpu0_sense; break; default: g_assert_not_reached(); } break; case A_PDCM_PD_SRAM0_SENSE: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: r = s->pdcm_pd_sram0_sense; break; case ARMSSE_SSE300: goto bad_offset; default: g_assert_not_reached(); } break; case A_PDCM_PD_SRAM1_SENSE: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: r = s->pdcm_pd_sram1_sense; break; case ARMSSE_SSE300: goto bad_offset; default: g_assert_not_reached(); } break; case A_PDCM_PD_SRAM2_SENSE: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: r = 
s->pdcm_pd_sram2_sense; break; case ARMSSE_SSE300: r = s->pdcm_pd_vmr0_sense; break; default: g_assert_not_reached(); } break; case A_PDCM_PD_SRAM3_SENSE: switch (s->sse_version) { case ARMSSE_IOTKIT: goto bad_offset; case ARMSSE_SSE200: r = s->pdcm_pd_sram3_sense; break; case ARMSSE_SSE300: r = s->pdcm_pd_vmr1_sense; break; default: g_assert_not_reached(); } break; case A_PID4 ... A_CID3: switch (s->sse_version) { case ARMSSE_IOTKIT: r = iotkit_sysctl_id[(offset - A_PID4) / 4]; break; case ARMSSE_SSE200: case ARMSSE_SSE300: r = sse200_sysctl_id[(offset - A_PID4) / 4]; break; default: g_assert_not_reached(); } break; case A_SECDBGSET: case A_SECDBGCLR: case A_SWRESET: qemu_log_mask(LOG_GUEST_ERROR, "IoTKit SysCtl read: read of WO offset %x\n", (int)offset); r = 0; break; default: bad_offset: qemu_log_mask(LOG_GUEST_ERROR, "IoTKit SysCtl read: bad offset %x\n", (int)offset); r = 0; break; } trace_iotkit_sysctl_read(offset, r, size); return r; }
a4f1542af58fd6ab061e594d4e161f1c8b4a4372
https://github.com/qemu/qemu
1not_vulnerable
block/export: fix blk_size double byteswap The config->blk_size field is little-endian. Use the native-endian blk_size variable to avoid double byteswapping. Fixes: 11f60f7eaee2630dd6fa0c3a8c49f792e46c4cf1 ("block/export: make vhost-user-blk config space little-endian") Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Message-Id: <20210223144653.811468-8-stefanha@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static void vu_blk_initialize_config(BlockDriverState *bs, struct virtio_blk_config *config, uint32_t blk_size, uint16_t num_queues) { config->capacity = cpu_to_le64(bdrv_getlength(bs) >> BDRV_SECTOR_BITS); config->blk_size = cpu_to_le32(blk_size); config->size_max = cpu_to_le32(0); config->seg_max = cpu_to_le32(128 - 2); config->min_io_size = cpu_to_le16(1); config->opt_io_size = cpu_to_le32(1); config->num_queues = cpu_to_le16(num_queues); config->max_discard_sectors = cpu_to_le32(32768); config->max_discard_seg = cpu_to_le32(1); config->discard_sector_alignment = cpu_to_le32(blk_size >> 9); config->max_write_zeroes_sectors = cpu_to_le32(32768); config->max_write_zeroes_seg = cpu_to_le32(1); }
535255b43898d2e96744057eb86f8497d4d7a461
https://github.com/qemu/qemu
1not_vulnerable
vhost-user-blk: fix blkcfg->num_queues endianness Treat the num_queues field as virtio-endian. On big-endian hosts the vhost-user-blk num_queues field was in the wrong endianness. Move the blkcfg.num_queues store operation from realize to vhost_user_blk_update_config() so feature negotiation has finished and we know the endianness of the device. VIRTIO 1.0 devices are little-endian, but in case someone wants to use legacy VIRTIO we support all endianness cases. Cc: qemu-stable@nongnu.org Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Reviewed-by: Raphael Norwitz <raphael.norwitz@nutanix.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Message-Id: <20210223144653.811468-2-stefanha@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config) { VHostUserBlk *s = VHOST_USER_BLK(vdev); /* Our num_queues overrides the device backend */ virtio_stw_p(vdev, &s->blkcfg.num_queues, s->num_queues); memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config)); }
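Editor's note: the fix works because virtio_stw_p() stores the value in the endianness negotiated for the device, always little-endian for VIRTIO 1.0 and guest-endian for legacy devices. A minimal standalone illustration of that idea follows; the names are made up for the example and this is not the QEMU helper itself.

#include <stdint.h>
#include <stdbool.h>

/* Illustration only: store a 16-bit config field in the device's byte order. */
static void store_config_u16(bool device_little_endian, uint8_t *field, uint16_t val)
{
    if (device_little_endian) {         /* VIRTIO 1.0: always little-endian */
        field[0] = val & 0xff;
        field[1] = val >> 8;
    } else {                            /* legacy device with a big-endian guest */
        field[0] = val >> 8;
        field[1] = val & 0xff;
    }
}

Storing a host-endian value directly (the old behaviour) only happens to work when the host byte order matches the device byte order, which is why the bug surfaced on big-endian hosts.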
bdc4c4c5e372756a5ba3fb3a61e585b02f0dd7f4
https://github.com/qemu/qemu
1not_vulnerable
backup: Remove nodes from job in .clean() The block job holds a reference to the backup-top node (because it is passed as the main job BDS to block_job_create()). Therefore, bdrv_backup_top_drop() cannot delete the backup-top node (replacing it by its child does not affect the job parent, because that has .stay_at_node set). That is a problem, because all of its I/O functions assume the BlockCopyState (s->bcs) to be valid and that it has a filtered child; but after bdrv_backup_top_drop(), neither of those things are true. It does not make sense to add new parents to backup-top after backup_clean(), so we should detach it from the job before bdrv_backup_top_drop(). Because there is no function to do that for a single node, just detach all of the job's nodes -- the job does not do anything past backup_clean() anyway. Signed-off-by: Max Reitz <mreitz@redhat.com> Message-Id: <20210219153348.41861-2-mreitz@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static void backup_abort(Job *job) { BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); if (s->sync_bitmap) { backup_cleanup_sync_bitmap(s, -1); } }
4e0ed62937d0498295457c2e1d8282a24ba140cb
https://github.com/qemu/qemu
1not_vulnerable
esp: raise interrupt after every non-DMA byte transferred to the FIFO This matches the description in the datasheet and is required as support for non-DMA transfers is added. Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <20210304221103.6369-36-mark.cave-ayland@ilande.co.uk>
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val) { trace_esp_mem_writeb(saddr, s->wregs[saddr], val); switch (saddr) { case ESP_TCHI: s->tchi_written = true; /* fall through */ case ESP_TCLO: case ESP_TCMID: s->rregs[ESP_RSTAT] &= ~STAT_TC; break; case ESP_FIFO: if (s->do_cmd) { if (s->cmdlen < ESP_CMDBUF_SZ) { s->cmdbuf[s->cmdlen++] = val & 0xff; } else { trace_esp_error_fifo_overrun(); } } else if (s->ti_wptr == TI_BUFSZ - 1) { trace_esp_error_fifo_overrun(); } else { s->ti_size++; s->ti_buf[s->ti_wptr++] = val & 0xff; } /* Non-DMA transfers raise an interrupt after every byte */ if (s->rregs[ESP_CMD] == CMD_TI) { s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS; esp_raise_irq(s); } break; case ESP_CMD: s->rregs[saddr] = val; if (val & CMD_DMA) { s->dma = 1; /* Reload DMA counter. */ if (esp_get_stc(s) == 0) { esp_set_tc(s, 0x10000); } else { esp_set_tc(s, esp_get_stc(s)); } } else { s->dma = 0; } switch (val & CMD_CMD) { case CMD_NOP: trace_esp_mem_writeb_cmd_nop(val); break; case CMD_FLUSH: trace_esp_mem_writeb_cmd_flush(val); /*s->ti_size = 0;*/ s->ti_wptr = 0; s->ti_rptr = 0; break; case CMD_RESET: trace_esp_mem_writeb_cmd_reset(val); esp_soft_reset(s); break; case CMD_BUSRESET: trace_esp_mem_writeb_cmd_bus_reset(val); if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) { s->rregs[ESP_RINTR] |= INTR_RST; esp_raise_irq(s); } break; case CMD_TI: trace_esp_mem_writeb_cmd_ti(val); handle_ti(s); break; case CMD_ICCS: trace_esp_mem_writeb_cmd_iccs(val); write_response(s); s->rregs[ESP_RINTR] |= INTR_FC; s->rregs[ESP_RSTAT] |= STAT_MI; break; case CMD_MSGACC: trace_esp_mem_writeb_cmd_msgacc(val); s->rregs[ESP_RINTR] |= INTR_DC; s->rregs[ESP_RSEQ] = 0; s->rregs[ESP_RFLAGS] = 0; esp_raise_irq(s); break; case CMD_PAD: trace_esp_mem_writeb_cmd_pad(val); s->rregs[ESP_RSTAT] = STAT_TC; s->rregs[ESP_RINTR] |= INTR_FC; s->rregs[ESP_RSEQ] = 0; break; case CMD_SATN: trace_esp_mem_writeb_cmd_satn(val); break; case CMD_RSTATN: trace_esp_mem_writeb_cmd_rstatn(val); break; case CMD_SEL: trace_esp_mem_writeb_cmd_sel(val); handle_s_without_atn(s); break; case CMD_SELATN: trace_esp_mem_writeb_cmd_selatn(val); handle_satn(s); break; case CMD_SELATNS: trace_esp_mem_writeb_cmd_selatns(val); handle_satn_stop(s); break; case CMD_ENSEL: trace_esp_mem_writeb_cmd_ensel(val); s->rregs[ESP_RINTR] = 0; break; case CMD_DISSEL: trace_esp_mem_writeb_cmd_dissel(val); s->rregs[ESP_RINTR] = 0; esp_raise_irq(s); break; default: trace_esp_error_unhandled_command(val); break; } break; case ESP_WBUSID ... ESP_WSYNO: break; case ESP_CFG1: case ESP_CFG2: case ESP_CFG3: case ESP_RES3: case ESP_RES4: s->rregs[saddr] = val; break; case ESP_WCCF ... ESP_WTEST: break; default: trace_esp_error_invalid_write(val, saddr); return; } s->wregs[saddr] = val; }
2526e69efd8e386573212bf3ea05171a727a598b
https://github.com/qemu/qemu
1not_vulnerable
target/hexagon/opcodes: Add missing varargs cleanup Fix a trivial incorrect usage of variable argument macros detected by Coverity (missing_va_end: va_end was not called for ap). Fixes: Coverity CID 1446720 (VARARGS) Fixes: e3c00c2ed75 ("Hexagon (target/hexagon) opcode data structures") Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Tested-by: Taylor Simpson <tsimpson@quicinc.com> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Taylor Simpson <tsimpson@quicinc.com> Message-Id: <20210223111253.2831285-1-f4bug@amsat.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
static void init_attribs(int tag, ...) { va_list ap; int attr; va_start(ap, tag); while ((attr = va_arg(ap, int)) != 0) { set_bit(attr, opcode_attribs[tag]); } va_end(ap); }
dc09f047eddec8f4a1991c4f5f4a428d7aa3f2c0
https://github.com/qemu/qemu
1not_vulnerable
tcg/tci: Use exec/cpu_ldst.h interfaces Use the provided cpu_ldst.h interfaces. This fixes the build vs the unconverted uses of g2h(), adds missed memory trace events, and correctly recognizes when a SIGSEGV belongs to the guest via set_helper_retaddr(). Fixes: 3e8f1628e864 Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) { bool result = false; int64_t i0 = u0; int64_t i1 = u1; switch (condition) { case TCG_COND_EQ: result = (u0 == u1); break; case TCG_COND_NE: result = (u0 != u1); break; case TCG_COND_LT: result = (i0 < i1); break; case TCG_COND_GE: result = (i0 >= i1); break; case TCG_COND_LE: result = (i0 <= i1); break; case TCG_COND_GT: result = (i0 > i1); break; case TCG_COND_LTU: result = (u0 < u1); break; case TCG_COND_GEU: result = (u0 >= u1); break; case TCG_COND_LEU: result = (u0 <= u1); break; case TCG_COND_GTU: result = (u0 > u1); break; default: g_assert_not_reached(); } return result; }
7520c4f0847093aefa87f23113f28d5d1d574aed
https://github.com/qemu/qemu
1not_vulnerable
trace: skip qemu_set_log_filename if no "-D" option was passed When the "simple" backend is not active but the "log" backend is, both "-trace file=" and "-D" will result in a call to qemu_set_log_filename. Unfortunately, QEMU was also calling qemu_set_log_filename if "-D" was not passed, so the "-trace file=" option had no effect and the tracepoints went back to stderr. Fortunately we can just skip qemu_set_log_filename in that case, because the log backend will initialize itself just fine as soon as qemu_set_log is called, also in qemu_process_early_options. Cc: stefanha@redhat.com Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-Id: <20210209145759.141231-3-pbonzini@redhat.com>
static void qemu_process_early_options(void) { #ifdef CONFIG_SECCOMP QemuOptsList *olist = qemu_find_opts_err("sandbox", NULL); if (olist) { qemu_opts_foreach(olist, parse_sandbox, NULL, &error_fatal); } #endif qemu_opts_foreach(qemu_find_opts("name"), parse_name, NULL, &error_fatal); if (qemu_opts_foreach(qemu_find_opts("action"), process_runstate_actions, NULL, &error_fatal)) { exit(1); } #ifndef _WIN32 qemu_opts_foreach(qemu_find_opts("add-fd"), parse_add_fd, NULL, &error_fatal); qemu_opts_foreach(qemu_find_opts("add-fd"), cleanup_add_fd, NULL, &error_fatal); #endif /* Open the logfile at this point and set the log mask if necessary. */ if (log_file) { qemu_set_log_filename(log_file, &error_fatal); } if (log_mask) { int mask; mask = qemu_str_to_log_mask(log_mask); if (!mask) { qemu_print_log_usage(stdout); exit(1); } qemu_set_log(mask); } else { qemu_set_log(0); } qemu_add_default_firmwarepath(); }
e20e182ea0ab5c16557603f457fe0db445b63726
https://github.com/qemu/qemu
1not_vulnerable
x86/pvh: extract only 4 bytes of start address for 32 bit kernels When loading the PVH start address from a 32 bit ELF note, extract only the appropriate number of bytes. Fixes: ab969087da65 ("pvh: Boot uncompressed kernel using direct boot ABI") Signed-off-by: David Edmondson <david.edmondson@oracle.com> Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> Message-Id: <20210302090315.3031492-3-david.edmondson@oracle.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64) { size_t *elf_note_data_addr; /* Check if ELF Note header passed in is valid */ if (arg1 == NULL) { return 0; } if (is64) { struct elf64_note *nhdr64 = (struct elf64_note *)arg1; uint64_t nhdr_size64 = sizeof(struct elf64_note); uint64_t phdr_align = *(uint64_t *)arg2; uint64_t nhdr_namesz = nhdr64->n_namesz; elf_note_data_addr = ((void *)nhdr64) + nhdr_size64 + QEMU_ALIGN_UP(nhdr_namesz, phdr_align); pvh_start_addr = *elf_note_data_addr; } else { struct elf32_note *nhdr32 = (struct elf32_note *)arg1; uint32_t nhdr_size32 = sizeof(struct elf32_note); uint32_t phdr_align = *(uint32_t *)arg2; uint32_t nhdr_namesz = nhdr32->n_namesz; elf_note_data_addr = ((void *)nhdr32) + nhdr_size32 + QEMU_ALIGN_UP(nhdr_namesz, phdr_align); pvh_start_addr = *(uint32_t *)elf_note_data_addr; } return pvh_start_addr; }
e0a8f99355c32b48c9ef867127075b5267ae23d8
https://github.com/qemu/qemu
1not_vulnerable
accel: kvm: Fix memory waste under mismatched page size When handling the dirty log, we face qemu_real_host_page_size and TARGET_PAGE_SIZE. The first one is the granule of the KVM dirty bitmap, and the second one is the granule of the QEMU dirty bitmap. As qemu_real_host_page_size >= TARGET_PAGE_SIZE (kvm_init() enforced it), misusing TARGET_PAGE_SIZE to init the kvmslot dirty_bmap may waste memory. For example, when qemu_real_host_page_size is 64K and TARGET_PAGE_SIZE is 4K, it wastes 93.75% (15/16) of the memory. Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com> Reviewed-by: Andrew Jones <drjones@redhat.com> Reviewed-by: Peter Xu <peterx@redhat.com> Message-Id: <20201217014941.22872-2-zhukeqian1@huawei.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem) { /* * XXX bad kernel interface alert * For dirty bitmap, kernel allocates array of size aligned to * bits-per-long. But for case when the kernel is 64bits and * the userspace is 32bits, userspace can't align to the same * bits-per-long, since sizeof(long) is different between kernel * and user space. This way, userspace will provide buffer which * may be 4 bytes less than the kernel will use, resulting in * userspace memory corruption (which is not detectable by valgrind * too, in most cases). * So for now, let's align to 64 instead of HOST_LONG_BITS here, in * a hope that sizeof(long) won't become >8 any time soon. * * Note: the granule of kvm dirty log is qemu_real_host_page_size. * And mem->memory_size is aligned to it (otherwise this mem can't * be registered to KVM). */ hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size, /*HOST_LONG_BITS*/ 64) / 8; mem->dirty_bmap = g_malloc0(bitmap_size); }
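The 93.75% figure in the commit message follows directly from the one-bit-per-page arithmetic. A small sketch with an assumed 1 GiB memory slot (the slot size is only illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t slot_size = 1ULL << 30;      /* assumed 1 GiB memory slot */
    uint64_t host_page = 64 * 1024;       /* qemu_real_host_page_size: 64K */
    uint64_t target_page = 4 * 1024;      /* TARGET_PAGE_SIZE: 4K */

    /* one bit per page, rounded up to whole bytes */
    uint64_t bmap_host = (slot_size / host_page + 7) / 8;
    uint64_t bmap_target = (slot_size / target_page + 7) / 8;

    printf("bitmap sized by host page:   %llu bytes\n", (unsigned long long)bmap_host);
    printf("bitmap sized by target page: %llu bytes\n", (unsigned long long)bmap_target);
    printf("wasted: %.2f%%\n", 100.0 * (bmap_target - bmap_host) / bmap_target);
    return 0;
}

This prints 2048 versus 32768 bytes: 15/16 of the larger allocation is never used because KVM only reports dirtiness at 64K granularity.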
c6986f16a7022ccfb73d91bc7676c8e1d15e5342
https://github.com/qemu/qemu
1not_vulnerable
KVM: x86: do not fail if software breakpoint has already been removed If kvm_arch_remove_sw_breakpoint finds that a software breakpoint does not have an INT3 instruction, it fails. This can happen if one sets a software breakpoint in a kernel module and then reloads it. gdb then thinks the breakpoint cannot be deleted and there is no way to add it back. Suggested-by: Maxim Levitsky <mlevitsk@redhat.com> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) { uint8_t int3; if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) { return -EINVAL; } if (int3 != 0xcc) { return 0; } if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) { return -EINVAL; } return 0; }
819b3496196c2a7de89ed2372182c24053443990
https://github.com/qemu/qemu
1not_vulnerable
virtio-mmio: improve virtio-mmio get_dev_path alog At the moment the following QEMU command line triggers an assertion failure On xlnx-versal SOC: qemu-system-aarch64 \ -machine xlnx-versal-virt -nographic -smp 2 -m 128 \ -fsdev local,id=shareid,path=${HOME}/work,security_model=none \ -device virtio-9p-device,fsdev=shareid,mount_tag=share \ -fsdev local,id=shareid1,path=${HOME}/Music,security_model=none \ -device virtio-9p-device,fsdev=shareid1,mount_tag=share1 qemu-system-aarch64: ../migration/savevm.c:860: vmstate_register_with_alias_id: Assertion `!se->compat || se->instance_id == 0' failed. This problem was fixed on arm virt platform in commit f58b39d2d5b ("virtio-mmio: format transport base address in BusClass.get_dev_path") It works perfectly on arm virt platform. but there is still there on xlnx-versal SOC. The main difference between arm virt and xlnx-versal is they use different way to create virtio-mmio qdev. on arm virt, it calls sysbus_create_simple("virtio-mmio", base, pic[irq]); which will call sysbus_mmio_map internally and assign base address to subsys device mmio correctly. but xlnx-versal's implements won't do this. However, xlnx-versal can't switch to sysbus_create_simple() to create virtio-mmio device. It's because xlnx-versal's cpu use VersalVirt.soc.fpd.apu.mr as it's memory. which is subregion of system_memory. sysbus_create_simple will add virtio to system_memory, which can't be accessed by cpu. Besides, xlnx-versal can't add sysbus_mmio_map api call too, because this will add memory region to system_memory, and it can't be added to VersalVirt.soc.fpd.apu.mr again. We can solve this by assign correct base address offset on dev_path. This path was test on aarch64 virt & xlnx-versal platform. Signed-off-by: schspa <schspa@gmail.com> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static char *virtio_mmio_bus_get_dev_path(DeviceState *dev) { BusState *virtio_mmio_bus; VirtIOMMIOProxy *virtio_mmio_proxy; char *proxy_path; char *path; MemoryRegionSection section; virtio_mmio_bus = qdev_get_parent_bus(dev); virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent); proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy)); /* * If @format_transport_address is false, then we just perform the same as * virtio_bus_get_dev_path(): we delegate the address formatting for the * device on the virtio-mmio bus to the bus that the virtio-mmio proxy * (i.e., the device that implements the virtio-mmio bus) resides on. In * this case the base address of the virtio-mmio transport will be * invisible. */ if (!virtio_mmio_proxy->format_transport_address) { return proxy_path; } /* Otherwise, we append the base address of the transport. */ section = memory_region_find(&virtio_mmio_proxy->iomem, 0, 0x200); assert(section.mr); if (proxy_path) { path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path, section.offset_within_address_space); } else { path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx, section.offset_within_address_space); } memory_region_unref(section.mr); g_free(proxy_path); return path; }
a54b8ac340c20531daa89929c5ce7fed89fa401d
https://github.com/qemu/qemu
1not_vulnerable
css: SCHIB measurement block origin must be aligned The Measurement Block Origin inside the SCHIB is used when Measurement Block format 1 is in use and must be aligned on 64 bytes, otherwise an operand exception is recognized when issuing the Modify Sub CHannel (MSCH) instruction. Signed-off-by: Pierre Morel <pmorel@linux.ibm.com> Reviewed-by: Thomas Huth <thuth@redhat.com> Message-Id: <1613741973-3711-2-git-send-email-pmorel@linux.ibm.com> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
static int ioinst_schib_valid(SCHIB *schib) { if ((be16_to_cpu(schib->pmcw.flags) & PMCW_FLAGS_MASK_INVALID) || (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_INVALID)) { return 0; } /* Disallow extended measurements for now. */ if (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_XMWME) { return 0; } /* for MB format 1 bits 26-31 of word 11 must be 0 */ /* MBA uses words 10 and 11, it means align on 2**6 */ if ((be16_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_MBFC) && (be64_to_cpu(schib->mba) & 0x03fUL)) { return 0; } return 1; }
403af209db8c030ed1e000640cd3cd80c6882883
https://github.com/qemu/qemu
1not_vulnerable
s390x/pci: restore missing Query PCI Function CLP data Some CLP response data was accidentally dropped when fixing endianness issues with the Query PCI Function CLP response. All of these values are sent as 0s to the guest for emulated devices, so the impact is only observed on passthrough devices. Fixes: a4e2fff1b104 ("s390x/pci: fix endianness issues") Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com> Message-Id: <1613681609-9349-1-git-send-email-mjrosato@linux.ibm.com> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) { ClpReqHdr *reqh; ClpRspHdr *resh; S390PCIBusDevice *pbdev; uint32_t req_len; uint32_t res_len; uint8_t buffer[4096 * 2]; uint8_t cc = 0; CPUS390XState *env = &cpu->env; S390pciState *s = s390_get_phb(); int i; if (env->psw.mask & PSW_MASK_PSTATE) { s390_program_interrupt(env, PGM_PRIVILEGED, ra); return 0; } if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) { s390_cpu_virt_mem_handle_exc(cpu, ra); return 0; } reqh = (ClpReqHdr *)buffer; req_len = lduw_p(&reqh->len); if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) { s390_program_interrupt(env, PGM_OPERAND, ra); return 0; } if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, req_len + sizeof(*resh))) { s390_cpu_virt_mem_handle_exc(cpu, ra); return 0; } resh = (ClpRspHdr *)(buffer + req_len); res_len = lduw_p(&resh->len); if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) { s390_program_interrupt(env, PGM_OPERAND, ra); return 0; } if ((req_len + res_len) > 8192) { s390_program_interrupt(env, PGM_OPERAND, ra); return 0; } if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, req_len + res_len)) { s390_cpu_virt_mem_handle_exc(cpu, ra); return 0; } if (req_len != 32) { stw_p(&resh->rsp, CLP_RC_LEN); goto out; } switch (lduw_p(&reqh->cmd)) { case CLP_LIST_PCI: { ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer; list_pci(rrb, &cc); break; } case CLP_SET_PCI_FN: { ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh; ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh; pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh)); if (!pbdev) { stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH); goto out; } switch (reqsetpci->oc) { case CLP_SET_ENABLE_PCI_FN: switch (reqsetpci->ndas) { case 0: stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS); goto out; case 1: break; default: stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES); goto out; } if (pbdev->fh & FH_MASK_ENABLE) { stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); goto out; } pbdev->fh |= FH_MASK_ENABLE; pbdev->state = ZPCI_FS_ENABLED; stl_p(&ressetpci->fh, pbdev->fh); stw_p(&ressetpci->hdr.rsp, CLP_RC_OK); break; case CLP_SET_DISABLE_PCI_FN: if (!(pbdev->fh & FH_MASK_ENABLE)) { stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); goto out; } device_legacy_reset(DEVICE(pbdev)); pbdev->fh &= ~FH_MASK_ENABLE; pbdev->state = ZPCI_FS_DISABLED; stl_p(&ressetpci->fh, pbdev->fh); stw_p(&ressetpci->hdr.rsp, CLP_RC_OK); break; default: DPRINTF("unknown set pci command\n"); stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); break; } break; } case CLP_QUERY_PCI_FN: { ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh; ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh; pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh)); if (!pbdev) { DPRINTF("query pci no pci dev\n"); stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH); goto out; } stq_p(&resquery->sdma, pbdev->zpci_fn.sdma); stq_p(&resquery->edma, pbdev->zpci_fn.edma); stw_p(&resquery->pchid, pbdev->zpci_fn.pchid); stw_p(&resquery->vfn, pbdev->zpci_fn.vfn); resquery->flags = pbdev->zpci_fn.flags; resquery->pfgid = pbdev->zpci_fn.pfgid; resquery->pft = pbdev->zpci_fn.pft; resquery->fmbl = pbdev->zpci_fn.fmbl; stl_p(&resquery->fid, pbdev->zpci_fn.fid); stl_p(&resquery->uid, pbdev->zpci_fn.uid); memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS); memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN); for (i = 0; i < PCI_BAR_COUNT; i++) { uint32_t data = pci_get_long(pbdev->pdev->config + PCI_BASE_ADDRESS_0 + (i * 4)); 
stl_p(&resquery->bar[i], data); resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ? ctz64(pbdev->pdev->io_regions[i].size) : 0; DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i, ldl_p(&resquery->bar[i]), pbdev->pdev->io_regions[i].size, resquery->bar_size[i]); } stw_p(&resquery->hdr.rsp, CLP_RC_OK); break; } case CLP_QUERY_PCI_FNGRP: { ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh; ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh; S390PCIGroup *group; group = s390_group_find(reqgrp->g); if (!group) { /* We do not allow access to unknown groups */ /* The group must have been obtained with a vfio device */ stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID); goto out; } resgrp->fr = group->zpci_group.fr; stq_p(&resgrp->dasm, group->zpci_group.dasm); stq_p(&resgrp->msia, group->zpci_group.msia); stw_p(&resgrp->mui, group->zpci_group.mui); stw_p(&resgrp->i, group->zpci_group.i); stw_p(&resgrp->maxstbl, group->zpci_group.maxstbl); resgrp->version = group->zpci_group.version; stw_p(&resgrp->hdr.rsp, CLP_RC_OK); break; } default: DPRINTF("unknown clp command\n"); stw_p(&resh->rsp, CLP_RC_CMD); break; } out: if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer, req_len + res_len)) { s390_cpu_virt_mem_handle_exc(cpu, ra); return 0; } setcc(cpu, cc); return 0; }
ea1b90b4fcb1230b2c85f3fd4ee09a84ddca7a6f
https://github.com/qemu/qemu
1not_vulnerable
target/s390x/arch_dump: Fix warning for the name field in the PT_NOTE section There is a compiler warning with GCC 9.3 when compiling with the -fsanitize=thread compiler flag: In function 'strncpy', inlined from 's390x_write_elf64_notes' at ../target/s390x/arch_dump.c:219:9: /usr/include/x86_64-linux-gnu/bits/string_fortified.h:106:10: error: '__builtin_strncpy' specified bound 8 equals destination size [-Werror=stringop-truncation] Since the name should always be NUL-terminated, let's use g_strlcpy() to silence this warning. And while we're at it, also add an assert() to make sure that the provided names always fit the size field (which is fine for the current callers, the function is called once with "CORE" and once with "LINUX" as a name). Signed-off-by: Thomas Huth <thuth@redhat.com> Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com> Message-Id: <20210205093921.848260-1-thuth@redhat.com> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
static int s390x_write_elf64_notes(const char *note_name, WriteCoreDumpFunction f, S390CPU *cpu, int id, void *opaque, const NoteFuncDesc *funcs) { Note note; const NoteFuncDesc *nf; int note_size; int ret = -1; assert(strlen(note_name) < sizeof(note.name)); for (nf = funcs; nf->note_contents_func; nf++) { memset(&note, 0, sizeof(note)); note.hdr.n_namesz = cpu_to_be32(strlen(note_name) + 1); note.hdr.n_descsz = cpu_to_be32(nf->contents_size); g_strlcpy(note.name, note_name, sizeof(note.name)); (*nf->note_contents_func)(&note, cpu, id); note_size = sizeof(note) - sizeof(note.contents) + nf->contents_size; ret = f(&note, note_size, opaque); if (ret < 0) { return -1; } } return 0; }
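The underlying issue is that strncpy() with a bound equal to the destination size may leave the buffer without a terminating NUL, which is exactly what the compiler warns about. A standalone sketch of the difference, using a hand-rolled bounded copy in the spirit of g_strlcpy() (the 8-byte field size mirrors the note name field):

#include <stdio.h>
#include <string.h>

/* bounded copy that always NUL-terminates, similar in spirit to g_strlcpy() */
static void copy_name(char *dst, const char *src, size_t dst_size)
{
    if (dst_size == 0) {
        return;
    }
    strncpy(dst, src, dst_size - 1);
    dst[dst_size - 1] = '\0';
}

int main(void)
{
    char name_a[8];
    char name_b[8];

    strncpy(name_a, "LONGNAME", sizeof(name_a));   /* 8 chars copied, no NUL written */
    copy_name(name_b, "LONGNAME", sizeof(name_b)); /* always terminated, truncated to 7 */

    printf("bounded copy: \"%s\"\n", name_b);
    /* printing name_a with %s would read past the array: undefined behaviour */
    return 0;
}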
b52fa0ea45bea494a953dd766151d584a28e87e5
https://github.com/qemu/qemu
1not_vulnerable
hw/pci: Have safer pcie_bus_realize() by checking error path While pci_bus_realize() currently does not use the Error* argument, it would be an error to leave pcie_bus_realize() setting bus->flags if pci_bus_realize() had failed. Fix by using a local Error* and return early (propagating the error) if pci_bus_realize() failed. Reported-by: Markus Armbruster <armbru@redhat.com> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com> Message-Id: <20210201153700.618946-1-philmd@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
static void pci_bus_realize(BusState *qbus, Error **errp) { PCIBus *bus = PCI_BUS(qbus); bus->machine_done.notify = pcibus_machine_done; qemu_add_machine_init_done_notifier(&bus->machine_done); vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_pcibus, bus); }
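The listing above is the plain pci_bus_realize() that the fixed function calls; the commit itself changes pcie_bus_realize(). A sketch of the early-return pattern the commit describes, using QEMU's Error API from qapi/error.h; the flag assignment at the end is a placeholder for the PCIe-specific setup, not necessarily the exact upstream code:

static void pcie_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);
    Error *local_err = NULL;

    pci_bus_realize(qbus, &local_err);
    if (local_err) {
        /* propagate and return early so bus->flags is never touched on failure */
        error_propagate(errp, local_err);
        return;
    }

    bus->flags |= PCI_BUS_IS_EXPRESS;   /* placeholder for the PCIe-only flag setup */
}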
c45b426acd1ad8e30fbe1b9af8c07b2889c28c6b
https://github.com/qemu/qemu
1not_vulnerable
tcg/i386: rdpmc: fix the conditions Signed-off-by: Zheng Zhan Liang <linuxmaker@163.com> Message-Id: <20210225054756.35962-1-linuxmaker@163.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
void helper_rdtscp(CPUX86State *env) { helper_rdtsc(env); env->regs[R_ECX] = (uint32_t)(env->tsc_aux); }
6585b1627899a3fcaf1cf62bfb659b04371ca9ec
https://github.com/qemu/qemu
1not_vulnerable
char: don't fail when client is not connected This patch checks that ioc is not null before using it in tcp socket tcp_chr_add_watch function. The failure occurs in replay mode of the execution, when monitor and serial port are tcp servers, and there are no clients connected to them: -monitor tcp:127.0.0.1:8081,server,nowait -serial tcp:127.0.0.1:8082,server,nowait Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com> Message-Id: <161284977034.741841.12565530923825663110.stgit@pasha-ThinkPad-X280> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len) { SocketChardev *s = SOCKET_CHARDEV(chr); struct iovec iov = { .iov_base = buf, .iov_len = len }; int ret; size_t i; int *msgfds = NULL; size_t msgfds_num = 0; if (qio_channel_has_feature(s->ioc, QIO_CHANNEL_FEATURE_FD_PASS)) { ret = qio_channel_readv_full(s->ioc, &iov, 1, &msgfds, &msgfds_num, NULL); } else { ret = qio_channel_readv_full(s->ioc, &iov, 1, NULL, NULL, NULL); } if (ret == QIO_CHANNEL_ERR_BLOCK) { errno = EAGAIN; ret = -1; } else if (ret == -1) { errno = EIO; } if (msgfds_num) { /* close and clean read_msgfds */ for (i = 0; i < s->read_msgfds_num; i++) { close(s->read_msgfds[i]); } if (s->read_msgfds_num) { g_free(s->read_msgfds); } s->read_msgfds = msgfds; s->read_msgfds_num = msgfds_num; } for (i = 0; i < s->read_msgfds_num; i++) { int fd = s->read_msgfds[i]; if (fd < 0) { continue; } /* O_NONBLOCK is preserved across SCM_RIGHTS so reset it */ qemu_set_block(fd); #ifndef MSG_CMSG_CLOEXEC qemu_set_cloexec(fd); #endif } return ret; }
782a78c9e994c2be23467262f50e885a0eb0d9fc
https://github.com/qemu/qemu
1not_vulnerable
scsi-disk: pass guest recoverable errors through even for rerror=stop Right now, recoverable sense values are passed directly to the guest only for rerror=report. However, when rerror/werror are 'stop' we still don't want the host to be involved on every UNIT ATTENTION (especially considering that the QMP event will not have enough information to act on the report). Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed) { bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); SCSISense sense = SENSE_CODE(NO_SENSE); int error = 0; bool req_has_sense = false; BlockErrorAction action; int status; if (ret < 0) { status = scsi_sense_from_errno(-ret, &sense); error = -ret; } else { /* A passthrough command has completed with nonzero status. */ status = ret; if (status == CHECK_CONDITION) { req_has_sense = true; error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); } else { error = EINVAL; } } /* * Check whether the error has to be handled by the guest or should * rather follow the rerror=/werror= settings. Guest-handled errors * are usually retried immediately, so do not post them to QMP and * do not account them as failed I/O. */ if (req_has_sense && scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) { action = BLOCK_ERROR_ACTION_REPORT; acct_failed = false; } else { action = blk_get_error_action(s->qdev.conf.blk, is_read, error); blk_error_action(s->qdev.conf.blk, action, is_read, error); } switch (action) { case BLOCK_ERROR_ACTION_REPORT: if (acct_failed) { block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); } if (req_has_sense) { sdc->update_sense(&r->req); } else if (status == CHECK_CONDITION) { scsi_req_build_sense(&r->req, sense); } scsi_req_complete(&r->req, status); return true; case BLOCK_ERROR_ACTION_IGNORE: return false; case BLOCK_ERROR_ACTION_STOP: scsi_req_retry(&r->req); return true; default: g_assert_not_reached(); } }
424740def9a42da88550410de9a41ef07cc4a010
https://github.com/qemu/qemu
1not_vulnerable
scsi-disk: do not complete requests early for rerror/werror=ignore When requested to ignore errors, just do nothing and let the request complete normally. This means that the request will be accounted correctly. This is what commit 40dce4ee61 ("scsi-disk: fix rerror/werror=ignore", 2018-10-19) was supposed to do: Fixes: 40dce4ee61 ("scsi-disk: fix rerror/werror=ignore", 2018-10-19) Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) { bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, is_read, error); if (action == BLOCK_ERROR_ACTION_REPORT) { if (acct_failed) { block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); } switch (error) { case 0: /* A passthrough command has run and has produced sense data; check * whether the error has to be handled by the guest or should rather * pause the host. */ assert(r->status && *r->status); if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) { /* These errors are handled by guest. */ sdc->update_sense(&r->req); scsi_req_complete(&r->req, *r->status); return true; } error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); break; #ifdef CONFIG_LINUX /* These errno mapping are specific to Linux. For more information: * - scsi_decide_disposition in drivers/scsi/scsi_error.c * - scsi_result_to_blk_status in drivers/scsi/scsi_lib.c * - blk_errors[] in block/blk-core.c */ case EBADE: /* DID_NEXUS_FAILURE -> BLK_STS_NEXUS. */ scsi_req_complete(&r->req, RESERVATION_CONFLICT); break; case ENODATA: /* DID_MEDIUM_ERROR -> BLK_STS_MEDIUM. */ scsi_check_condition(r, SENSE_CODE(READ_ERROR)); break; case EREMOTEIO: /* DID_TARGET_FAILURE -> BLK_STS_TARGET. */ scsi_req_complete(&r->req, HARDWARE_ERROR); break; #endif case ENOMEDIUM: scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); break; case ENOMEM: scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); break; case EINVAL: scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); break; case ENOSPC: scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); break; default: scsi_check_condition(r, SENSE_CODE(IO_ERROR)); break; } } blk_error_action(s->qdev.conf.blk, action, is_read, error); if (action == BLOCK_ERROR_ACTION_IGNORE) { return false; } if (action == BLOCK_ERROR_ACTION_STOP) { scsi_req_retry(&r->req); } return true; }
33b44fdaba54a99e94b604dafb0d5fcaa8a35261
https://github.com/qemu/qemu
1not_vulnerable
acpi: set fadt.smi_cmd to zero when SMM is not supported >From table 5.9 SMI_CMD of ACPI spec > This field is reserved and must be zero on system > that does not support System Management mode. When smm is not enabled, set it to zero to comform to the spec. When -machine smm=off is passed, the change to FACP is as follows. @@ -1,46 +1,46 @@ /* * Intel ACPI Component Architecture * AML/ASL+ Disassembler version 20180105 (64-bit version) * Copyright (c) 2000 - 2018 Intel Corporation * - * Disassembly of tests/data/acpi/q35/FACP, Fri Feb 5 16:57:04 2021 + * Disassembly of /tmp/aml-1OQYX0, Fri Feb 5 16:57:04 2021 * * ACPI Data Table [FACP] * * Format: [HexOffset DecimalOffset ByteLength] FieldName : FieldValue */ [000h 0000 4] Signature : "FACP" [Fixed ACPI Description Table (FADT)] [004h 0004 4] Table Length : 000000F4 [008h 0008 1] Revision : 03 -[009h 0009 1] Checksum : 1F +[009h 0009 1] Checksum : D6 [00Ah 0010 6] Oem ID : "BOCHS " [010h 0016 8] Oem Table ID : "BXPCFACP" [018h 0024 4] Oem Revision : 00000001 [01Ch 0028 4] Asl Compiler ID : "BXPC" [020h 0032 4] Asl Compiler Revision : 00000001 [024h 0036 4] FACS Address : 00000000 [028h 0040 4] DSDT Address : 00000000 [02Ch 0044 1] Model : 01 [02Dh 0045 1] PM Profile : 00 [Unspecified] [02Eh 0046 2] SCI Interrupt : 0009 -[030h 0048 4] SMI Command Port : 000000B2 -[034h 0052 1] ACPI Enable Value : 02 -[035h 0053 1] ACPI Disable Value : 03 +[030h 0048 4] SMI Command Port : 00000000 +[034h 0052 1] ACPI Enable Value : 00 +[035h 0053 1] ACPI Disable Value : 00 [036h 0054 1] S4BIOS Command : 00 [037h 0055 1] P-State Control : 00 [038h 0056 4] PM1A Event Block Address : 00000600 [03Ch 0060 4] PM1B Event Block Address : 00000000 [040h 0064 4] PM1A Control Block Address : 00000604 [044h 0068 4] PM1B Control Block Address : 00000000 [048h 0072 4] PM2 Control Block Address : 00000000 [04Ch 0076 4] PM Timer Block Address : 00000608 [050h 0080 4] GPE0 Block Address : 00000620 [054h 0084 4] GPE1 Block Address : 00000000 [058h 0088 1] PM1 Event Block Length : 04 [059h 0089 1] PM1 Control Block Length : 02 [05Ah 0090 1] PM2 Control Block Length : 00 [05Bh 0091 1] PM Timer Block Length : 04 [05Ch 0092 1] GPE0 Block Length : 10 [05Dh 0093 1] GPE1 Block Length : 00 Reviewed-by: Igor Mammedov <imammedo@redhat.com> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com> Message-Id: <09ed791ef77fda2b194100669cbc690865c9eb52.1613615732.git.isaku.yamahata@intel.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
static void init_common_fadt_data(MachineState *ms, Object *o, AcpiFadtData *data) { X86MachineState *x86ms = X86_MACHINE(ms); /* * "ICH9-LPC" or "PIIX4_PM" has "smm-compat" property to keep the old * behavior for compatibility irrelevant to smm_enabled, which doesn't * comforms to ACPI spec. */ bool smm_enabled = object_property_get_bool(o, "smm-compat", NULL) ? true : x86_machine_is_smm_enabled(x86ms); uint32_t io = object_property_get_uint(o, ACPI_PM_PROP_PM_IO_BASE, NULL); AmlAddressSpace as = AML_AS_SYSTEM_IO; AcpiFadtData fadt = { .rev = 3, .flags = (1 << ACPI_FADT_F_WBINVD) | (1 << ACPI_FADT_F_PROC_C1) | (1 << ACPI_FADT_F_SLP_BUTTON) | (1 << ACPI_FADT_F_RTC_S4) | (1 << ACPI_FADT_F_USE_PLATFORM_CLOCK) | /* APIC destination mode ("Flat Logical") has an upper limit of 8 * CPUs for more than 8 CPUs, "Clustered Logical" mode has to be * used */ ((ms->smp.max_cpus > 8) ? (1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL) : 0), .int_model = 1 /* Multiple APIC */, .rtc_century = RTC_CENTURY, .plvl2_lat = 0xfff /* C2 state not supported */, .plvl3_lat = 0xfff /* C3 state not supported */, .smi_cmd = smm_enabled ? ACPI_PORT_SMI_CMD : 0, .sci_int = object_property_get_uint(o, ACPI_PM_PROP_SCI_INT, NULL), .acpi_enable_cmd = smm_enabled ? object_property_get_uint(o, ACPI_PM_PROP_ACPI_ENABLE_CMD, NULL) : 0, .acpi_disable_cmd = smm_enabled ? object_property_get_uint(o, ACPI_PM_PROP_ACPI_DISABLE_CMD, NULL) : 0, .pm1a_evt = { .space_id = as, .bit_width = 4 * 8, .address = io }, .pm1a_cnt = { .space_id = as, .bit_width = 2 * 8, .address = io + 0x04 }, .pm_tmr = { .space_id = as, .bit_width = 4 * 8, .address = io + 0x08 }, .gpe0_blk = { .space_id = as, .bit_width = object_property_get_uint(o, ACPI_PM_PROP_GPE0_BLK_LEN, NULL) * 8, .address = object_property_get_uint(o, ACPI_PM_PROP_GPE0_BLK, NULL) }, }; *data = fadt; }
b48088d60e8466eea2cc517dedb7ee0d97b7feab
https://github.com/qemu/qemu
1not_vulnerable
acpi/gpex: Fix cca attribute check for pxb device When checking DMA support for a device attached to a pxb, the cache coherency attribute needs to be set. This adds the _CCA attribute to the pxb DSDT. Fixes: 6f9765fbad ("acpi/gpex: Build tables for pxb") Signed-off-by: Jiahui Cen <cenjiahui@huawei.com> Signed-off-by: Xingang Wang <wangxingang5@huawei.com> Message-Id: <1612490205-48788-3-git-send-email-wangxingang5@huawei.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Igor Mammedov <imammedo@redhat.com>
void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) { int nr_pcie_buses = cfg->ecam.size / PCIE_MMCFG_SIZE_MIN; Aml *method, *crs, *dev, *rbuf; PCIBus *bus = cfg->bus; CrsRangeSet crs_range_set; CrsRangeEntry *entry; int i; /* start to construct the tables for pxb */ crs_range_set_init(&crs_range_set); if (bus) { QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); if (!pci_bus_is_root(bus)) { continue; } /* * 0 - (nr_pcie_buses - 1) is the bus range for the main * host-bridge and it equals the MIN of the * busNr defined for pxb-pcie. */ if (bus_num < nr_pcie_buses) { nr_pcie_buses = bus_num; } dev = aml_device("PC%.02X", bus_num); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08"))); aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03"))); aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); aml_append(dev, aml_name_decl("_STR", aml_unicode("pxb Device"))); aml_append(dev, aml_name_decl("_CCA", aml_int(1))); if (numa_node != NUMA_NODE_UNASSIGNED) { aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node))); } acpi_dsdt_add_pci_route_table(dev, cfg->irq); /* * Resources defined for PXBs are composed by the folling parts: * 1. The resources the pci-brige/pcie-root-port need. * 2. The resources the devices behind pxb need. */ crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), &crs_range_set, cfg->pio.base, 0, 0, 0); aml_append(dev, aml_name_decl("_CRS", crs)); acpi_dsdt_add_pci_osc(dev); aml_append(scope, dev); } } /* tables for the main */ dev = aml_device("%s", "PCI0"); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08"))); aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03"))); aml_append(dev, aml_name_decl("_SEG", aml_int(0))); aml_append(dev, aml_name_decl("_BBN", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(0))); aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device"))); aml_append(dev, aml_name_decl("_CCA", aml_int(1))); acpi_dsdt_add_pci_route_table(dev, cfg->irq); method = aml_method("_CBA", 0, AML_NOTSERIALIZED); aml_append(method, aml_return(aml_int(cfg->ecam.base))); aml_append(dev, method); /* * At this point crs_range_set has all the ranges used by pci * busses *other* than PCI0. These ranges will be excluded from * the PCI0._CRS. 
*/ rbuf = aml_resource_template(); aml_append(rbuf, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, 0x0000, nr_pcie_buses - 1, 0x0000, nr_pcie_buses)); if (cfg->mmio32.size) { crs_replace_with_free_ranges(crs_range_set.mem_ranges, cfg->mmio32.base, cfg->mmio32.base + cfg->mmio32.size - 1); for (i = 0; i < crs_range_set.mem_ranges->len; i++) { entry = g_ptr_array_index(crs_range_set.mem_ranges, i); aml_append(rbuf, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, entry->base, entry->limit, 0x0000, entry->limit - entry->base + 1)); } } if (cfg->pio.size) { crs_replace_with_free_ranges(crs_range_set.io_ranges, 0x0000, cfg->pio.size - 1); for (i = 0; i < crs_range_set.io_ranges->len; i++) { entry = g_ptr_array_index(crs_range_set.io_ranges, i); aml_append(rbuf, aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, entry->base, entry->limit, cfg->pio.base, entry->limit - entry->base + 1)); } } if (cfg->mmio64.size) { crs_replace_with_free_ranges(crs_range_set.mem_64bit_ranges, cfg->mmio64.base, cfg->mmio64.base + cfg->mmio64.size - 1); for (i = 0; i < crs_range_set.mem_64bit_ranges->len; i++) { entry = g_ptr_array_index(crs_range_set.mem_64bit_ranges, i); aml_append(rbuf, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, entry->base, entry->limit, 0x0000, entry->limit - entry->base + 1)); } } aml_append(dev, aml_name_decl("_CRS", rbuf)); acpi_dsdt_add_pci_osc(dev); Aml *dev_res0 = aml_device("%s", "RES0"); aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02"))); crs = aml_resource_template(); aml_append(crs, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, cfg->ecam.base, cfg->ecam.base + cfg->ecam.size - 1, 0x0000, cfg->ecam.size)); aml_append(dev_res0, aml_name_decl("_CRS", crs)); aml_append(dev, dev_res0); aml_append(scope, dev); crs_range_set_free(&crs_range_set); }
97ca9c5920362d5b7a9f96d4fa758e9f2ccb3301
https://github.com/qemu/qemu
1not_vulnerable
failover: really display a warning when the primary device is not found In failover_add_primary(), we search the id of the failover device by scanning the list of the devices in the opts list to find a device with a failover_pair_id equals to the id of the virtio-net device. If the failover_pair_id is not found, QEMU ignores the primary device silently (which also means it will not be hidden and it will be enabled directly at boot). After that, we search the id in the opts list to do a qdev_device_add() with it. The device will be always found as otherwise we had exited before, and thus the warning is never displayed. Fix that by moving the error report to the first exit condition. Also add a g_assert() to be sure the compiler will not complain about a possibly NULL pointer. Signed-off-by: Laurent Vivier <lvivier@redhat.com> Message-Id: <20210212135250.2738750-4-lvivier@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
static void failover_add_primary(VirtIONet *n, Error **errp) { Error *err = NULL; QemuOpts *opts; char *id; DeviceState *dev = failover_find_primary_device(n); if (dev) { return; } id = failover_find_primary_device_id(n); if (!id) { error_setg(errp, "Primary device not found"); error_append_hint(errp, "Virtio-net failover will not work. Make " "sure primary device has parameter" " failover_pair_id=%s\n", n->netclient_name); return; } opts = qemu_opts_find(qemu_find_opts("device"), id); g_assert(opts); /* cannot be NULL because id was found using opts list */ dev = qdev_device_add(opts, &err); if (err) { qemu_opts_del(opts); } else { object_unref(OBJECT(dev)); } error_propagate(errp, err); }
00e7b1299599384dfdda2a2a4570a0fb2d69eb6b
https://github.com/qemu/qemu
1not_vulnerable
virtio-net: add missing object_unref() failover_add_primary() calls qdev_device_add() and doesn't unref the device. Because of that, when the device is unplugged a reference is remaining and prevents the cleanup of the object. This prevents to be able to plugin back the failover primary device, with errors like: (qemu) device_add vfio-pci,host=0000:41:00.0,id=hostdev0,bus=root.3,failover_pair_id=net0 (qemu) device_del hostdev0 We can check with "info qtree" and "info pci" that the device has been removed, and then: (qemu) device_add vfio-pci,host=0000:41:00.0,id=hostdev1,bus=root.3,failover_pair_id=net0 Error: vfio 0000:41:00.0: device is already attached (qemu) device_add vfio-pci,host=0000:41:00.0,id=hostdev0,bus=root.3,failover_pair_id=net0 qemu-kvm: Duplicate ID 'hostdev0' for device Fixes: 21e8709b29cd ("failover: Remove primary_dev member") Cc: quintela@redhat.com Signed-off-by: Laurent Vivier <lvivier@redhat.com> Message-Id: <20210212135250.2738750-3-lvivier@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Jens Freimann <jfreimann@redhat.com>
static void failover_add_primary(VirtIONet *n, Error **errp) { Error *err = NULL; QemuOpts *opts; char *id; DeviceState *dev = failover_find_primary_device(n); if (dev) { return; } id = failover_find_primary_device_id(n); if (!id) { return; } opts = qemu_opts_find(qemu_find_opts("device"), id); if (opts) { dev = qdev_device_add(opts, &err); if (err) { qemu_opts_del(opts); } else { object_unref(OBJECT(dev)); } } else { error_setg(errp, "Primary device not found"); error_append_hint(errp, "Virtio-net failover will not work. Make " "sure primary device has parameter" " failover_pair_id=<virtio-net-id>\n"); } error_propagate(errp, err); }
cc2b4550115baf77d556341f17eb464d18953cee
https://github.com/qemu/qemu
1not_vulnerable
vt82c686: Fix superio_cfg_{read,write}() functions These functions are memory region callbacks, so we have to check against the relative address, not the mapped address. Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu> Message-Id: <15b2968fd300a12d06b42368d084f6f80d3c3be5.1610223397.git.balaton@eik.bme.hu> [PMD: Split original patch in 5, this is part 5/5] Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
static void superio_cfg_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { SuperIOConfig *sc = opaque; uint8_t idx = sc->regs[0]; if (addr == 0) { /* config index register */ sc->regs[0] = data; return; } /* config data register */ trace_via_superio_write(idx, data); switch (idx) { case 0x00 ... 0xdf: case 0xe4: case 0xe5: case 0xe9 ... 0xed: case 0xf3: case 0xf5: case 0xf7: case 0xf9 ... 0xfb: case 0xfd ... 0xff: /* ignore write to read only registers */ return; /* case 0xe6 ... 0xe8: Should set base port of parallel and serial */ default: qemu_log_mask(LOG_UNIMP, "via_superio_cfg: unimplemented register 0x%x\n", idx); break; } sc->regs[idx] = data; }
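Memory region callbacks receive the offset inside the region, not the absolute guest address, so comparisons have to be made against offsets. A self-contained sketch with a hypothetical two-register device (the 0x3f0 base is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define REGION_BASE 0x3f0   /* hypothetical address where the region is mapped */

/* Handler in the style of a MemoryRegionOps .write callback: 'addr' is the
 * offset within the region. */
static void cfg_write(void *opaque, uint64_t addr, uint64_t data, unsigned size)
{
    uint8_t *regs = opaque;
    (void)size;

    switch (addr) {          /* compare against offsets 0 and 1, never 0x3f0/0x3f1 */
    case 0:
        regs[0] = data;      /* index register */
        break;
    case 1:
        regs[1] = data;      /* data register */
        break;
    default:
        printf("unexpected offset 0x%" PRIx64 "\n", addr);
        break;
    }
}

int main(void)
{
    uint8_t regs[2] = { 0 };

    /* the bus dispatcher subtracts the base before calling the handler */
    cfg_write(regs, 0x3f0 - REGION_BASE, 0x55, 1);
    printf("index reg = 0x%02x\n", regs[0]);
    return 0;
}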
2473dc4022458dcc05ec367ce97edbef29d7e50c
https://github.com/qemu/qemu
1not_vulnerable
hw/sd: sd: Skip write protect groups check in sd_erase() for high capacity cards High capacity cards don't support write protection hence we should not perform the write protect groups check in sd_erase() for them. Signed-off-by: Bin Meng <bin.meng@windriver.com> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-Id: <20210216150225.27996-6-bmeng.cn@gmail.com> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
static void sd_erase(SDState *sd) { int i; uint64_t erase_start = sd->erase_start; uint64_t erase_end = sd->erase_end; bool sdsc = true; trace_sdcard_erase(sd->erase_start, sd->erase_end); if (sd->erase_start == INVALID_ADDRESS || sd->erase_end == INVALID_ADDRESS) { sd->card_status |= ERASE_SEQ_ERROR; sd->erase_start = INVALID_ADDRESS; sd->erase_end = INVALID_ADDRESS; return; } if (FIELD_EX32(sd->ocr, OCR, CARD_CAPACITY)) { /* High capacity memory card: erase units are 512 byte blocks */ erase_start *= 512; erase_end *= 512; sdsc = false; } if (erase_start > sd->size || erase_end > sd->size) { sd->card_status |= OUT_OF_RANGE; sd->erase_start = INVALID_ADDRESS; sd->erase_end = INVALID_ADDRESS; return; } sd->erase_start = INVALID_ADDRESS; sd->erase_end = INVALID_ADDRESS; sd->csd[14] |= 0x40; /* Only SDSC cards support write protect groups */ if (sdsc) { erase_start = sd_addr_to_wpnum(erase_start); erase_end = sd_addr_to_wpnum(erase_end); for (i = erase_start; i <= erase_end; i++) { assert(i < sd->wpgrps_size); if (test_bit(i, sd->wp_groups)) { sd->card_status |= WP_ERASE_SKIP; } } } }
64ea2d9f74a824269f2f564632bda52d60ae9243
https://github.com/qemu/qemu
1not_vulnerable
hw/sd: sd: Fix address check in sd_erase() For high capacity memory cards, the erase start address and end address are multiplied by 512, but the address check is still based on the original block number in sd->erase_{start, end}. Fixes: 1bd6fd8ed593 ("hw/sd/sdcard: Do not attempt to erase out of range addresses") Signed-off-by: Bin Meng <bin.meng@windriver.com> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-Id: <20210216150225.27996-2-bmeng.cn@gmail.com> Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
static void sd_erase(SDState *sd) { int i; uint64_t erase_start = sd->erase_start; uint64_t erase_end = sd->erase_end; trace_sdcard_erase(sd->erase_start, sd->erase_end); if (sd->erase_start == INVALID_ADDRESS || sd->erase_end == INVALID_ADDRESS) { sd->card_status |= ERASE_SEQ_ERROR; sd->erase_start = INVALID_ADDRESS; sd->erase_end = INVALID_ADDRESS; return; } if (FIELD_EX32(sd->ocr, OCR, CARD_CAPACITY)) { /* High capacity memory card: erase units are 512 byte blocks */ erase_start *= 512; erase_end *= 512; } if (erase_start > sd->size || erase_end > sd->size) { sd->card_status |= OUT_OF_RANGE; sd->erase_start = INVALID_ADDRESS; sd->erase_end = INVALID_ADDRESS; return; } erase_start = sd_addr_to_wpnum(erase_start); erase_end = sd_addr_to_wpnum(erase_end); sd->erase_start = INVALID_ADDRESS; sd->erase_end = INVALID_ADDRESS; sd->csd[14] |= 0x40; for (i = erase_start; i <= erase_end; i++) { assert(i < sd->wpgrps_size); if (test_bit(i, sd->wp_groups)) { sd->card_status |= WP_ERASE_SKIP; } } }
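For high-capacity cards the erase arguments are 512-byte block numbers, so both the byte addresses and the out-of-range check must use the scaled values, which is what the fix does. A worked sketch with an assumed 4 GiB SDHC card:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t card_size = 4ULL * 1024 * 1024 * 1024;  /* assumed 4 GiB SDHC card */
    uint64_t erase_start_blk = 8 * 1024 * 1024;      /* block numbers from CMD32/CMD33 */
    uint64_t erase_end_blk = erase_start_blk + 7;

    /* high-capacity cards address the medium in 512-byte blocks */
    uint64_t start = erase_start_blk * 512;
    uint64_t end = erase_end_blk * 512;

    /* checking the scaled byte addresses rejects this range ... */
    bool out_of_range = start > card_size || end > card_size;
    /* ... while checking the raw block numbers (8M < 4G) would wrongly accept it */

    printf("start=0x%llx end=0x%llx out_of_range=%d\n",
           (unsigned long long)start, (unsigned long long)end, out_of_range);
    return 0;
}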
edd4a85dd77369eb9e64c2755dad212f406dda43
https://github.com/qemu/qemu
1not_vulnerable
contrib: space required after that ',' I was reading contrib-related code and found some style problems while checking the code using checkpatch.pl. This commit fixes the issue below: ERROR: space required after that ',' Signed-off-by: zhouyang <zhouyang789@huawei.com> Signed-off-by: Alex Bennée <alex.bennee@linaro.org> Message-Id: <20210118031004.1662363-5-zhouyang789@huawei.com> Message-Id: <20210213130325.14781-8-alex.bennee@linaro.org>
static void plugin_exit(qemu_plugin_id_t id, void *p) { g_autoptr(GString) report = g_string_new("Instruction Classes:\n"); int i; GList *counts; InsnClassExecCount *class = NULL; for (i = 0; i < class_table_sz; i++) { class = &class_table[i]; switch (class->what) { case COUNT_CLASS: if (class->count || verbose) { g_string_append_printf(report, "Class: %-24s\t(%ld hits)\n", class->class, class->count); } break; case COUNT_INDIVIDUAL: g_string_append_printf(report, "Class: %-24s\tcounted individually\n", class->class); break; case COUNT_NONE: g_string_append_printf(report, "Class: %-24s\tnot counted\n", class->class); break; default: break; } } counts = g_hash_table_get_values(insns); if (counts && g_list_next(counts)) { g_string_append_printf(report, "Individual Instructions:\n"); counts = g_list_sort(counts, cmp_exec_count); for (i = 0; i < limit && g_list_next(counts); i++, counts = g_list_next(counts)) { InsnExecCount *rec = (InsnExecCount *) counts->data; g_string_append_printf(report, "Instr: %-24s\t(%ld hits)\t(op=0x%08x/%s)\n", rec->insn, rec->count, rec->opcode, rec->class ? rec->class->class : "un-categorised"); } g_list_free(counts); } g_hash_table_destroy(insns); qemu_plugin_outs(report->str); }
82e2756897810b6e17e0c352101878b97b1e2688
https://github.com/qemu/qemu
1not_vulnerable
event_notifier: Set ->initialized earlier in event_notifier_init() Otherwise the call to event_notifier_set() is a nop, which causes the SLOF firmware on POWER to hang when booting from a virtio-scsi device: virtio_scsi_dataplane_start() virtio_scsi_vring_init() virtio_bus_set_host_notifier() <- assign == true event_notifier_init() <- active == 1 event_notifier_set() <- fails right away if !e->initialized Fixes: e34e47eb28c0 ("event_notifier: handle initialization failure better") Cc: mlevitsk@redhat.com Signed-off-by: Greg Kurz <groug@kaod.org> Message-Id: <20210216120247.1293569-1-groug@kaod.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
int event_notifier_init(EventNotifier *e, int active) { int fds[2]; int ret; #ifdef CONFIG_EVENTFD ret = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); #else ret = -1; errno = ENOSYS; #endif if (ret >= 0) { e->rfd = e->wfd = ret; } else { if (errno != ENOSYS) { return -errno; } if (qemu_pipe(fds) < 0) { return -errno; } ret = fcntl_setfl(fds[0], O_NONBLOCK); if (ret < 0) { ret = -errno; goto fail; } ret = fcntl_setfl(fds[1], O_NONBLOCK); if (ret < 0) { ret = -errno; goto fail; } e->rfd = fds[0]; e->wfd = fds[1]; } e->initialized = true; if (active) { event_notifier_set(e); } return 0; fail: close(fds[0]); close(fds[1]); return ret; }
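The hang happens because the setter checks the flag and silently refuses to signal when it is not set yet. A minimal standalone sketch of the ordering (plain struct, not the real EventNotifier):

#include <stdbool.h>
#include <stdio.h>

struct notifier {
    bool initialized;
    int pending;
};

static int notifier_set(struct notifier *n)
{
    if (!n->initialized) {
        return -1;              /* silently does nothing, like the early return */
    }
    n->pending = 1;
    return 0;
}

static void notifier_init(struct notifier *n, bool active)
{
    n->pending = 0;
    n->initialized = true;      /* must be set before the optional signal below */
    if (active) {
        notifier_set(n);
    }
}

int main(void)
{
    struct notifier n = { 0 };

    notifier_init(&n, true);
    printf("pending = %d\n", n.pending);  /* 1; setting the flag last would leave 0 */
    return 0;
}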
118f2aadbc66aaae4e8d52259288e18f2aa4544a
https://github.com/qemu/qemu
1not_vulnerable
hvf: Guard xgetbv call This prevents an illegal-instruction fault on CPUs that do not support xgetbv. Buglink: https://bugs.launchpad.net/qemu/+bug/1758819 Reviewed-by: Cameron Esfahani <dirty@apple.com> Signed-off-by: Hill Ma <maahiuzeon@gmail.com> Message-Id: <X/6OJ7qk0W6bHkHQ@Hills-Mac-Pro.local> Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, int reg) { uint64_t cap; uint32_t eax, ebx, ecx, edx; host_cpuid(func, idx, &eax, &ebx, &ecx, &edx); switch (func) { case 0: eax = eax < (uint32_t)0xd ? eax : (uint32_t)0xd; break; case 1: edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS; ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE | CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND; ecx |= CPUID_EXT_HYPERVISOR; break; case 6: eax = CPUID_6_EAX_ARAT; ebx = 0; ecx = 0; edx = 0; break; case 7: if (idx == 0) { ebx &= CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512PF | CPUID_7_0_EBX_AVX512ER | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_INVPCID; hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap); if (!(cap & CPU_BASED2_INVPCID)) { ebx &= ~CPUID_7_0_EBX_INVPCID; } ecx &= CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_AVX512_VPOPCNTDQ; edx &= CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS; } else { ebx = 0; ecx = 0; edx = 0; } eax = 0; break; case 0xD: if (idx == 0) { uint64_t host_xcr0; if (xgetbv(ecx, 0, &host_xcr0)) { uint64_t supp_xcr0 = host_xcr0 & (XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK); eax &= supp_xcr0; } } else if (idx == 1) { hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap); eax &= CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1; if (!(cap & CPU_BASED2_XSAVES_XRSTORS)) { eax &= ~CPUID_XSAVE_XSAVES; } } break; case 0x80000001: /* LM only if HVF in 64-bit mode */ edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_EXT2_SYSCALL | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | CPUID_PSE36 | CPUID_EXT2_MMXEXT | CPUID_MMX | CPUID_FXSR | CPUID_EXT2_FXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX; hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap); if (!(cap & CPU_BASED2_RDTSCP)) { edx &= ~CPUID_EXT2_RDTSCP; } hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cap); if (!(cap & CPU_BASED_TSC_OFFSET)) { edx &= ~CPUID_EXT2_RDTSCP; } ecx &= CPUID_EXT3_LAHF_LM | CPUID_EXT3_CMP_LEG | CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_OSVW | CPUID_EXT3_XOP | CPUID_EXT3_FMA4 | CPUID_EXT3_TBM; break; default: return 0; } switch (reg) { case R_EAX: return eax; case R_EBX: return ebx; case R_ECX: return ecx; case R_EDX: return edx; default: return 0; } }
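xgetbv raises an invalid-opcode exception unless CPUID reports OSXSAVE, so the read has to be guarded. A hedged sketch of such a guard for x86-64 GCC/Clang (an illustration of the idea, not the helper used by the commit):

#include <cpuid.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fill *xcr0 only when CPUID.1:ECX.OSXSAVE (bit 27) says xgetbv is usable. */
static bool read_xcr0(uint64_t *xcr0)
{
    uint32_t eax, ebx, ecx, edx, lo, hi;

    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
        return false;
    }
    if (!(ecx & (1u << 27))) {
        return false;           /* calling xgetbv here would fault with #UD */
    }
    __asm__ volatile("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
    *xcr0 = ((uint64_t)hi << 32) | lo;
    return true;
}

int main(void)
{
    uint64_t xcr0;

    if (read_xcr0(&xcr0)) {
        printf("XCR0 = 0x%llx\n", (unsigned long long)xcr0);
    } else {
        printf("xgetbv not available\n");
    }
    return 0;
}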
342e3a4f20653c2d419cc0e8fdc0b99dfea32fed
https://github.com/qemu/qemu
1not_vulnerable
util/cutils: Skip "." when looking for next directory component When looking for the next directory component, a "." component is now skipped. This fixes the path(s) used for firmware lookup for the prefix == bindir case which is standard for QEMU on Windows and where the internally used bindir value ends with "/.". Signed-off-by: Stefan Weil <sw@weilnetz.de> Message-Id: <20210208205752.2488774-1-sw@weilnetz.de> Cc: qemu-stable@nongnu.org Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static inline bool starts_with_prefix(const char *dir) { size_t prefix_len = strlen(CONFIG_PREFIX); return !memcmp(dir, CONFIG_PREFIX, prefix_len) && (!dir[prefix_len] || G_IS_DIR_SEPARATOR(dir[prefix_len])); }
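The firmware search walks the configured path one directory component at a time, so a "." component must contribute nothing for a bindir ending in "/." to still match the prefix. A standalone sketch of a component scanner that skips "." (not the QEMU helper itself):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Advance *p to the next path component, skipping separators and "." entries.
 * Returns false when the path is exhausted. */
static bool next_component(const char **p, const char **comp, size_t *len)
{
    const char *s = *p;

    for (;;) {
        while (*s == '/') {
            s++;
        }
        if (*s == '\0') {
            *p = s;
            return false;
        }
        size_t n = strcspn(s, "/");
        if (n == 1 && s[0] == '.') {    /* "." adds nothing: skip it */
            s += n;
            continue;
        }
        *comp = s;
        *len = n;
        *p = s + n;
        return true;
    }
}

int main(void)
{
    const char *path = "/usr/local/./bin/.";   /* a bindir ending in "/." */
    const char *comp;
    size_t len;

    while (next_component(&path, &comp, &len)) {
        printf("component: %.*s\n", (int)len, comp);
    }
    return 0;
}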
61dbe03787f2f8bdd61da99ea19fd80b0d5c2bfa
https://github.com/qemu/qemu
1not_vulnerable
linux-user/aarch64: Signal SEGV_MTESERR for sync tag check fault Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210212184902.1251044-28-richard.henderson@linaro.org Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
void cpu_loop(CPUARMState *env) { CPUState *cs = env_cpu(env); int trapnr, ec, fsc; abi_long ret; target_siginfo_t info; for (;;) { cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); process_queued_cpu_work(cs); switch (trapnr) { case EXCP_SWI: ret = do_syscall(env, env->xregs[8], env->xregs[0], env->xregs[1], env->xregs[2], env->xregs[3], env->xregs[4], env->xregs[5], 0, 0); if (ret == -TARGET_ERESTARTSYS) { env->pc -= 4; } else if (ret != -TARGET_QEMU_ESIGRETURN) { env->xregs[0] = ret; } break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_UDEF: info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLOPN; info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info._sifields._sigfault._addr = env->exception.vaddress; /* We should only arrive here with EC in {DATAABORT, INSNABORT}. */ ec = syn_get_ec(env->exception.syndrome); assert(ec == EC_DATAABORT || ec == EC_INSNABORT); /* Both EC have the same format for FSC, or close enough. */ fsc = extract32(env->exception.syndrome, 0, 6); switch (fsc) { case 0x04 ... 0x07: /* Translation fault, level {0-3} */ info.si_code = TARGET_SEGV_MAPERR; break; case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */ case 0x0d ... 0x0f: /* Permission fault, level {1-3} */ info.si_code = TARGET_SEGV_ACCERR; break; case 0x11: /* Synchronous Tag Check Fault */ info.si_code = TARGET_SEGV_MTESERR; break; default: g_assert_not_reached(); } queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; case EXCP_DEBUG: case EXCP_BKPT: info.si_signo = TARGET_SIGTRAP; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; case EXCP_SEMIHOST: env->xregs[0] = do_common_semihosting(cs); env->pc += 4; break; case EXCP_YIELD: /* nothing to do here for user-mode, just resume guest code */ break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; default: EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); abort(); } process_pending_signals(env); /* Exception return on AArch64 always clears the exclusive monitor, * so any return to running guest code implies this. */ env->exclusive_addr = -1; } }
7f22201ac85186dd476d5ff8da278ab603df72b6
https://github.com/qemu/qemu
1not_vulnerable
xen-block: fix reporting of discard feature Linux blkfront expects both "discard-granularity" and "discard-alignment" present on xenbus in order to properly enable the feature, not exposing "discard-alignment" left some Linux blkfront versions with a broken discard setup. This has also been addressed in Linux with: https://lore.kernel.org/lkml/20210118151528.81668-1-roger.pau@citrix.com/T/#u Fix QEMU to report a "discard-alignment" of 0, in order for it to work with older Linux frontends. Reported-by: Arthur Borsboom <arthurborsboom@gmail.com> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com> Message-Id: <20210118153330.82324-1-roger.pau@citrix.com> Reviewed-by: Paul Durrant <paul@xen.org> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
static void xen_block_realize(XenDevice *xendev, Error **errp) { ERRP_GUARD(); XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev); XenBlockDeviceClass *blockdev_class = XEN_BLOCK_DEVICE_GET_CLASS(xendev); const char *type = object_get_typename(OBJECT(blockdev)); XenBlockVdev *vdev = &blockdev->props.vdev; BlockConf *conf = &blockdev->props.conf; BlockBackend *blk = conf->blk; if (vdev->type == XEN_BLOCK_VDEV_TYPE_INVALID) { error_setg(errp, "vdev property not set"); return; } trace_xen_block_realize(type, vdev->disk, vdev->partition); if (blockdev_class->realize) { blockdev_class->realize(blockdev, errp); if (*errp) { return; } } /* * The blkif protocol does not deal with removable media, so it must * always be present, even for CDRom devices. */ assert(blk); if (!blk_is_inserted(blk)) { error_setg(errp, "device needs media, but drive is empty"); return; } if (!blkconf_apply_backend_options(conf, blockdev->info & VDISK_READONLY, true, errp)) { return; } if (!(blockdev->info & VDISK_CDROM) && !blkconf_geometry(conf, NULL, 65535, 255, 255, errp)) { return; } if (!blkconf_blocksizes(conf, errp)) { return; } blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev); blk_set_guest_block_size(blk, conf->logical_block_size); if (conf->discard_granularity == -1) { conf->discard_granularity = conf->physical_block_size; } if (blk_get_flags(blk) & BDRV_O_UNMAP) { xen_device_backend_printf(xendev, "feature-discard", "%u", 1); xen_device_backend_printf(xendev, "discard-granularity", "%u", conf->discard_granularity); xen_device_backend_printf(xendev, "discard-alignment", "%u", 0); } xen_device_backend_printf(xendev, "feature-flush-cache", "%u", 1); xen_device_backend_printf(xendev, "max-ring-page-order", "%u", blockdev->props.max_ring_page_order); xen_device_backend_printf(xendev, "info", "%u", blockdev->info); xen_device_frontend_printf(xendev, "virtual-device", "%lu", vdev->number); xen_device_frontend_printf(xendev, "device-type", "%s", blockdev->device_type); xen_device_backend_printf(xendev, "sector-size", "%u", conf->logical_block_size); xen_block_set_size(blockdev); blockdev->dataplane = xen_block_dataplane_create(xendev, blk, conf->logical_block_size, blockdev->props.iothread); }
076d467aacdf6dc5d01e2e61740b1795f2aec2f6
https://github.com/qemu/qemu
1not_vulnerable
blockjob: Fix crash with IOthread when block commit after snapshot Currently, if guest has workloads, IO thread will acquire aio_context lock before do io_submit, it leads to segmentfault when do block commit after snapshot. Just like below: Program received signal SIGSEGV, Segmentation fault. [Switching to Thread 0x7f7c7d91f700 (LWP 99907)] 0x00005576d0f65aab in bdrv_mirror_top_pwritev at ../block/mirror.c:1437 1437 ../block/mirror.c: No such file or directory. (gdb) p s->job $17 = (MirrorBlockJob *) 0x0 (gdb) p s->stop $18 = false Call trace of IO thread: 0 0x00005576d0f65aab in bdrv_mirror_top_pwritev at ../block/mirror.c:1437 1 0x00005576d0f7f3ab in bdrv_driver_pwritev at ../block/io.c:1174 2 0x00005576d0f8139d in bdrv_aligned_pwritev at ../block/io.c:1988 3 0x00005576d0f81b65 in bdrv_co_pwritev_part at ../block/io.c:2156 4 0x00005576d0f8e6b7 in blk_do_pwritev_part at ../block/block-backend.c:1260 5 0x00005576d0f8e84d in blk_aio_write_entry at ../block/block-backend.c:1476 ... Switch to qemu main thread: 0 0x00007f903be704ed in __lll_lock_wait at /lib/../lib64/libpthread.so.0 1 0x00007f903be6bde6 in _L_lock_941 at /lib/../lib64/libpthread.so.0 2 0x00007f903be6bcdf in pthread_mutex_lock at /lib/../lib64/libpthread.so.0 3 0x0000564b21456889 in qemu_mutex_lock_impl at ../util/qemu-thread-posix.c:79 4 0x0000564b213af8a5 in block_job_add_bdrv at ../blockjob.c:224 5 0x0000564b213b00ad in block_job_create at ../blockjob.c:440 6 0x0000564b21357c0a in mirror_start_job at ../block/mirror.c:1622 7 0x0000564b2135a9af in commit_active_start at ../block/mirror.c:1867 8 0x0000564b2133d132 in qmp_block_commit at ../blockdev.c:2768 9 0x0000564b2141fef3 in qmp_marshal_block_commit at qapi/qapi-commands-block-core.c:346 10 0x0000564b214503c9 in do_qmp_dispatch_bh at ../qapi/qmp-dispatch.c:110 11 0x0000564b21451996 in aio_bh_poll at ../util/async.c:164 12 0x0000564b2146018e in aio_dispatch at ../util/aio-posix.c:381 13 0x0000564b2145187e in aio_ctx_dispatch at ../util/async.c:306 14 0x00007f9040239049 in g_main_context_dispatch at /lib/../lib64/libglib-2.0.so.0 15 0x0000564b21447368 in main_loop_wait at ../util/main-loop.c:232 16 0x0000564b21447368 in main_loop_wait at ../util/main-loop.c:255 17 0x0000564b21447368 in main_loop_wait at ../util/main-loop.c:531 18 0x0000564b212304e1 in qemu_main_loop at ../softmmu/runstate.c:721 19 0x0000564b20f7975e in main at ../softmmu/main.c:50 In IO thread when do bdrv_mirror_top_pwritev, the job is NULL, and stop field is false, this means the MirrorBDSOpaque "s" object has not been initialized yet, and this object is initialized by block_job_create(), but the initialize process is stuck in acquiring the lock. In this situation, IO thread come to bdrv_mirror_top_pwritev(),which means that mirror-top node is already inserted into block graph, but its bs->opaque->job is not initialized. The root cause is that qemu main thread do release/acquire when hold the lock, at the same time, IO thread get the lock after release stage, and the crash occured. Actually, in this situation, job->job.aio_context will not equal to qemu_get_aio_context(), and will be the same as bs->aio_context, thus, no need to release the lock, becasue bdrv_root_attach_child() will not change the context. This patch fix this issue. Fixes: 132ada80 "block: Adjust AioContexts when attaching nodes" Signed-off-by: Michael Qiu <qiudayu@huayun.com> Message-Id: <20210203024059.52683-1-08005325@163.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, uint64_t perm, uint64_t shared_perm, Error **errp) { BdrvChild *c; bool need_context_ops; bdrv_ref(bs); need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context; if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) { aio_context_release(job->job.aio_context); } c = bdrv_root_attach_child(bs, name, &child_job, 0, job->job.aio_context, perm, shared_perm, job, errp); if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) { aio_context_acquire(job->job.aio_context); } if (c == NULL) { return -EPERM; } job->nodes = g_slist_prepend(job->nodes, c); bdrv_op_block_all(bs, job->blocker); return 0; }
4df7b7fac84ba570bb33970659296555896232b6
https://github.com/qemu/qemu
1not_vulnerable
linux-user/syscall: Fix do_ioctl_ifconf() for 64 bit targets. The sizeof(struct ifreq) is 40 for 64 bit and 32 for 32 bit architectures. This structure contains a union of other structures, of which struct ifmap is the biggest for 64 bit architectures. Calling ioctl(…, SIOCGIFCONF, …) fills a struct sockaddr of that union, and do_ioctl_ifconf() only considered that struct sockaddr for the size of the union, which has the same size as struct ifmap on 32 bit architectures. So do_ioctl_ifconf() assumed a wrong size of 32 for struct ifreq instead of the correct size of 40 on 64 bit architectures. The fix makes do_ioctl_ifconf() handle struct ifmap as the biggest part of the union, treating struct ifreq with the correct size. Signed-off-by: Stefan <stefan-guix@vodafonemail.de> Message-Id: <60AA0765-53DD-43D1-A3D2-75F1778526F6@vodafonemail.de> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, int cmd, abi_long arg) { const argtype *arg_type = ie->arg_type; int target_size; void *argptr; int ret; struct ifconf *host_ifconf; uint32_t outbufsz; const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) }; int target_ifreq_size; int nb_ifreq; int free_buf = 0; int i; int target_ifc_len; abi_long target_ifc_buf; int host_ifc_len; char *host_ifc_buf; assert(arg_type[0] == TYPE_PTR); assert(ie->access == IOC_RW); arg_type++; target_size = thunk_type_size(arg_type, 0); argptr = lock_user(VERIFY_READ, arg, target_size, 1); if (!argptr) return -TARGET_EFAULT; thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); unlock_user(argptr, arg, 0); host_ifconf = (struct ifconf *)(unsigned long)buf_temp; target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; target_ifreq_size = thunk_type_size(ifreq_max_type, 0); if (target_ifc_buf != 0) { target_ifc_len = host_ifconf->ifc_len; nb_ifreq = target_ifc_len / target_ifreq_size; host_ifc_len = nb_ifreq * sizeof(struct ifreq); outbufsz = sizeof(*host_ifconf) + host_ifc_len; if (outbufsz > MAX_STRUCT_SIZE) { /* * We can't fit all the extents into the fixed size buffer. * Allocate one that is large enough and use it instead. */ host_ifconf = malloc(outbufsz); if (!host_ifconf) { return -TARGET_ENOMEM; } memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); free_buf = 1; } host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf); host_ifconf->ifc_len = host_ifc_len; } else { host_ifc_buf = NULL; } host_ifconf->ifc_buf = host_ifc_buf; ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); if (!is_error(ret)) { /* convert host ifc_len to target ifc_len */ nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); target_ifc_len = nb_ifreq * target_ifreq_size; host_ifconf->ifc_len = target_ifc_len; /* restore target ifc_buf */ host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; /* copy struct ifconf to target user */ argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); if (!argptr) return -TARGET_EFAULT; thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); unlock_user(argptr, arg, target_size); if (target_ifc_buf != 0) { /* copy ifreq[] to target user */ argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); for (i = 0; i < nb_ifreq ; i++) { thunk_convert(argptr + i * target_ifreq_size, host_ifc_buf + i * sizeof(struct ifreq), ifreq_arg_type, THUNK_TARGET); } unlock_user(argptr, target_ifc_buf, target_ifc_len); } } if (free_buf) { free(host_ifconf); } return ret; }
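A minimal standalone sketch of the size mismatch described above: on Linux, struct ifreq is an interface name plus a union whose largest member on 64-bit hosts is struct ifmap (it contains unsigned long fields), while struct sockaddr stays 16 bytes everywhere. Built as a 64-bit binary this typically prints 40/16/24; built with -m32 it typically prints 32/16/16 (exact values depend on the C library, so treat them as illustrative).

#include <stdio.h>
#include <net/if.h>        /* struct ifreq, struct ifmap (Linux) */
#include <sys/socket.h>    /* struct sockaddr */

int main(void)
{
    printf("sizeof(struct ifreq)    = %zu\n", sizeof(struct ifreq));
    printf("sizeof(struct sockaddr) = %zu\n", sizeof(struct sockaddr));
    printf("sizeof(struct ifmap)    = %zu\n", sizeof(struct ifmap));
    return 0;
}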
ccc5ccc17f8cfbfd87d9aede5d12a2d47c56e712
https://github.com/qemu/qemu
1not_vulnerable
linux-user/mmap: Avoid asserts for out of range mremap calls If mremap() is called without the MREMAP_MAYMOVE flag with a start address just before the end of memory (reserved_va) where new_size would exceed it (and GUEST_ADDR_MAX), the assert(end - 1 <= GUEST_ADDR_MAX) in page_set_flags() would trigger. Add an extra guard to the guest_range_valid() checks to prevent this and avoid asserting on binaries when reserved_va is set. This means a bug I was seeing locally now gives the same behaviour regardless of whether reserved_va is set or not. Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <70c46e7b999bafbb01d54bfafd44b420d0b782e9.camel@linuxfoundation.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, abi_ulong new_size, unsigned long flags, abi_ulong new_addr) { int prot; void *host_addr; if (!guest_range_valid(old_addr, old_size) || ((flags & MREMAP_FIXED) && !guest_range_valid(new_addr, new_size)) || ((flags & MREMAP_MAYMOVE) == 0 && !guest_range_valid(old_addr, new_size))) { errno = ENOMEM; return -1; } mmap_lock(); if (flags & MREMAP_FIXED) { host_addr = mremap(g2h(old_addr), old_size, new_size, flags, g2h(new_addr)); if (reserved_va && host_addr != MAP_FAILED) { /* If new and old addresses overlap then the above mremap will already have failed with EINVAL. */ mmap_reserve(old_addr, old_size); } } else if (flags & MREMAP_MAYMOVE) { abi_ulong mmap_start; mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE); if (mmap_start == -1) { errno = ENOMEM; host_addr = MAP_FAILED; } else { host_addr = mremap(g2h(old_addr), old_size, new_size, flags | MREMAP_FIXED, g2h(mmap_start)); if (reserved_va) { mmap_reserve(old_addr, old_size); } } } else { int prot = 0; if (reserved_va && old_size < new_size) { abi_ulong addr; for (addr = old_addr + old_size; addr < old_addr + new_size; addr++) { prot |= page_get_flags(addr); } } if (prot == 0) { host_addr = mremap(g2h(old_addr), old_size, new_size, flags); if (host_addr != MAP_FAILED) { /* Check if address fits target address space */ if (!guest_range_valid(h2g(host_addr), new_size)) { /* Revert mremap() changes */ host_addr = mremap(g2h(old_addr), new_size, old_size, flags); errno = ENOMEM; host_addr = MAP_FAILED; } else if (reserved_va && old_size > new_size) { mmap_reserve(old_addr + old_size, old_size - new_size); } } } else { errno = ENOMEM; host_addr = MAP_FAILED; } } if (host_addr == MAP_FAILED) { new_addr = -1; } else { new_addr = h2g(host_addr); prot = page_get_flags(old_addr); page_set_flags(old_addr, old_addr + old_size, 0); page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID); } tb_invalidate_phys_range(new_addr, new_addr + new_size); mmap_unlock(); return new_addr; }
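The extra guard described above boils down to a range check on the grown mapping: without MREMAP_MAYMOVE the mapping stays at old_addr, so old_addr plus new_size must still fit inside the guest address space, and otherwise the call is refused with ENOMEM instead of tripping the assertion later. A standalone sketch with a hypothetical 32-bit guest limit (not the QEMU helper itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUEST_ADDR_MAX 0xffffffffULL   /* hypothetical 32-bit guest limit */

/* true if [start, start + len) lies entirely inside the guest space */
static bool range_fits_guest(uint64_t start, uint64_t len)
{
    return len != 0 && start <= GUEST_ADDR_MAX &&
           len - 1 <= GUEST_ADDR_MAX - start;
}

int main(void)
{
    uint64_t old_addr = 0xfffff000;   /* just below the end of guest memory  */
    uint64_t new_size = 0x2000;       /* grown size that would spill past it */
    int may_move = 0;                 /* MREMAP_MAYMOVE not set              */

    if (!may_move && !range_fits_guest(old_addr, new_size)) {
        fprintf(stderr, "mremap refused with ENOMEM instead of asserting\n");
        return 1;
    }
    printf("in-place grow is allowed\n");
    return 0;
}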
1c3dfb506ea3decd17ec69ed6eaf611a885b9f59
https://github.com/qemu/qemu
1not_vulnerable
linux-user/signal: Decode waitid si_code When mapping the host waitid status to the target status we previously just used decoding information in the status value. This doesn't follow what the waitid documentation describes, which instead suggests using the si_code value for the decoding. This results in the incorrect values seen when calling waitid. This is especially apparent on RV32 where all wait calls use waitid (see the bug case). This patch just passes the waitid status directly back to the guest. Buglink: https://bugs.launchpad.net/qemu/+bug/1906193 Signed-off-by: Alistair Francis <alistair.francis@wdc.com> Tested-by: Andreas K. Hüttel <dilfridge@gentoo.org> Reviewed-by: Laurent Vivier <laurent@vivier.eu> Message-Id: <1fb2d56aa23a81f4473e638abe9e2d78c09a3d5b.1611080607.git.alistair.francis@wdc.com> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, const siginfo_t *info) { int sig = host_to_target_signal(info->si_signo); int si_code = info->si_code; int si_type; tinfo->si_signo = sig; tinfo->si_errno = 0; tinfo->si_code = info->si_code; /* This memset serves two purposes: * (1) ensure we don't leak random junk to the guest later * (2) placate false positives from gcc about fields * being used uninitialized if it chooses to inline both this * function and tswap_siginfo() into host_to_target_siginfo(). */ memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad)); /* This is awkward, because we have to use a combination of * the si_code and si_signo to figure out which of the union's * members are valid. (Within the host kernel it is always possible * to tell, but the kernel carefully avoids giving userspace the * high 16 bits of si_code, so we don't have the information to * do this the easy way...) We therefore make our best guess, * bearing in mind that a guest can spoof most of the si_codes * via rt_sigqueueinfo() if it likes. * * Once we have made our guess, we record it in the top 16 bits of * the si_code, so that tswap_siginfo() later can use it. * tswap_siginfo() will strip these top bits out before writing * si_code to the guest (sign-extending the lower bits). */ switch (si_code) { case SI_USER: case SI_TKILL: case SI_KERNEL: /* Sent via kill(), tkill() or tgkill(), or direct from the kernel. * These are the only unspoofable si_code values. */ tinfo->_sifields._kill._pid = info->si_pid; tinfo->_sifields._kill._uid = info->si_uid; si_type = QEMU_SI_KILL; break; default: /* Everything else is spoofable. Make best guess based on signal */ switch (sig) { case TARGET_SIGCHLD: tinfo->_sifields._sigchld._pid = info->si_pid; tinfo->_sifields._sigchld._uid = info->si_uid; tinfo->_sifields._sigchld._status = info->si_status; tinfo->_sifields._sigchld._utime = info->si_utime; tinfo->_sifields._sigchld._stime = info->si_stime; si_type = QEMU_SI_CHLD; break; case TARGET_SIGIO: tinfo->_sifields._sigpoll._band = info->si_band; tinfo->_sifields._sigpoll._fd = info->si_fd; si_type = QEMU_SI_POLL; break; default: /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */ tinfo->_sifields._rt._pid = info->si_pid; tinfo->_sifields._rt._uid = info->si_uid; /* XXX: potential problem if 64 bit */ tinfo->_sifields._rt._sigval.sival_ptr = (abi_ulong)(unsigned long)info->si_value.sival_ptr; si_type = QEMU_SI_RT; break; } break; } tinfo->si_code = deposit32(si_code, 16, 16, si_type); }
c90e3512a4683345a8e7074961d8275ceaec578d
https://github.com/qemu/qemu
1not_vulnerable
io: error_prepend() in qio_channel_readv_full_all() causes segfault Using error_prepend() in qio_channel_readv_full_all() causes a segfault as errp is not set when ret is 0. This results in the failure of iotest 83. Replacing it with error_setg() fixes the problem. Additionally, remove the full stop at the end of the error message. Reported-by: Max Reitz <mreitz@redhat.com> Signed-off-by: Jagannathan Raman <jag.raman@oracle.com> Fixes: bebab91ebdfc591f8793a9a17370df1bfbe8b2ca (io: add qio_channel_readv_full_all_eof & qio_channel_readv_full_all helpers) Message-Id: <be476bcdb99e820fec0fa09fe8f04c9dd3e62473.1613128220.git.jag.raman@oracle.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Signed-off-by: Eric Blake <eblake@redhat.com>
int qio_channel_readv_full_all(QIOChannel *ioc, const struct iovec *iov, size_t niov, int **fds, size_t *nfds, Error **errp) { int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, errp); if (ret == 0) { error_setg(errp, "Unexpected end-of-file before all data were read"); return -1; } if (ret == 1) { return 0; } return ret; }
d3c1183ffeb71ca3a783eae3d7e1c51e71e8a621
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Correctly initialize MDCR_EL2.HPMN When working with performance monitoring counters, we look at MDCR_EL2.HPMN as part of the check whether a counter is enabled. This check fails, because MDCR_EL2.HPMN is reset to 0, meaning that no counters are "enabled" for < EL2. That's in violation of the Arm specification, which states that > On a Warm reset, this field [MDCR_EL2.HPMN] resets to the value in > PMCR_EL0.N That's also what a comment in the code acknowledges, but the necessary adjustment seems to have been forgotten when support for more counters was added. This change fixes the issue by setting the reset value to PMCR.N, which is four. Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void define_pmu_regs(ARMCPU *cpu) { /* * v7 performance monitor control register: same implementor * field as main ID register, and we implement four counters in * addition to the cycle count register. */ unsigned int i, pmcrn = PMCR_NUM_COUNTERS; ARMCPRegInfo pmcr = { .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), .accessfn = pmreg_access, .writefn = pmcr_write, .raw_writefn = raw_write, }; ARMCPRegInfo pmcr64 = { .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | PMCRLC, .writefn = pmcr_write, .raw_writefn = raw_write, }; define_one_arm_cp_reg(cpu, &pmcr); define_one_arm_cp_reg(cpu, &pmcr64); for (i = 0; i < pmcrn; i++) { char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); ARMCPRegInfo pmev_regs[] = { { .name = pmevcntr_name, .cp = 15, .crn = 14, .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, .accessfn = pmreg_access }, { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_IO, .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, .raw_readfn = pmevcntr_rawread, .raw_writefn = pmevcntr_rawwrite }, { .name = pmevtyper_name, .cp = 15, .crn = 14, .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, .accessfn = pmreg_access }, { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_IO, .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, .raw_writefn = pmevtyper_rawwrite }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, pmev_regs); g_free(pmevcntr_name); g_free(pmevcntr_el0_name); g_free(pmevtyper_name); g_free(pmevtyper_el0_name); } if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { ARMCPRegInfo v81_pmu_regs[] = { { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = extract64(cpu->pmceid0, 32, 32) }, { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = extract64(cpu->pmceid1, 32, 32) }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v81_pmu_regs); } if (cpu_isar_feature(any_pmu_8_4, cpu)) { static const ARMCPRegInfo v84_pmmir = { .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = 0 }; define_one_arm_cp_reg(cpu, &v84_pmmir); } }
832a59e43b5d8b8a9c2b2565008ebea1059d539d
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix error handling in nvme_ns_realize nvme_ns_realize passes errp to nvme_register_namespaces, but then tries to prepend errp with local_err. Just remove the local_err and use errp directly. Fixes: 15d024d4aa9b ("hw/block/nvme: split setup and register for namespace") Cc: Minwoo Im <minwoo.im.dev@gmail.com> Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static void nvme_ns_realize(DeviceState *dev, Error **errp) { NvmeNamespace *ns = NVME_NS(dev); BusState *s = qdev_get_parent_bus(dev); NvmeCtrl *n = NVME(s->parent); if (nvme_ns_setup(ns, errp)) { return; } if (nvme_register_namespace(n, ns, errp)) { return; } }
2132cfe52bd87f191887c1728190bf56e8d5275f
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: Fix a build error in nvme_get_feature() Current QEMU HEAD nvme.c does not compile with the default GCC 5.4 on a Ubuntu 16.04 host: hw/block/nvme.c:3242:9: error: ‘result’ may be used uninitialized in this function [-Werror=maybe-uninitialized] trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled"); ^ hw/block/nvme.c:3150:14: note: ‘result’ was declared here uint32_t result; ^ Explicitly initialize the result to fix it. Fixes: aa5e55e3b07e ("hw/block/nvme: open code for volatile write cache") Fixes: Coverity CID 1446371 Signed-off-by: Bin Meng <bin.meng@windriver.com> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) { NvmeCmd *cmd = &req->cmd; uint32_t dw10 = le32_to_cpu(cmd->cdw10); uint32_t dw11 = le32_to_cpu(cmd->cdw11); uint32_t nsid = le32_to_cpu(cmd->nsid); uint32_t result; uint8_t fid = NVME_GETSETFEAT_FID(dw10); NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10); uint16_t iv; NvmeNamespace *ns; int i; static const uint32_t nvme_feature_default[NVME_FID_MAX] = { [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT, }; trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11); if (!nvme_feature_support[fid]) { return NVME_INVALID_FIELD | NVME_DNR; } if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { /* * The Reservation Notification Mask and Reservation Persistence * features require a status code of Invalid Field in Command when * NSID is 0xFFFFFFFF. Since the device does not support those * features we can always return Invalid Namespace or Format as we * should do for all other features. */ return NVME_INVALID_NSID | NVME_DNR; } if (!nvme_ns(n, nsid)) { return NVME_INVALID_FIELD | NVME_DNR; } } switch (sel) { case NVME_GETFEAT_SELECT_CURRENT: break; case NVME_GETFEAT_SELECT_SAVED: /* no features are saveable by the controller; fallthrough */ case NVME_GETFEAT_SELECT_DEFAULT: goto defaults; case NVME_GETFEAT_SELECT_CAP: result = nvme_feature_cap[fid]; goto out; } switch (fid) { case NVME_TEMPERATURE_THRESHOLD: result = 0; /* * The controller only implements the Composite Temperature sensor, so * return 0 for all other sensors. */ if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { goto out; } switch (NVME_TEMP_THSEL(dw11)) { case NVME_TEMP_THSEL_OVER: result = n->features.temp_thresh_hi; goto out; case NVME_TEMP_THSEL_UNDER: result = n->features.temp_thresh_low; goto out; } return NVME_INVALID_FIELD | NVME_DNR; case NVME_ERROR_RECOVERY: if (!nvme_nsid_valid(n, nsid)) { return NVME_INVALID_NSID | NVME_DNR; } ns = nvme_ns(n, nsid); if (unlikely(!ns)) { return NVME_INVALID_FIELD | NVME_DNR; } result = ns->features.err_rec; goto out; case NVME_VOLATILE_WRITE_CACHE: result = 0; for (i = 1; i <= n->num_namespaces; i++) { ns = nvme_ns(n, i); if (!ns) { continue; } result = blk_enable_write_cache(ns->blkconf.blk); if (result) { break; } } trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled"); goto out; case NVME_ASYNCHRONOUS_EVENT_CONF: result = n->features.async_config; goto out; case NVME_TIMESTAMP: return nvme_get_feature_timestamp(n, req); default: break; } defaults: switch (fid) { case NVME_TEMPERATURE_THRESHOLD: result = 0; if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { break; } if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) { result = NVME_TEMPERATURE_WARNING; } break; case NVME_NUMBER_OF_QUEUES: result = (n->params.max_ioqpairs - 1) | ((n->params.max_ioqpairs - 1) << 16); trace_pci_nvme_getfeat_numq(result); break; case NVME_INTERRUPT_VECTOR_CONF: iv = dw11 & 0xffff; if (iv >= n->params.max_ioqpairs + 1) { return NVME_INVALID_FIELD | NVME_DNR; } result = iv; if (iv == n->admin_cq.vector) { result |= NVME_INTVC_NOCOALESCING; } break; case NVME_COMMAND_SET_PROFILE: result = 0; break; default: result = nvme_feature_default[fid]; break; } out: req->cqe.result = cpu_to_le32(result); return NVME_SUCCESS; }
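The warning class fixed above can be reproduced in isolation: older GCC releases cannot prove that every switch path assigns the variable before it is read, so it has to be initialized explicitly. A minimal standalone reproduction with a hypothetical function (not the nvme code itself); removing the "= 0" typically brings back -Wmaybe-uninitialized on GCC 5 at -O2.

#include <stdio.h>

static unsigned get_feature(int fid)
{
    unsigned result = 0;   /* the fix: explicit initialization */

    switch (fid) {
    case 1:
        result = 42;
        break;
    case 2:
        result = 7;
        break;
    default:
        /* without the "= 0" above, this path would leave result unset */
        break;
    }
    return result;
}

int main(void)
{
    printf("%u\n", get_feature(3));
    return 0;
}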
886188a9c11ff9518c8f20e265e7a98439c14d32
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix legacy namespace registration Moving namespace registration to the nvme-ns realization function had the unintended side-effect of breaking legacy namespace registration. Fix this. Fixes: 15d024d4aa9b ("hw/block/nvme: split setup and register for namespace") Reported-by: Alexander Graf <agraf@csgraf.de> Cc: Minwoo Im <minwoo.im.dev@gmail.com> Tested-by: Alexander Graf <agraf@csgraf.de> Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static void nvme_realize(PCIDevice *pci_dev, Error **errp) { NvmeCtrl *n = NVME(pci_dev); NvmeNamespace *ns; Error *local_err = NULL; nvme_check_constraints(n, &local_err); if (local_err) { error_propagate(errp, local_err); return; } qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, &pci_dev->qdev, n->parent_obj.qdev.id); nvme_init_state(n); if (nvme_init_pci(n, pci_dev, errp)) { return; } nvme_init_ctrl(n, pci_dev); /* setup a namespace if the controller drive property was given */ if (n->namespace.blkconf.blk) { ns = &n->namespace; ns->params.nsid = 1; if (nvme_ns_setup(ns, errp)) { return; } if (nvme_register_namespace(n, ns, errp)) { return; } } }
10d0ef3e6cfe228df4b2d3e27325f1b0e2b71fd5
https://github.com/qemu/qemu
1not_vulnerable
target/arm: Fix SCR RES1 handling The FW and AW bits of SCR_EL3 are RES1 only in some contexts. Force them to 1 only when there is no support for AArch32 at EL1 or above. The reset value will be 0x30 only if the CPU is AArch64-only; if there is support for AArch32 at EL1 or above, it will be reset to 0. Also adds helper function isar_feature_aa64_aa32_el1 to check if AArch32 is supported at EL1 or above. Signed-off-by: Mike Nawrocki <michael.nawrocki@gtri.gatech.edu> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210203165552.16306-2-michael.nawrocki@gtri.gatech.edu Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Begin with base v8.0 state. */ uint32_t valid_mask = 0x3fff; ARMCPU *cpu = env_archcpu(env); if (ri->state == ARM_CP_STATE_AA64) { if (arm_feature(env, ARM_FEATURE_AARCH64) && !cpu_isar_feature(aa64_aa32_el1, cpu)) { value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ } valid_mask &= ~SCR_NET; if (cpu_isar_feature(aa64_lor, cpu)) { valid_mask |= SCR_TLOR; } if (cpu_isar_feature(aa64_pauth, cpu)) { valid_mask |= SCR_API | SCR_APK; } if (cpu_isar_feature(aa64_sel2, cpu)) { valid_mask |= SCR_EEL2; } if (cpu_isar_feature(aa64_mte, cpu)) { valid_mask |= SCR_ATA; } } else { valid_mask &= ~(SCR_RW | SCR_ST); } if (!arm_feature(env, ARM_FEATURE_EL2)) { valid_mask &= ~SCR_HCE; /* On ARMv7, SMD (or SCD as it is called in v7) is only * supported if EL2 exists. The bit is UNK/SBZP when * EL2 is unavailable. In QEMU ARMv7, we force it to always zero * when EL2 is unavailable. * On ARMv8, this bit is always available. */ if (arm_feature(env, ARM_FEATURE_V7) && !arm_feature(env, ARM_FEATURE_V8)) { valid_mask &= ~SCR_SMD; } } /* Clear all-context RES0 bits. */ value &= valid_mask; raw_write(env, ri, value); }
b01fec3659f7e595d5066fc052fb31a94a8a969b
https://github.com/qemu/qemu
1not_vulnerable
spapr_numa.c: fix ibm,max-associativity-domains calculation The current logic calculates 'maxdomain' as the sum of numa_state->num_nodes and spapr->gpu_numa_id. spapr->gpu_numa_id is used as an index to determine the next available NUMA id that a given NVGPU can use. The problem is that the initial value of gpu_numa_id, for any topology that has more than one NUMA node, is equal to numa_state->num_nodes. This means that our maxdomain will always be at least twice the number of existing NUMA nodes. This means that a guest with 4 NUMA nodes will end up with the following max-associativity-domains: rtas/ibm,max-associativity-domains 00000004 00000008 00000008 00000008 00000008 This overtuning of maxdomains doesn't go unnoticed in the guest, being detected in SLUB during boot: dmesg | grep SLUB [ 0.000000] SLUB: HWalign=128, Order=0-3, MinObjects=0, CPUs=4, Nodes=8 SLUB is detecting 8 total nodes, with 4 nodes being online. This patch fixes ibm,max-associativity-domains by considering the number of NVGPU NUMA nodes present in the guest, instead of just spapr->gpu_numa_id. Reported-by: Cédric Le Goater <clg@kaod.org> Tested-by: Cédric Le Goater <clg@kaod.org> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com> Message-Id: <20210128174213.1349181-4-danielhb413@gmail.com> Reviewed-by: Greg Kurz <groug@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) { MachineState *ms = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); uint32_t number_nvgpus_nodes = spapr->gpu_numa_id - spapr_numa_initial_nvgpu_numa_id(ms); uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x3), cpu_to_be32(0x2), cpu_to_be32(0x1), }; uint32_t nr_refpoints = ARRAY_SIZE(refpoints); uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes; uint32_t maxdomains[] = { cpu_to_be32(4), cpu_to_be32(maxdomain), cpu_to_be32(maxdomain), cpu_to_be32(maxdomain), cpu_to_be32(maxdomain) }; if (spapr_machine_using_legacy_numa(spapr)) { uint32_t legacy_refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4), cpu_to_be32(0x2), }; uint32_t legacy_maxdomain = spapr->gpu_numa_id > 1 ? 1 : 0; uint32_t legacy_maxdomains[] = { cpu_to_be32(4), cpu_to_be32(legacy_maxdomain), cpu_to_be32(legacy_maxdomain), cpu_to_be32(legacy_maxdomain), cpu_to_be32(spapr->gpu_numa_id), }; G_STATIC_ASSERT(sizeof(legacy_refpoints) <= sizeof(refpoints)); G_STATIC_ASSERT(sizeof(legacy_maxdomains) <= sizeof(maxdomains)); nr_refpoints = 3; memcpy(refpoints, legacy_refpoints, sizeof(legacy_refpoints)); memcpy(maxdomains, legacy_maxdomains, sizeof(legacy_maxdomains)); /* pseries-5.0 and older reference-points array is {0x4, 0x4} */ if (smc->pre_5_1_assoc_refpoints) { nr_refpoints = 2; } } _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points", refpoints, nr_refpoints * sizeof(refpoints[0]))); _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains", maxdomains, sizeof(maxdomains))); }
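The arithmetic behind the fix above, as a standalone sketch using the values from the commit message (4 NUMA nodes, no NVGPUs): gpu_numa_id starts at the first NUMA id reserved for NVGPUs, which for a multi-node topology equals the node count, so adding it directly doubles the domain count unless the already-assigned node count is subtracted first.

#include <stdio.h>

int main(void)
{
    unsigned num_nodes = 4;                 /* NUMA nodes in the guest          */
    unsigned initial_nvgpu_numa_id = 4;     /* first id reserved for NVGPUs     */
    unsigned gpu_numa_id = 4;               /* next free NVGPU id (none in use) */

    unsigned old_maxdomain = num_nodes + gpu_numa_id;              /* 8: wrong  */
    unsigned nvgpu_nodes   = gpu_numa_id - initial_nvgpu_numa_id;  /* 0         */
    unsigned new_maxdomain = num_nodes + nvgpu_nodes;              /* 4: right  */

    printf("old maxdomain=%u, new maxdomain=%u\n", old_maxdomain, new_maxdomain);
    return 0;
}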
0065f42ef1206527188a44e9c456c9b6d10de5ec
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix set feature save field check Currently, no features are saveable, so the current check is not wrong, but add a check against the feature capabilities to make sure this will not regress if saveable features are added later. Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com> Reviewed-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Keith Busch <kbusch@kernel.org> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns = NULL; NvmeCmd *cmd = &req->cmd; uint32_t dw10 = le32_to_cpu(cmd->cdw10); uint32_t dw11 = le32_to_cpu(cmd->cdw11); uint32_t nsid = le32_to_cpu(cmd->nsid); uint8_t fid = NVME_GETSETFEAT_FID(dw10); uint8_t save = NVME_SETFEAT_SAVE(dw10); int i; trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11); if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) { return NVME_FID_NOT_SAVEABLE | NVME_DNR; } if (!nvme_feature_support[fid]) { return NVME_INVALID_FIELD | NVME_DNR; } if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { if (nsid != NVME_NSID_BROADCAST) { if (!nvme_nsid_valid(n, nsid)) { return NVME_INVALID_NSID | NVME_DNR; } ns = nvme_ns(n, nsid); if (unlikely(!ns)) { return NVME_INVALID_FIELD | NVME_DNR; } } } else if (nsid && nsid != NVME_NSID_BROADCAST) { if (!nvme_nsid_valid(n, nsid)) { return NVME_INVALID_NSID | NVME_DNR; } return NVME_FEAT_NOT_NS_SPEC | NVME_DNR; } if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) { return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; } switch (fid) { case NVME_TEMPERATURE_THRESHOLD: if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { break; } switch (NVME_TEMP_THSEL(dw11)) { case NVME_TEMP_THSEL_OVER: n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11); break; case NVME_TEMP_THSEL_UNDER: n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11); break; default: return NVME_INVALID_FIELD | NVME_DNR; } if ((n->temperature >= n->features.temp_thresh_hi) || (n->temperature <= n->features.temp_thresh_low)) { nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH); } break; case NVME_ERROR_RECOVERY: if (nsid == NVME_NSID_BROADCAST) { for (i = 1; i <= n->num_namespaces; i++) { ns = nvme_ns(n, i); if (!ns) { continue; } if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { ns->features.err_rec = dw11; } } break; } assert(ns); if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { ns->features.err_rec = dw11; } break; case NVME_VOLATILE_WRITE_CACHE: for (i = 1; i <= n->num_namespaces; i++) { ns = nvme_ns(n, i); if (!ns) { continue; } if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) { blk_flush(ns->blkconf.blk); } blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1); } break; case NVME_NUMBER_OF_QUEUES: if (n->qs_created) { return NVME_CMD_SEQ_ERROR | NVME_DNR; } /* * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR * and NSQR. */ if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) { return NVME_INVALID_FIELD | NVME_DNR; } trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1, ((dw11 >> 16) & 0xFFFF) + 1, n->params.max_ioqpairs, n->params.max_ioqpairs); req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) | ((n->params.max_ioqpairs - 1) << 16)); break; case NVME_ASYNCHRONOUS_EVENT_CONF: n->features.async_config = dw11; break; case NVME_TIMESTAMP: return nvme_set_feature_timestamp(n, req); case NVME_COMMAND_SET_PROFILE: if (dw11 & 0x1ff) { trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff); return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; } break; default: return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; } return NVME_SUCCESS; }
56990c777a635ded6e2f191c470ca6410cf5c11a
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix set feature for error recovery Only enable DULBE if the namespace supports it. Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com> Reviewed-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Keith Busch <kbusch@kernel.org> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns = NULL; NvmeCmd *cmd = &req->cmd; uint32_t dw10 = le32_to_cpu(cmd->cdw10); uint32_t dw11 = le32_to_cpu(cmd->cdw11); uint32_t nsid = le32_to_cpu(cmd->nsid); uint8_t fid = NVME_GETSETFEAT_FID(dw10); uint8_t save = NVME_SETFEAT_SAVE(dw10); int i; trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11); if (save) { return NVME_FID_NOT_SAVEABLE | NVME_DNR; } if (!nvme_feature_support[fid]) { return NVME_INVALID_FIELD | NVME_DNR; } if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { if (nsid != NVME_NSID_BROADCAST) { if (!nvme_nsid_valid(n, nsid)) { return NVME_INVALID_NSID | NVME_DNR; } ns = nvme_ns(n, nsid); if (unlikely(!ns)) { return NVME_INVALID_FIELD | NVME_DNR; } } } else if (nsid && nsid != NVME_NSID_BROADCAST) { if (!nvme_nsid_valid(n, nsid)) { return NVME_INVALID_NSID | NVME_DNR; } return NVME_FEAT_NOT_NS_SPEC | NVME_DNR; } if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) { return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; } switch (fid) { case NVME_TEMPERATURE_THRESHOLD: if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { break; } switch (NVME_TEMP_THSEL(dw11)) { case NVME_TEMP_THSEL_OVER: n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11); break; case NVME_TEMP_THSEL_UNDER: n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11); break; default: return NVME_INVALID_FIELD | NVME_DNR; } if ((n->temperature >= n->features.temp_thresh_hi) || (n->temperature <= n->features.temp_thresh_low)) { nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH); } break; case NVME_ERROR_RECOVERY: if (nsid == NVME_NSID_BROADCAST) { for (i = 1; i <= n->num_namespaces; i++) { ns = nvme_ns(n, i); if (!ns) { continue; } if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { ns->features.err_rec = dw11; } } break; } assert(ns); if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { ns->features.err_rec = dw11; } break; case NVME_VOLATILE_WRITE_CACHE: for (i = 1; i <= n->num_namespaces; i++) { ns = nvme_ns(n, i); if (!ns) { continue; } if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) { blk_flush(ns->blkconf.blk); } blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1); } break; case NVME_NUMBER_OF_QUEUES: if (n->qs_created) { return NVME_CMD_SEQ_ERROR | NVME_DNR; } /* * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR * and NSQR. */ if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) { return NVME_INVALID_FIELD | NVME_DNR; } trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1, ((dw11 >> 16) & 0xFFFF) + 1, n->params.max_ioqpairs, n->params.max_ioqpairs); req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) | ((n->params.max_ioqpairs - 1) << 16)); break; case NVME_ASYNCHRONOUS_EVENT_CONF: n->features.async_config = dw11; break; case NVME_TIMESTAMP: return nvme_set_feature_timestamp(n, req); case NVME_COMMAND_SET_PROFILE: if (dw11 & 0x1ff) { trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff); return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; } break; default: return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; } return NVME_SUCCESS; }
044f1876b0b00a970e65b99d9be21925cdd7dc6b
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: error if drive less than a zone size If a user assigns a backing device with less capacity than the size of a single zone, the namespace capacity will be reported as zero and the kernel will silently fail to allocate the namespace. This patch errors out in case the backing device cannot accommodate at least a single zone. Signed-off-by: Minwoo Im <minwoo.im.dev@gmail.com> [k.jensen: small fixup in the error and commit message] Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static int nvme_ns_zoned_check_calc_geometry(NvmeNamespace *ns, Error **errp) { uint64_t zone_size, zone_cap; uint32_t lbasz = ns->blkconf.logical_block_size; /* Make sure that the values of ZNS properties are sane */ if (ns->params.zone_size_bs) { zone_size = ns->params.zone_size_bs; } else { zone_size = NVME_DEFAULT_ZONE_SIZE; } if (ns->params.zone_cap_bs) { zone_cap = ns->params.zone_cap_bs; } else { zone_cap = zone_size; } if (zone_cap > zone_size) { error_setg(errp, "zone capacity %"PRIu64"B exceeds " "zone size %"PRIu64"B", zone_cap, zone_size); return -1; } if (zone_size < lbasz) { error_setg(errp, "zone size %"PRIu64"B too small, " "must be at least %"PRIu32"B", zone_size, lbasz); return -1; } if (zone_cap < lbasz) { error_setg(errp, "zone capacity %"PRIu64"B too small, " "must be at least %"PRIu32"B", zone_cap, lbasz); return -1; } /* * Save the main zone geometry values to avoid * calculating them later again. */ ns->zone_size = zone_size / lbasz; ns->zone_capacity = zone_cap / lbasz; ns->num_zones = ns->size / lbasz / ns->zone_size; /* Do a few more sanity checks of ZNS properties */ if (!ns->num_zones) { error_setg(errp, "insufficient drive capacity, must be at least the size " "of one zone (%"PRIu64"B)", zone_size); return -1; } if (ns->params.max_open_zones > ns->num_zones) { error_setg(errp, "max_open_zones value %u exceeds the number of zones %u", ns->params.max_open_zones, ns->num_zones); return -1; } if (ns->params.max_active_zones > ns->num_zones) { error_setg(errp, "max_active_zones value %u exceeds the number of zones %u", ns->params.max_active_zones, ns->num_zones); return -1; } if (ns->params.zd_extension_size) { if (ns->params.zd_extension_size & 0x3f) { error_setg(errp, "zone descriptor extension size must be a multiple of 64B"); return -1; } if ((ns->params.zd_extension_size >> 6) > 0xff) { error_setg(errp, "zone descriptor extension size is too large"); return -1; } } return 0; }
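The failure mode guarded against above reduces to integer arithmetic: with a backing device smaller than one zone, the zone count truncates to zero, which is what the new check rejects. A standalone sketch with hypothetical sizes (a 64 MiB backing file and 128 MiB zones):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t drive_size = 64ULL * 1024 * 1024;            /* 64 MiB backing device */
    uint32_t lbasz      = 4096;                           /* logical block size    */
    uint64_t zone_size  = (128ULL * 1024 * 1024) / lbasz; /* 128 MiB zone, in LBAs */

    uint64_t num_zones = drive_size / lbasz / zone_size;  /* truncates to 0 here   */

    if (!num_zones) {
        fprintf(stderr, "insufficient drive capacity, must be at least the "
                        "size of one zone\n");
        return 1;
    }
    printf("num_zones=%llu\n", (unsigned long long)num_zones);
    return 0;
}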
0d3d5da2ccc8823c7c904b790b8d0fdf569790f0
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix 64 bit register hi/lo split writes 64 bit registers like ASQ and ACQ should be writable by both a hi/lo 32 bit write combination as well as a plain 64 bit write. The spec does not define ordering on the hi/lo split, but the code currently assumes that the low order bits are written first. Additionally, the code does not consider that another address might already have been written into the register, causing the OR'ing to result in a bad address. Fix this by explicitly overwriting only the low or high order bits for 32 bit writes. Signed-off-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Keith Busch <kbusch@kernel.org>
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, unsigned size) { if (unlikely(offset & (sizeof(uint32_t) - 1))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32, "MMIO write not 32-bit aligned," " offset=0x%"PRIx64"", offset); /* should be ignored, fall through for now */ } if (unlikely(size < sizeof(uint32_t))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall, "MMIO write smaller than 32-bits," " offset=0x%"PRIx64", size=%u", offset, size); /* should be ignored, fall through for now */ } switch (offset) { case 0xc: /* INTMS */ if (unlikely(msix_enabled(&(n->parent_obj)))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, "undefined access to interrupt mask set" " when MSI-X is enabled"); /* should be ignored, fall through for now */ } n->bar.intms |= data & 0xffffffff; n->bar.intmc = n->bar.intms; trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc); nvme_irq_check(n); break; case 0x10: /* INTMC */ if (unlikely(msix_enabled(&(n->parent_obj)))) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, "undefined access to interrupt mask clr" " when MSI-X is enabled"); /* should be ignored, fall through for now */ } n->bar.intms &= ~(data & 0xffffffff); n->bar.intmc = n->bar.intms; trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc); nvme_irq_check(n); break; case 0x14: /* CC */ trace_pci_nvme_mmio_cfg(data & 0xffffffff); /* Windows first sends data, then sends enable bit */ if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) && !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc)) { n->bar.cc = data; } if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) { n->bar.cc = data; if (unlikely(nvme_start_ctrl(n))) { trace_pci_nvme_err_startfail(); n->bar.csts = NVME_CSTS_FAILED; } else { trace_pci_nvme_mmio_start_success(); n->bar.csts = NVME_CSTS_READY; } } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) { trace_pci_nvme_mmio_stopped(); nvme_ctrl_reset(n); n->bar.csts &= ~NVME_CSTS_READY; } if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) { trace_pci_nvme_mmio_shutdown_set(); nvme_ctrl_shutdown(n); n->bar.cc = data; n->bar.csts |= NVME_CSTS_SHST_COMPLETE; } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) { trace_pci_nvme_mmio_shutdown_cleared(); n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE; n->bar.cc = data; } break; case 0x1C: /* CSTS */ if (data & (1 << 4)) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported, "attempted to W1C CSTS.NSSRO" " but CAP.NSSRS is zero (not supported)"); } else if (data != 0) { NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts, "attempted to set a read only bit" " of controller status"); } break; case 0x20: /* NSSR */ if (data == 0x4E564D65) { trace_pci_nvme_ub_mmiowr_ssreset_unsupported(); } else { /* The spec says that writes of other values have no effect */ return; } break; case 0x24: /* AQA */ n->bar.aqa = data & 0xffffffff; trace_pci_nvme_mmio_aqattr(data & 0xffffffff); break; case 0x28: /* ASQ */ n->bar.asq = size == 8 ? data : (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff); trace_pci_nvme_mmio_asqaddr(data); break; case 0x2c: /* ASQ hi */ n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32); trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq); break; case 0x30: /* ACQ */ trace_pci_nvme_mmio_acqaddr(data); n->bar.acq = size == 8 ? 
data : (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff); break; case 0x34: /* ACQ hi */ n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32); trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq); break; case 0x38: /* CMBLOC */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved, "invalid write to reserved CMBLOC" " when CMBSZ is zero, ignored"); return; case 0x3C: /* CMBSZ */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly, "invalid write to read only CMBSZ, ignored"); return; case 0xE00: /* PMRCAP */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly, "invalid write to PMRCAP register, ignored"); return; case 0xE04: /* TODO PMRCTL */ break; case 0xE08: /* PMRSTS */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly, "invalid write to PMRSTS register, ignored"); return; case 0xE0C: /* PMREBS */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly, "invalid write to PMREBS register, ignored"); return; case 0xE10: /* PMRSWTP */ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly, "invalid write to PMRSWTP register, ignored"); return; case 0xE14: /* TODO PMRMSC */ break; default: NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid, "invalid MMIO write," " offset=0x%"PRIx64", data=%"PRIx64"", offset, data); break; } }
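The register-update rule from the fix above, shown in isolation: a 32-bit write to the low dword must preserve the high dword (and vice versa), and a native 64-bit write replaces the whole register, so a previously programmed address can never leak into the new one through OR-ing. This is a standalone sketch with a hypothetical register, not the device code itself.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical 64-bit register updated the way the fixed nvme_write_bar()
 * does it: overwrite only the half being written, never OR into stale bits. */
static uint64_t reg;

static void write_lo32(uint32_t data)
{
    reg = (reg & ~0xffffffffULL) | data;
}

static void write_hi32(uint32_t data)
{
    reg = (reg & 0xffffffffULL) | ((uint64_t)data << 32);
}

int main(void)
{
    reg = 0x1111222233334444ULL;     /* stale address from an earlier setup */
    write_lo32(0xcafe0000);          /* guest reprograms the register...    */
    write_hi32(0x0000beef);          /* ...as two 32-bit accesses           */
    printf("reg = 0x%016llx\n", (unsigned long long)reg);  /* 0x0000beefcafe0000 */
    return 0;
}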
635b23ad43e37910eb7607cfee6887e89ae9e69a
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix zone write finalize The zone write pointer is unconditionally advanced, even for write faults. Make sure that the zone is always transitioned to Full if the write pointer reaches zone capacity. Cc: Dmitry Fomichev <dmitry.fomichev@wdc.com> Reviewed-by: Keith Busch <kbusch@kernel.org> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req, bool failed) { NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; NvmeZone *zone; NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe; uint64_t slba; uint32_t nlb; slba = le64_to_cpu(rw->slba); nlb = le16_to_cpu(rw->nlb) + 1; zone = nvme_get_zone_by_slba(ns, slba); zone->d.wp += nlb; if (failed) { res->slba = 0; } if (zone->d.wp == nvme_zone_wr_boundary(zone)) { switch (nvme_get_zone_state(zone)) { case NVME_ZONE_STATE_IMPLICITLY_OPEN: case NVME_ZONE_STATE_EXPLICITLY_OPEN: nvme_aor_dec_open(ns); /* fall through */ case NVME_ZONE_STATE_CLOSED: nvme_aor_dec_active(ns); /* fall through */ case NVME_ZONE_STATE_EMPTY: nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL); /* fall through */ case NVME_ZONE_STATE_FULL: break; default: assert(false); } } }
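A reduced sketch (hypothetical types, not the device code) of the finalize rule implemented above: the write pointer advances by the completed length even for failed writes, and reaching the zone write boundary always transitions the zone to Full, so the state machine and the pointer cannot disagree.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum zone_state { ZS_EMPTY, ZS_OPEN, ZS_CLOSED, ZS_FULL };

struct zone {
    uint64_t zslba;        /* zone start LBA     */
    uint64_t size;         /* zone size in LBAs  */
    uint64_t wp;           /* write pointer      */
    enum zone_state state;
};

/* Finalize a write of nlb blocks; 'failed' mirrors the request outcome. */
static void finalize_write(struct zone *z, uint32_t nlb, bool failed)
{
    z->wp += nlb;                      /* advanced even for failed writes     */
    (void)failed;                      /* failure only affects the result,    */
                                       /* not the Full transition below       */
    if (z->wp == z->zslba + z->size) {
        z->state = ZS_FULL;            /* always taken at the write boundary  */
    }
}

int main(void)
{
    struct zone z = { .zslba = 0, .size = 8, .wp = 4, .state = ZS_OPEN };
    finalize_write(&z, 4, true);       /* a failed write that fills the zone  */
    printf("wp=%llu state=%s\n", (unsigned long long)z.wp,
           z.state == ZS_FULL ? "FULL" : "not full");
    return 0;
}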
add961300c8e29167465fe8206539c4e6bffde28
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: Correct error status for unaligned ZA TP 4053 says (in section 2.3.1.1) - ... if a Zone Append command specifies a ZSLBA that is not the lowest logical block address in that zone, then the controller shall abort that command with a status code of Invalid Field In Command. In the code, Zone Invalid Write is returned instead, fix this. Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
static uint16_t nvme_check_zone_write(NvmeCtrl *n, NvmeNamespace *ns, NvmeZone *zone, uint64_t slba, uint32_t nlb, bool append) { uint16_t status; if (unlikely((slba + nlb) > nvme_zone_wr_boundary(zone))) { status = NVME_ZONE_BOUNDARY_ERROR; } else { status = nvme_check_zone_state_for_write(zone); } if (status != NVME_SUCCESS) { trace_pci_nvme_err_zone_write_not_ok(slba, nlb, status); } else { assert(nvme_wp_is_valid(zone)); if (append) { if (unlikely(slba != zone->d.zslba)) { trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba); status = NVME_INVALID_FIELD; } if (nvme_l2b(ns, nlb) > (n->page_size << n->zasl)) { trace_pci_nvme_err_append_too_large(slba, nlb, n->zasl); status = NVME_INVALID_FIELD; } } else if (unlikely(slba != zone->w_ptr)) { trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, zone->w_ptr); status = NVME_ZONE_INVALID_WRITE; } } return status; }
e1f81c1478398713f14c1b6ba011d4bb841dea27
https://github.com/qemu/qemu
1not_vulnerable
hw/block/nvme: fix bad clearing of CAP Commit 37712e00b1f0 ("hw/block/nvme: factor out pmr setup") changed the control flow such that the CAP register is erroneously cleared after nvme_init_pmr() has configured it. Since the entire NvmeCtrl structure is zero-filled initially, there is no need for the explicit clearing, so just remove it. Fixes: 37712e00b1f0 ("hw/block/nvme: factor out pmr setup") Signed-off-by: Klaus Jensen <k.jensen@samsung.com> Reviewed-by: Keith Busch <kbusch@kernel.org> Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) { NvmeIdCtrl *id = &n->id_ctrl; uint8_t *pci_conf = pci_dev->config; char *subnqn; id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' '); strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); id->rab = 6; id->ieee[0] = 0x00; id->ieee[1] = 0x02; id->ieee[2] = 0xb3; id->mdts = n->params.mdts; id->ver = cpu_to_le32(NVME_SPEC_VER); id->oacs = cpu_to_le16(0); /* * Because the controller always completes the Abort command immediately, * there can never be more than one concurrently executing Abort command, * so this value is never used for anything. Note that there can easily be * many Abort commands in the queues, but they are not considered * "executing" until processed by nvme_abort. * * The specification recommends a value of 3 for Abort Command Limit (four * concurrently outstanding Abort commands), so lets use that though it is * inconsequential. */ id->acl = 3; id->aerl = n->params.aerl; id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO; id->lpa = NVME_LPA_NS_SMART | NVME_LPA_EXTENDED; /* recommended default value (~70 C) */ id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING); id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL); id->sqes = (0x6 << 4) | 0x6; id->cqes = (0x4 << 4) | 0x4; id->nn = cpu_to_le32(n->num_namespaces); id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP | NVME_ONCS_FEATURES | NVME_ONCS_DSM | NVME_ONCS_COMPARE); id->vwc = 0x1; id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN | NVME_CTRL_SGLS_BITBUCKET); subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial); strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0'); g_free(subnqn); id->psd[0].mp = cpu_to_le16(0x9c4); id->psd[0].enlat = cpu_to_le32(0x10); id->psd[0].exlat = cpu_to_le32(0x4); NVME_CAP_SET_MQES(n->bar.cap, 0x7ff); NVME_CAP_SET_CQR(n->bar.cap, 1); NVME_CAP_SET_TO(n->bar.cap, 0xf); NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM); NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY); NVME_CAP_SET_MPSMAX(n->bar.cap, 4); n->bar.vs = NVME_SPEC_VER; n->bar.intmc = n->bar.intms = 0; }
b5bf601f364e1a14ca4c3276f88dfec024acf613
https://github.com/qemu/qemu
1not_vulnerable
nvram: add nrf51_soc flash read method Add nrf51_soc mmio read method to avoid NULL pointer dereference issue. Reported-by: Lei Sun <slei.casper@gmail.com> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> Reviewed-by: Li Qiang <liq3ea@gmail.com> Message-Id: <20200811114133.672647-6-ppandit@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void flash_write(void *opaque, hwaddr offset, uint64_t value, unsigned int size) { NRF51NVMState *s = NRF51_NVM(opaque); if (s->config & NRF51_NVMC_CONFIG_WEN) { uint32_t oldval; assert(offset + size <= s->flash_size); /* NOR Flash only allows bits to be flipped from 1's to 0's on write */ oldval = ldl_le_p(s->storage + offset); oldval &= value; stl_le_p(s->storage + offset, oldval); memory_region_flush_rom_device(&s->flash, offset, size); } else { qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash write 0x%" HWADDR_PRIx" while flash not writable.\n", __func__, offset); } }
f867cebaedbc9c43189f102e4cdfdff05e88df7f
https://github.com/qemu/qemu
1not_vulnerable
prep: add ppc-parity write method Add ppc-parity mmio write method to avoid NULL pointer dereference issue. Reported-by: Lei Sun <slei.casper@gmail.com> Acked-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> Reviewed-by: Li Qiang <liq3ea@gmail.com> Message-Id: <20200811114133.672647-5-ppandit@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static uint64_t ppc_parity_error_readl(void *opaque, hwaddr addr, unsigned int size) { uint32_t val = 0; trace_prep_systemio_read((unsigned int)addr, val); return val; }
24202d2b561c3b4c48bd28383c8c34b4ac66c2bf
https://github.com/qemu/qemu
1not_vulnerable
vfio: add quirk device write method Add vfio quirk device mmio write method to avoid NULL pointer dereference issue. Reported-by: Lei Sun <slei.casper@gmail.com> Reviewed-by: Li Qiang <liq3ea@gmail.com> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Acked-by: Alex Williamson <alex.williamson@redhat.com> Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> Message-Id: <20200811114133.672647-4-ppandit@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static uint64_t vfio_ati_3c3_quirk_read(void *opaque, hwaddr addr, unsigned size) { VFIOPCIDevice *vdev = opaque; uint64_t data = vfio_pci_read_config(&vdev->pdev, PCI_BASE_ADDRESS_4 + 1, size); trace_vfio_quirk_ati_3c3_read(vdev->vbasedev.name, data); return data; }
520f26fc6d17b71a43eaf620e834b3bdf316f3d3
https://github.com/qemu/qemu
1not_vulnerable
hw/pci-host: add pci-intack write method Add pci-intack mmio write method to avoid NULL pointer dereference issue. Reported-by: Lei Sun <slei.casper@gmail.com> Reviewed-by: Li Qiang <liq3ea@gmail.com> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> Message-Id: <20200811114133.672647-2-ppandit@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static uint64_t raven_intack_read(void *opaque, hwaddr addr, unsigned int size) { return pic_read_irq(isa_pic); }
33c38f8ca1e09b9f77cf263404d423e076c19177
https://github.com/qemu/qemu
1not_vulnerable
cpu-throttle: Remove timer_mod() from cpu_throttle_set() During migrations, after each iteration, cpu_throttle_set() is called, which, irrespective of input, re-arms the timer according to the value of new_throttle_pct. This causes cpu_throttle_thread() to be delayed in getting scheduled and consequently lets the guest run for more time than the throttle value should allow. This leads to spikes in guest throughput at high cpu-throttle percentages whenever cpu_throttle_set() is called. A solution is not to modify the timer immediately in cpu_throttle_set(); instead, only modify throttle_percentage so that the throttle automatically adjusts to the required percentage when cpu_throttle_timer_tick() is invoked. Manually tested the patch using the following configuration: Guest: Centos7 (3.10.0-123.el7.x86_64) Total Memory - 64GB, CPUs - 16 Tool used - stress (1.0.4) Workload - stress --vm 32 --vm-bytes 1G --vm-keep Migration Parameters: Network Bandwidth - 500MBPS cpu-throttle-initial - 99 Results: With timer_mod(): fails to converge, continues indefinitely Without timer_mod(): converges in 249 sec Signed-off-by: Utkarsh Tripathi <utkarsh.tripathi@nutanix.com> Message-Id: <1609420384-119407-1-git-send-email-utkarsh.tripathi@nutanix.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void cpu_throttle_timer_tick(void *opaque) { CPUState *cpu; double pct; /* Stop the timer if needed */ if (!cpu_throttle_get_percentage()) { return; } CPU_FOREACH(cpu) { if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) { async_run_on_cpu(cpu, cpu_throttle_thread, RUN_ON_CPU_NULL); } } pct = (double)cpu_throttle_get_percentage() / 100; timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + CPU_THROTTLE_TIMESLICE_NS / (1 - pct)); }
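The timing relationship the commit message relies on can be made concrete: cpu_throttle_timer_tick() above re-arms itself for TIMESLICE / (1 - pct) nanoseconds, during which the vCPUs are made to sleep for pct of the time; re-arming the timer from cpu_throttle_set() on every migration iteration resets that period before it elapses, postponing the sleep. A small arithmetic sketch, assuming a 10 ms timeslice constant (illustrative; the actual constant lives in the throttle code):

#include <stdio.h>

int main(void)
{
    const double timeslice_ns = 10e6;     /* assumed CPU_THROTTLE_TIMESLICE_NS */
    const double pct = 0.99;              /* cpu-throttle-initial = 99         */

    double period_ns = timeslice_ns / (1 - pct);   /* time between timer ticks */
    double sleep_ns  = period_ns * pct;            /* vCPU sleep per period    */
    double run_ns    = period_ns - sleep_ns;       /* vCPU run time per period */

    printf("period = %.0f ms, sleep = %.0f ms, run = %.0f ms\n",
           period_ns / 1e6, sleep_ns / 1e6, run_ns / 1e6);
    return 0;
}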
189012fcd7babafd937c4cabd5c3231be6e85fdc
https://github.com/qemu/qemu
1not_vulnerable
replay: fix replay of the interrupts Sometimes an interrupt event arrives at the same time as the virtual timers. In this case replay tries to proceed with the timers, because their deadline is zero. This patch allows interrupts and exceptions to be processed by entering the vCPU execution loop when the deadline is zero but the checkpoint associated with the virtual timers is not yet ready to be replayed. Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru> Message-Id: <161216312794.2030770.1709657858900983160.stgit@pasha-ThinkPad-X280> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
void icount_handle_deadline(void) { assert(qemu_in_vcpu_thread()); int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, QEMU_TIMER_ATTR_ALL); /* * Instructions, interrupts, and exceptions are processed in cpu-exec. * Don't interrupt cpu thread, when these events are waiting * (i.e., there is no checkpoint) */ if (deadline == 0 && (replay_mode != REPLAY_MODE_PLAY || replay_has_checkpoint())) { icount_notify_aio_contexts(); } }
5ea9e9e239db83391a39c09f1de63c4099c20df5
https://github.com/qemu/qemu
1not_vulnerable
target/i386: do not set LM for 32-bit emulation "-cpu host/max" 32-bit targets by definition do not support long mode; therefore, the bit must be masked in the features supported by the accelerator. As a side effect, this avoids setting up the 0x80000008 CPUID leaf for qemu-system-i386 -cpu host which since commit 5a140b255d ("x86/cpu: Use max host physical address if -cpu max option is applied") would have printed this error: qemu-system-i386: phys-bits should be between 32 and 36 (but is 48) Reported-by: Nathan Chancellor <natechancellor@gmail.com> Tested-by: Nathan Chancellor <natechancellor@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, bool migratable_only) { FeatureWordInfo *wi = &feature_word_info[w]; uint64_t r = 0; if (kvm_enabled()) { switch (wi->type) { case CPUID_FEATURE_WORD: r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, wi->cpuid.ecx, wi->cpuid.reg); break; case MSR_FEATURE_WORD: r = kvm_arch_get_supported_msr_feature(kvm_state, wi->msr.index); break; } } else if (hvf_enabled()) { if (wi->type != CPUID_FEATURE_WORD) { return 0; } r = hvf_get_supported_cpuid(wi->cpuid.eax, wi->cpuid.ecx, wi->cpuid.reg); } else if (tcg_enabled()) { r = wi->tcg_features; } else { return ~0; } #ifndef TARGET_X86_64 if (w == FEAT_8000_0001_EDX) { r &= ~CPUID_EXT2_LM; } #endif if (migratable_only) { r &= x86_cpu_get_migratable_flags(w); } return r; }
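A reduced sketch of the masking rule added above: on a 32-bit target the long-mode bit is cleared from whatever feature set the accelerator reports, so "-cpu host/max" can never advertise it. The constant value mirrors the long-mode bit of CPUID leaf 0x80000001 EDX; TARGET_X86_64 stands in for the QEMU target macro and is not defined in this standalone build, so the mask always applies here.

#include <stdio.h>
#include <stdint.h>

#define CPUID_EXT2_LM (1u << 29)   /* long-mode bit in leaf 0x80000001 EDX */

static uint64_t get_supported_8000_0001_edx(uint64_t host_features)
{
    uint64_t r = host_features;
#ifndef TARGET_X86_64              /* defined only when building the 64-bit target */
    r &= ~(uint64_t)CPUID_EXT2_LM; /* 32-bit targets cannot offer long mode */
#endif
    return r;
}

int main(void)
{
    uint64_t host = CPUID_EXT2_LM | 0x1;   /* host supports LM plus one other bit */
    printf("guest-visible features: 0x%llx\n",
           (unsigned long long)get_supported_8000_0001_edx(host));
    return 0;
}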