| id (string, 22-26 chars) | content (string, 72-142k chars) |
|---|---|
devign_test_set_data_3
|
int ff_get_wav_header(AVFormatContext *s, AVIOContext *pb,
AVCodecContext *codec, int size, int big_endian)
{
int id;
uint64_t bitrate;
if (size < 14) {
avpriv_request_sample(codec, "wav header size < 14");
return AVERROR_INVALIDDATA;
}
codec->codec_type = AVMEDIA_TYPE_AUDIO;
if (!big_endian) {
id = avio_rl16(pb);
if (id != 0x0165) {
codec->channels = avio_rl16(pb);
codec->sample_rate = avio_rl32(pb);
bitrate = avio_rl32(pb) * 8LL;
codec->block_align = avio_rl16(pb);
}
} else {
id = avio_rb16(pb);
codec->channels = avio_rb16(pb);
codec->sample_rate = avio_rb32(pb);
bitrate = avio_rb32(pb) * 8LL;
codec->block_align = avio_rb16(pb);
}
if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */
codec->bits_per_coded_sample = 8;
} else {
if (!big_endian) {
codec->bits_per_coded_sample = avio_rl16(pb);
} else {
codec->bits_per_coded_sample = avio_rb16(pb);
}
}
if (id == 0xFFFE) {
codec->codec_tag = 0;
} else {
codec->codec_tag = id;
codec->codec_id = ff_wav_codec_get_id(id,
codec->bits_per_coded_sample);
}
if (size >= 18 && id != 0x0165) { /* We're obviously dealing with WAVEFORMATEX */
int cbSize = avio_rl16(pb); /* cbSize */
if (big_endian) {
avpriv_report_missing_feature(codec, "WAVEFORMATEX support for RIFX files\n");
return AVERROR_PATCHWELCOME;
}
size -= 18;
cbSize = FFMIN(size, cbSize);
if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */
parse_waveformatex(pb, codec);
cbSize -= 22;
size -= 22;
}
if (cbSize > 0) {
av_freep(&codec->extradata);
if (ff_get_extradata(codec, pb, cbSize) < 0)
return AVERROR(ENOMEM);
size -= cbSize;
}
/* It is possible for the chunk to contain garbage at the end */
if (size > 0)
avio_skip(pb, size);
} else if (id == 0x0165 && size >= 32) {
int nb_streams, i;
size -= 4;
av_freep(&codec->extradata);
if (ff_get_extradata(codec, pb, size) < 0)
return AVERROR(ENOMEM);
nb_streams = AV_RL16(codec->extradata + 4);
codec->sample_rate = AV_RL32(codec->extradata + 12);
codec->channels = 0;
bitrate = 0;
if (size < 8 + nb_streams * 20)
return AVERROR_INVALIDDATA;
for (i = 0; i < nb_streams; i++)
codec->channels += codec->extradata[8 + i * 20 + 17];
}
if (bitrate > INT_MAX) {
if (s->error_recognition & AV_EF_EXPLODE) {
av_log(s, AV_LOG_ERROR,
"The bitrate %"PRIu64" is too large.\n",
bitrate);
return AVERROR_INVALIDDATA;
} else {
av_log(s, AV_LOG_WARNING,
"The bitrate %"PRIu64" is too large, resetting to 0.",
bitrate);
codec->bit_rate = 0;
}
} else {
codec->bit_rate = bitrate;
}
if (codec->sample_rate <= 0) {
av_log(s, AV_LOG_ERROR,
"Invalid sample rate: %d\n", codec->sample_rate);
return AVERROR_INVALIDDATA;
}
if (codec->codec_id == AV_CODEC_ID_AAC_LATM) {
/* Channels and sample_rate values are those prior to applying SBR
* and/or PS. */
codec->channels = 0;
codec->sample_rate = 0;
}
/* override bits_per_coded_sample for G.726 */
if (codec->codec_id == AV_CODEC_ID_ADPCM_G726 && codec->sample_rate)
codec->bits_per_coded_sample = codec->bit_rate / codec->sample_rate;
return 0;
}
The vulnerability label is: Non-vulnerable
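A hedged observation on this row (not necessarily related to the label): `bitrate` is assigned only on the paths that actually read the header fields, yet it is compared against INT_MAX unconditionally later. A defensive variant initializes it at declaration:
/* Sketch: initialize at declaration so the INT_MAX comparison never reads
 * an indeterminate value when the 0x0165 branches skip the avio reads. */
uint64_t bitrate = 0;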
|
devign_test_set_data_12
|
static int xen_9pfs_connect(struct XenDevice *xendev)
{
int i;
Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
V9fsState *s = &xen_9pdev->state;
QemuOpts *fsdev;
if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
&xen_9pdev->num_rings) == -1 ||
xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
return -1;
}
xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing));
for (i = 0; i < xen_9pdev->num_rings; i++) {
char *str;
int ring_order;
xen_9pdev->rings[i].priv = xen_9pdev;
xen_9pdev->rings[i].evtchn = -1;
xen_9pdev->rings[i].local_port = -1;
str = g_strdup_printf("ring-ref%u", i);
if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
&xen_9pdev->rings[i].ref) == -1) {
goto out;
}
str = g_strdup_printf("event-channel-%u", i);
if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
&xen_9pdev->rings[i].evtchn) == -1) {
goto out;
}
xen_9pdev->rings[i].intf = xengnttab_map_grant_ref(
xen_9pdev->xendev.gnttabdev,
xen_9pdev->xendev.dom,
xen_9pdev->rings[i].ref,
PROT_READ | PROT_WRITE);
if (!xen_9pdev->rings[i].intf) {
goto out;
}
ring_order = xen_9pdev->rings[i].intf->ring_order;
if (ring_order > MAX_RING_ORDER) {
goto out;
}
xen_9pdev->rings[i].ring_order = ring_order;
xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
xen_9pdev->xendev.gnttabdev,
(1 << ring_order),
xen_9pdev->xendev.dom,
xen_9pdev->rings[i].intf->ref,
PROT_READ | PROT_WRITE);
if (!xen_9pdev->rings[i].data) {
goto out;
}
xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
XEN_FLEX_RING_SIZE(ring_order);
xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
xen_9pdev->rings[i].out_cons = 0;
xen_9pdev->rings[i].out_size = 0;
xen_9pdev->rings[i].inprogress = false;
xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
if (xen_9pdev->rings[i].evtchndev == NULL) {
goto out;
}
fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC);
xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
(xen_9pdev->rings[i].evtchndev,
xendev->dom,
xen_9pdev->rings[i].evtchn);
if (xen_9pdev->rings[i].local_port == -1) {
xen_pv_printf(xendev, 0,
"xenevtchn_bind_interdomain failed port=%d\n",
xen_9pdev->rings[i].evtchn);
goto out;
}
xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
}
xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
xen_9pdev->path = xenstore_read_be_str(xendev, "path");
xen_9pdev->id = s->fsconf.fsdev_id =
g_strdup_printf("xen9p%d", xendev->dev);
xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
v9fs_register_transport(s, &xen_9p_transport);
fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
s->fsconf.tag,
1, NULL);
qemu_opt_set(fsdev, "fsdriver", "local", NULL);
qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
qemu_fsdev_add(fsdev);
v9fs_device_realize_common(s, NULL);
return 0;
out:
xen_9pfs_free(xendev);
return -1;
}
The vulnerability label is: Vulnerable
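One defect visible in this snippet (hedged; it may or may not be the labeled vulnerability): each `g_strdup_printf()` result is stored in `str` and never freed, leaking two small allocations per ring. A minimal sketch of the fix pattern, using only names from the snippet:
str = g_strdup_printf("ring-ref%u", i);
if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                         &xen_9pdev->rings[i].ref) == -1) {
    g_free(str);
    goto out;
}
g_free(str);   /* previously leaked on both the success and failure paths */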
|
devign_test_set_data_35
|
static int subframe_count_exact(FlacEncodeContext *s, FlacSubframe *sub,
int pred_order)
{
int p, porder, psize;
int i, part_end;
int count = 0;
/* subframe header */
count += 8;
/* subframe */
if (sub->type == FLAC_SUBFRAME_CONSTANT) {
count += sub->obits;
} else if (sub->type == FLAC_SUBFRAME_VERBATIM) {
count += s->frame.blocksize * sub->obits;
} else {
/* warm-up samples */
count += pred_order * sub->obits;
/* LPC coefficients */
if (sub->type == FLAC_SUBFRAME_LPC)
count += 4 + 5 + pred_order * s->options.lpc_coeff_precision;
/* rice-encoded block */
count += 2;
/* partition order */
porder = sub->rc.porder;
psize = s->frame.blocksize >> porder;
count += 4;
/* residual */
i = pred_order;
part_end = psize;
for (p = 0; p < 1 << porder; p++) {
int k = sub->rc.params[p];
count += 4;
count += rice_count_exact(&sub->residual[i], part_end - i, k);
i = part_end;
part_end = FFMIN(s->frame.blocksize, part_end + psize);
}
}
return count;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_43
|
static void ppc_spapr_init(QEMUMachineInitArgs *args)
{
ram_addr_t ram_size = args->ram_size;
const char *cpu_model = args->cpu_model;
const char *kernel_filename = args->kernel_filename;
const char *kernel_cmdline = args->kernel_cmdline;
const char *initrd_filename = args->initrd_filename;
const char *boot_device = args->boot_order;
PowerPCCPU *cpu;
CPUPPCState *env;
PCIHostState *phb;
int i;
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
hwaddr rma_alloc_size;
uint32_t initrd_base = 0;
long kernel_size = 0, initrd_size = 0;
long load_limit, rtas_limit, fw_size;
bool kernel_le = false;
char *filename;
msi_supported = true;
spapr = g_malloc0(sizeof(*spapr));
QLIST_INIT(&spapr->phbs);
cpu_ppc_hypercall = emulate_spapr_hypercall;
/* Allocate RMA if necessary */
rma_alloc_size = kvmppc_alloc_rma("ppc_spapr.rma", sysmem);
if (rma_alloc_size == -1) {
hw_error("qemu: Unable to create RMA\n");
exit(1);
}
if (rma_alloc_size && (rma_alloc_size < ram_size)) {
spapr->rma_size = rma_alloc_size;
} else {
spapr->rma_size = ram_size;
/* With KVM, we don't actually know whether KVM supports an
* unbounded RMA (PR KVM) or is limited by the hash table size
* (HV KVM using VRMA), so we always assume the latter
*
* In that case, we also limit the initial allocations for RTAS
* etc... to 256M since we have no way to know what the VRMA size
* is going to be, as it depends on the size of the hash table,
* which isn't determined yet.
*/
if (kvm_enabled()) {
spapr->vrma_adjust = 1;
spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
}
}
/* We place the device tree and RTAS just below either the top of the RMA,
* or just below 2GB, whichever is lower, so that it can be
* processed with 32-bit real mode code if necessary */
rtas_limit = MIN(spapr->rma_size, 0x80000000);
spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
load_limit = spapr->fdt_addr - FW_OVERHEAD;
/* We aim for a hash table of size 1/128 the size of RAM. The
* normal rule of thumb is 1/64 the size of RAM, but that's much
* more than needed for the Linux guests we support. */
spapr->htab_shift = 18; /* Minimum architected size */
while (spapr->htab_shift <= 46) {
if ((1ULL << (spapr->htab_shift + 7)) >= ram_size) {
break;
}
spapr->htab_shift++;
}
/* Set up Interrupt Controller before we create the VCPUs */
spapr->icp = xics_system_init(smp_cpus * kvmppc_smt_threads() / smp_threads,
XICS_IRQS);
spapr->next_irq = XICS_IRQ_BASE;
/* init CPUs */
if (cpu_model == NULL) {
cpu_model = kvm_enabled() ? "host" : "POWER7";
}
for (i = 0; i < smp_cpus; i++) {
cpu = cpu_ppc_init(cpu_model);
if (cpu == NULL) {
fprintf(stderr, "Unable to find PowerPC CPU definition\n");
exit(1);
}
env = &cpu->env;
xics_cpu_setup(spapr->icp, cpu);
/* Set time-base frequency to 512 MHz */
cpu_ppc_tb_init(env, TIMEBASE_FREQ);
/* PAPR always has exception vectors in RAM not ROM. To ensure this,
* MSR[IP] should never be set.
*/
env->msr_mask &= ~(1 << 6);
/* Tell KVM that we're in PAPR mode */
if (kvm_enabled()) {
kvmppc_set_papr(cpu);
}
qemu_register_reset(spapr_cpu_reset, cpu);
}
/* allocate RAM */
spapr->ram_limit = ram_size;
if (spapr->ram_limit > rma_alloc_size) {
ram_addr_t nonrma_base = rma_alloc_size;
ram_addr_t nonrma_size = spapr->ram_limit - rma_alloc_size;
memory_region_init_ram(ram, NULL, "ppc_spapr.ram", nonrma_size);
vmstate_register_ram_global(ram);
memory_region_add_subregion(sysmem, nonrma_base, ram);
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
spapr->rtas_size = load_image_targphys(filename, spapr->rtas_addr,
rtas_limit - spapr->rtas_addr);
if (spapr->rtas_size < 0) {
hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
exit(1);
}
if (spapr->rtas_size > RTAS_MAX_SIZE) {
hw_error("RTAS too big ! 0x%lx bytes (max is 0x%x)\n",
spapr->rtas_size, RTAS_MAX_SIZE);
exit(1);
}
g_free(filename);
/* Set up EPOW events infrastructure */
spapr_events_init(spapr);
/* Set up VIO bus */
spapr->vio_bus = spapr_vio_bus_init();
for (i = 0; i < MAX_SERIAL_PORTS; i++) {
if (serial_hds[i]) {
spapr_vty_create(spapr->vio_bus, serial_hds[i]);
}
}
/* We always have at least the nvram device on VIO */
spapr_create_nvram(spapr);
/* Set up PCI */
spapr_pci_msi_init(spapr, SPAPR_PCI_MSI_WINDOW);
spapr_pci_rtas_init();
phb = spapr_create_phb(spapr, 0);
for (i = 0; i < nb_nics; i++) {
NICInfo *nd = &nd_table[i];
if (!nd->model) {
nd->model = g_strdup("ibmveth");
}
if (strcmp(nd->model, "ibmveth") == 0) {
spapr_vlan_create(spapr->vio_bus, nd);
} else {
pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
}
}
for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
spapr_vscsi_create(spapr->vio_bus);
}
/* Graphics */
if (spapr_vga_init(phb->bus)) {
spapr->has_graphics = true;
}
if (usb_enabled(spapr->has_graphics)) {
pci_create_simple(phb->bus, -1, "pci-ohci");
if (spapr->has_graphics) {
usbdevice_create("keyboard");
usbdevice_create("mouse");
}
}
if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
fprintf(stderr, "qemu: pSeries SLOF firmware requires >= "
"%ldM guest RMA (Real Mode Area memory)\n", MIN_RMA_SLOF);
exit(1);
}
if (kernel_filename) {
uint64_t lowaddr = 0;
kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0);
if (kernel_size < 0) {
kernel_size = load_elf(kernel_filename,
translate_kernel_address, NULL,
NULL, &lowaddr, NULL, 0, ELF_MACHINE, 0);
kernel_le = kernel_size > 0;
}
if (kernel_size < 0) {
kernel_size = load_image_targphys(kernel_filename,
KERNEL_LOAD_ADDR,
load_limit - KERNEL_LOAD_ADDR);
}
if (kernel_size < 0) {
fprintf(stderr, "qemu: could not load kernel '%s'\n",
kernel_filename);
exit(1);
}
/* load initrd */
if (initrd_filename) {
/* Try to locate the initrd in the gap between the kernel
* and the firmware. Add a bit of space just in case
*/
initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff;
initrd_size = load_image_targphys(initrd_filename, initrd_base,
load_limit - initrd_base);
if (initrd_size < 0) {
fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
initrd_filename);
exit(1);
}
} else {
initrd_base = 0;
initrd_size = 0;
}
}
if (bios_name == NULL) {
bios_name = FW_FILE_NAME;
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
if (fw_size < 0) {
hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
exit(1);
}
g_free(filename);
spapr->entry_point = 0x100;
vmstate_register(NULL, 0, &vmstate_spapr, spapr);
register_savevm_live(NULL, "spapr/htab", -1, 1,
&savevm_htab_handlers, spapr);
/* Prepare the device tree */
spapr->fdt_skel = spapr_create_fdt_skel(cpu_model,
initrd_base, initrd_size,
kernel_size, kernel_le,
boot_device, kernel_cmdline,
spapr->epow_irq);
assert(spapr->fdt_skel != NULL);
}
The vulnerability label is: Non-vulnerable
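The hash-table sizing loop in this function picks the smallest `htab_shift` with 2^(htab_shift+7) >= ram_size, i.e. a table of 1/128 of RAM. A self-contained worked example (hypothetical driver code, same loop as the snippet):
#include <stdio.h>

int main(void)
{
    unsigned long long ram_size = 4ULL << 30;   /* 4 GiB example */
    int htab_shift = 18;                        /* minimum architected size */
    while (htab_shift <= 46) {
        if ((1ULL << (htab_shift + 7)) >= ram_size)
            break;
        htab_shift++;
    }
    /* prints: htab_shift = 25 (32 MiB), i.e. 1/128 of 4 GiB */
    printf("htab_shift = %d (%llu MiB)\n", htab_shift,
           (1ULL << htab_shift) >> 20);
    return 0;
}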
|
devign_test_set_data_60
|
static int mpeg1_decode_sequence(AVCodecContext *avctx,
UINT8 *buf, int buf_size)
{
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
int width, height, i, v, j;
float aspect;
init_get_bits(&s->gb, buf, buf_size);
width = get_bits(&s->gb, 12);
height = get_bits(&s->gb, 12);
s->aspect_ratio_info= get_bits(&s->gb, 4);
if(!s->mpeg2){
aspect= mpeg1_aspect[s->aspect_ratio_info];
if(aspect!=0.0) avctx->aspect_ratio= width/(aspect*height);
}
s->frame_rate_index = get_bits(&s->gb, 4);
if (s->frame_rate_index == 0)
return -1;
s->bit_rate = get_bits(&s->gb, 18) * 400;
if (get_bits1(&s->gb) == 0) /* marker */
return -1;
if (width <= 0 || height <= 0 ||
(width % 2) != 0 || (height % 2) != 0)
return -1;
if (width != s->width ||
height != s->height) {
/* start new mpeg1 context decoding */
s->out_format = FMT_MPEG1;
if (s1->mpeg_enc_ctx_allocated) {
MPV_common_end(s);
}
s->width = width;
s->height = height;
avctx->has_b_frames= 1;
s->avctx = avctx;
avctx->width = width;
avctx->height = height;
if (s->frame_rate_index >= 9) {
/* at least give a valid frame rate (some old mpeg1 have this) */
avctx->frame_rate = 25 * FRAME_RATE_BASE;
} else {
avctx->frame_rate = frame_rate_tab[s->frame_rate_index];
}
s->frame_rate = avctx->frame_rate;
avctx->bit_rate = s->bit_rate;
if (MPV_common_init(s) < 0)
return -1;
s1->mpeg_enc_ctx_allocated = 1;
}
skip_bits(&s->gb, 10); /* vbv_buffer_size */
skip_bits(&s->gb, 1);
/* get matrix */
if (get_bits1(&s->gb)) {
for(i=0;i<64;i++) {
v = get_bits(&s->gb, 8);
j = s->intra_scantable.permutated[i];
s->intra_matrix[j] = v;
s->chroma_intra_matrix[j] = v;
}
#ifdef DEBUG
dprintf("intra matrix present\n");
for(i=0;i<64;i++)
dprintf(" %d", s->intra_matrix[s->intra_scantable.permutated[i]]);
printf("\n");
#endif
} else {
for(i=0;i<64;i++) {
int j= s->idct_permutation[i];
v = ff_mpeg1_default_intra_matrix[i];
s->intra_matrix[j] = v;
s->chroma_intra_matrix[j] = v;
}
}
if (get_bits1(&s->gb)) {
for(i=0;i<64;i++) {
v = get_bits(&s->gb, 8);
j = s->intra_scantable.permutated[i];
s->inter_matrix[j] = v;
s->chroma_inter_matrix[j] = v;
}
#ifdef DEBUG
dprintf("non intra matrix present\n");
for(i=0;i<64;i++)
dprintf(" %d", s->inter_matrix[s->intra_scantable.permutated[i]]);
printf("\n");
#endif
} else {
for(i=0;i<64;i++) {
int j= s->idct_permutation[i];
v = ff_mpeg1_default_non_intra_matrix[i];
s->inter_matrix[j] = v;
s->chroma_inter_matrix[j] = v;
}
}
/* we set mpeg2 parameters so that it emulates mpeg1 */
s->progressive_sequence = 1;
s->progressive_frame = 1;
s->picture_structure = PICT_FRAME;
s->frame_pred_frame_dct = 1;
s->mpeg2 = 0;
avctx->sub_id = 1; /* indicates mpeg1 */
return 0;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_70
|
static uint32_t drc_set_unusable(sPAPRDRConnector *drc)
{
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE;
if (drc->awaiting_release) {
uint32_t drc_index = spapr_drc_index(drc);
trace_spapr_drc_set_allocation_state_finalizing(drc_index);
spapr_drc_detach(drc);
}
return RTAS_OUT_SUCCESS;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_83
|
static void scsi_read_request(SCSIDiskReq *r)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
if (r->sector_count == (uint32_t)-1) {
DPRINTF("Read buf_len=%zd\n", r->iov.iov_len);
r->sector_count = 0;
scsi_req_data(&r->req, r->iov.iov_len);
return;
}
DPRINTF("Read sector_count=%d\n", r->sector_count);
if (r->sector_count == 0) {
scsi_command_complete(r, GOOD, NO_SENSE);
return;
}
/* No data transfer may already be in progress */
assert(r->req.aiocb == NULL);
n = r->sector_count;
if (n > SCSI_DMA_BUF_SIZE / 512)
n = SCSI_DMA_BUF_SIZE / 512;
r->iov.iov_len = n * 512;
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n,
scsi_read_complete, r);
if (r->req.aiocb == NULL) {
scsi_read_complete(r, -EIO);
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_84
|
static void lm32_evr_init(MachineState *machine)
{
const char *cpu_model = machine->cpu_model;
const char *kernel_filename = machine->kernel_filename;
LM32CPU *cpu;
CPULM32State *env;
DriveInfo *dinfo;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
qemu_irq irq[32];
ResetInfo *reset_info;
int i;
/* memory map */
hwaddr flash_base = 0x04000000;
size_t flash_sector_size = 256 * 1024;
size_t flash_size = 32 * 1024 * 1024;
hwaddr ram_base = 0x08000000;
size_t ram_size = 64 * 1024 * 1024;
hwaddr timer0_base = 0x80002000;
hwaddr uart0_base = 0x80006000;
hwaddr timer1_base = 0x8000a000;
int uart0_irq = 0;
int timer0_irq = 1;
int timer1_irq = 3;
reset_info = g_malloc0(sizeof(ResetInfo));
if (cpu_model == NULL) {
cpu_model = "lm32-full";
}
cpu = LM32_CPU(cpu_generic_init(TYPE_LM32_CPU, cpu_model));
if (cpu == NULL) {
fprintf(stderr, "qemu: unable to find CPU '%s'\n", cpu_model);
exit(1);
}
env = &cpu->env;
reset_info->cpu = cpu;
reset_info->flash_base = flash_base;
memory_region_allocate_system_memory(phys_ram, NULL, "lm32_evr.sdram",
ram_size);
memory_region_add_subregion(address_space_mem, ram_base, phys_ram);
dinfo = drive_get(IF_PFLASH, 0, 0);
/* Spansion S29NS128P */
pflash_cfi02_register(flash_base, NULL, "lm32_evr.flash", flash_size,
dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
flash_sector_size, flash_size / flash_sector_size,
1, 2, 0x01, 0x7e, 0x43, 0x00, 0x555, 0x2aa, 1);
/* create irq lines */
env->pic_state = lm32_pic_init(qemu_allocate_irq(cpu_irq_handler, cpu, 0));
for (i = 0; i < 32; i++) {
irq[i] = qdev_get_gpio_in(env->pic_state, i);
}
lm32_uart_create(uart0_base, irq[uart0_irq], serial_hds[0]);
sysbus_create_simple("lm32-timer", timer0_base, irq[timer0_irq]);
sysbus_create_simple("lm32-timer", timer1_base, irq[timer1_irq]);
/* make sure juart isn't the first chardev */
env->juart_state = lm32_juart_init(serial_hds[1]);
reset_info->bootstrap_pc = flash_base;
if (kernel_filename) {
uint64_t entry;
int kernel_size;
kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL,
1, EM_LATTICEMICO32, 0, 0);
reset_info->bootstrap_pc = entry;
if (kernel_size < 0) {
kernel_size = load_image_targphys(kernel_filename, ram_base,
ram_size);
reset_info->bootstrap_pc = ram_base;
}
if (kernel_size < 0) {
fprintf(stderr, "qemu: could not load kernel '%s'\n",
kernel_filename);
exit(1);
}
}
qemu_register_reset(main_cpu_reset, reset_info);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_108
|
aio_write_f(int argc, char **argv)
{
char *p;
int count = 0;
int nr_iov, i, c;
int pattern = 0xcd;
struct aio_ctx *ctx = calloc(1, sizeof(struct aio_ctx));
BlockDriverAIOCB *acb;
while ((c = getopt(argc, argv, "CqP:")) != EOF) {
switch (c) {
case 'C':
ctx->Cflag = 1;
break;
case 'q':
ctx->qflag = 1;
break;
case 'P':
pattern = atoi(optarg);
break;
default:
return command_usage(&aio_write_cmd);
}
}
if (optind > argc - 2)
return command_usage(&aio_write_cmd);
ctx->offset = cvtnum(argv[optind]);
if (ctx->offset < 0) {
printf("non-numeric length argument -- %s\n", argv[optind]);
return 0;
}
optind++;
if (ctx->offset & 0x1ff) {
printf("offset %lld is not sector aligned\n",
(long long)ctx->offset);
return 0;
}
if (count & 0x1ff) {
printf("count %d is not sector aligned\n",
count);
return 0;
}
for (i = optind; i < argc; i++) {
size_t len;
len = cvtnum(argv[optind]);
if (len < 0) {
printf("non-numeric length argument -- %s\n", argv[i]);
return 0;
}
count += len;
}
nr_iov = argc - optind;
qemu_iovec_init(&ctx->qiov, nr_iov);
ctx->buf = p = qemu_io_alloc(count, pattern);
for (i = 0; i < nr_iov; i++) {
size_t len;
len = cvtnum(argv[optind]);
if (len < 0) {
printf("non-numeric length argument -- %s\n",
argv[optind]);
return 0;
}
qemu_iovec_add(&ctx->qiov, p, len);
p += len;
optind++;
}
gettimeofday(&ctx->t1, NULL);
acb = bdrv_aio_writev(bs, ctx->offset >> 9, &ctx->qiov,
ctx->qiov.size >> 9, aio_write_done, ctx);
if (!acb)
return -EIO;
return 0;
}
The vulnerability label is: Non-vulnerable
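A hedged observation: both length loops declare `len` as `size_t`, which is unsigned, so the `len < 0` guards can never fire even though `cvtnum()` reports errors as negative values (note also that the first loop reads `argv[optind]` on every iteration while its error message prints `argv[i]`). A sketch of a signed-type variant for the first loop (an illustration, not the upstream fix):
/* Sketch: a signed width keeps the error check meaningful. */
int64_t len = cvtnum(argv[i]);          /* index i, not the stale optind */
if (len < 0) {
    printf("non-numeric length argument -- %s\n", argv[i]);
    return 0;
}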
|
devign_test_set_data_121
|
static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
int dc = block[0];
const uint8_t *cm;
dc = (3 * dc + 1) >> 1;
dc = (3 * dc + 16) >> 5;
cm = ff_cropTbl + MAX_NEG_CROP + dc;
for(i = 0; i < 8; i++){
dest[0] = cm[dest[0]];
dest[1] = cm[dest[1]];
dest[2] = cm[dest[2]];
dest[3] = cm[dest[3]];
dest[4] = cm[dest[4]];
dest[5] = cm[dest[5]];
dest[6] = cm[dest[6]];
dest[7] = cm[dest[7]];
dest += linesize;
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_122
|
static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
const char *desc_file_path)
{
int ret;
char access[11];
char type[11];
char fname[512];
const char *p = desc;
int64_t sectors = 0;
int64_t flat_offset;
char extent_path[PATH_MAX];
BlockDriverState *extent_file;
Error *local_err = NULL;
while (*p) {
/* parse extent line:
* RW [size in sectors] FLAT "file-name.vmdk" OFFSET
* or
* RW [size in sectors] SPARSE "file-name.vmdk"
*/
flat_offset = -1;
ret = sscanf(p, "%10s %" SCNd64 " %10s \"%511[^\n\r\"]\" %" SCNd64,
access, &sectors, type, fname, &flat_offset);
if (ret < 4 || strcmp(access, "RW")) {
goto next_line;
} else if (!strcmp(type, "FLAT")) {
if (ret != 5 || flat_offset < 0) {
return -EINVAL;
}
} else if (ret != 4) {
return -EINVAL;
}
if (sectors <= 0 ||
(strcmp(type, "FLAT") && strcmp(type, "SPARSE") &&
strcmp(type, "VMFS") && strcmp(type, "VMFSSPARSE")) ||
(strcmp(access, "RW"))) {
goto next_line;
}
path_combine(extent_path, sizeof(extent_path),
desc_file_path, fname);
ret = bdrv_file_open(&extent_file, extent_path, NULL, bs->open_flags,
&local_err);
if (ret) {
qerror_report_err(local_err);
error_free(local_err);
return ret;
}
/* save to extents array */
if (!strcmp(type, "FLAT") || !strcmp(type, "VMFS")) {
/* FLAT extent */
VmdkExtent *extent;
ret = vmdk_add_extent(bs, extent_file, true, sectors,
0, 0, 0, 0, sectors, &extent);
if (ret < 0) {
return ret;
}
extent->flat_start_offset = flat_offset << 9;
} else if (!strcmp(type, "SPARSE") || !strcmp(type, "VMFSSPARSE")) {
/* SPARSE extent and VMFSSPARSE extent are both "COWD" sparse file*/
ret = vmdk_open_sparse(bs, extent_file, bs->open_flags);
if (ret) {
bdrv_unref(extent_file);
return ret;
}
} else {
fprintf(stderr,
"VMDK: Not supported extent type \"%s\""".\n", type);
return -ENOTSUP;
}
next_line:
/* move to next line */
while (*p && *p != '\n') {
p++;
}
p++;
}
return 0;
}
The vulnerability label is: Vulnerable
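Hedged note: when `vmdk_add_extent()` fails, the FLAT branch returns without releasing the reference obtained from `bdrv_file_open()`, while the SPARSE branch does call `bdrv_unref()`. A sketch mirroring that cleanup (not necessarily the labeled defect):
ret = vmdk_add_extent(bs, extent_file, true, sectors,
                      0, 0, 0, 0, sectors, &extent);
if (ret < 0) {
    bdrv_unref(extent_file);   /* match the SPARSE branch's error path */
    return ret;
}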
|
devign_test_set_data_149
|
static void compute_rematrixing_strategy(AC3EncodeContext *s)
{
int nb_coefs;
int blk, bnd, i;
AC3Block *block, *block0;
s->num_rematrixing_bands = 4;
if (s->rematrixing & AC3_REMATRIXING_IS_STATIC)
return;
nb_coefs = FFMIN(s->nb_coefs[0], s->nb_coefs[1]);
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
block = &s->blocks[blk];
block->new_rematrixing_strategy = !blk;
for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++) {
/* calculate sum of squared coeffs for one band in one block */
int start = ff_ac3_rematrix_band_tab[bnd];
int end = FFMIN(nb_coefs, ff_ac3_rematrix_band_tab[bnd+1]);
CoefSumType sum[4] = {0,};
for (i = start; i < end; i++) {
CoefType lt = block->mdct_coef[0][i];
CoefType rt = block->mdct_coef[1][i];
CoefType md = lt + rt;
CoefType sd = lt - rt;
sum[0] += lt * lt;
sum[1] += rt * rt;
sum[2] += md * md;
sum[3] += sd * sd;
}
/* compare sums to determine if rematrixing will be used for this band */
if (FFMIN(sum[2], sum[3]) < FFMIN(sum[0], sum[1]))
block->rematrixing_flags[bnd] = 1;
else
block->rematrixing_flags[bnd] = 0;
/* determine if new rematrixing flags will be sent */
if (blk &&
block->rematrixing_flags[bnd] != block0->rematrixing_flags[bnd]) {
block->new_rematrixing_strategy = 1;
}
}
block0 = block;
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_164
|
void OPPROTO op_udiv_T1_T0(void)
{
uint64_t x0;
uint32_t x1;
x0 = T0 | ((uint64_t) (env->y) << 32);
x1 = T1;
x0 = x0 / x1;
if (x0 > 0xffffffff) {
T0 = 0xffffffff;
T1 = 1;
} else {
T0 = x0;
T1 = 0;
}
FORCE_RET();
}
The vulnerability label is: Vulnerable
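The divide in this snippet is unguarded: `x1` comes straight from `T1`, so a guest-supplied zero divides by zero inside the emulator instead of trapping the guest. A hedged sketch of a guard (the trap constant exists in QEMU's SPARC code; wiring it here is an assumption):
x1 = T1;
if (x1 == 0) {
    raise_exception(TT_DIV_ZERO);   /* assumed helper; guest-visible trap */
}
x0 = x0 / x1;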
|
devign_test_set_data_176
|
void cpu_x86_init_mmu(CPUX86State *env)
{
a20_enabled = 1;
a20_mask = 0xffffffff;
last_pg_state = -1;
cpu_x86_update_cr0(env);
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_178
|
int qemu_cpu_self(void *env)
{
return 1;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_179
|
static void net_vhost_link_down(VhostUserState *s, bool link_down)
{
s->nc.link_down = link_down;
if (s->nc.peer) {
s->nc.peer->link_down = link_down;
}
if (s->nc.info->link_status_changed) {
s->nc.info->link_status_changed(&s->nc);
}
if (s->nc.peer && s->nc.peer->info->link_status_changed) {
s->nc.peer->info->link_status_changed(s->nc.peer);
}
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_185
|
static int rv34_decode_mv(RV34DecContext *r, int block_type)
{
MpegEncContext *s = &r->s;
GetBitContext *gb = &s->gb;
int i, j, k, l;
int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
int next_bt;
memset(r->dmv, 0, sizeof(r->dmv));
for(i = 0; i < num_mvs[block_type]; i++){
r->dmv[i][0] = svq3_get_se_golomb(gb);
r->dmv[i][1] = svq3_get_se_golomb(gb);
}
switch(block_type){
case RV34_MB_TYPE_INTRA:
case RV34_MB_TYPE_INTRA16x16:
ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
return 0;
case RV34_MB_SKIP:
if(s->pict_type == AV_PICTURE_TYPE_P){
ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
}
case RV34_MB_B_DIRECT:
//surprisingly, it uses motion scheme from next reference frame
/* wait for the current mb row to be finished */
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_await_progress(&s->next_picture_ptr->f, s->mb_y - 1, 0);
next_bt = s->next_picture_ptr->f.mb_type[s->mb_x + s->mb_y * s->mb_stride];
if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
ZERO8x2(s->current_picture_ptr->f.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
}else
for(j = 0; j < 2; j++)
for(i = 0; i < 2; i++)
for(k = 0; k < 2; k++)
for(l = 0; l < 2; l++)
s->current_picture_ptr->f.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][k]);
if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
rv34_mc_2mv(r, block_type);
else
rv34_mc_2mv_skip(r);
ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
break;
case RV34_MB_P_16x16:
case RV34_MB_P_MIX16x16:
rv34_pred_mv(r, block_type, 0, 0);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
case RV34_MB_B_FORWARD:
case RV34_MB_B_BACKWARD:
r->dmv[1][0] = r->dmv[0][0];
r->dmv[1][1] = r->dmv[0][1];
if(r->rv30)
rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
else
rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
break;
case RV34_MB_P_16x8:
case RV34_MB_P_8x16:
rv34_pred_mv(r, block_type, 0, 0);
rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
if(block_type == RV34_MB_P_16x8){
rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
}
if(block_type == RV34_MB_P_8x16){
rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
}
break;
case RV34_MB_B_BIDIR:
rv34_pred_mv_b (r, block_type, 0);
rv34_pred_mv_b (r, block_type, 1);
rv34_mc_2mv (r, block_type);
break;
case RV34_MB_P_8x8:
for(i=0;i< 4;i++){
rv34_pred_mv(r, block_type, i, i);
rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
}
break;
}
return 0;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_200
|
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
const char *replaces,
int64_t speed, uint32_t granularity,
int64_t buf_size,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
bool unmap,
BlockCompletionFunc *cb,
void *opaque, Error **errp,
const BlockJobDriver *driver,
bool is_none_mode, BlockDriverState *base)
{
MirrorBlockJob *s;
BlockDriverState *replaced_bs;
if (granularity == 0) {
granularity = bdrv_get_default_bitmap_granularity(target);
}
assert ((granularity & (granularity - 1)) == 0);
if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
(!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
return;
}
if (buf_size < 0) {
error_setg(errp, "Invalid parameter 'buf-size'");
return;
}
if (buf_size == 0) {
buf_size = DEFAULT_MIRROR_BUF_SIZE;
}
/* We can't support this case as long as the block layer can't handle
* multiple BlockBackends per BlockDriverState. */
if (replaces) {
replaced_bs = bdrv_lookup_bs(replaces, replaces, errp);
if (replaced_bs == NULL) {
return;
}
} else {
replaced_bs = bs;
}
if (replaced_bs->blk && target->blk) {
error_setg(errp, "Can't create node with two BlockBackends");
return;
}
s = block_job_create(driver, bs, speed, cb, opaque, errp);
if (!s) {
return;
}
s->replaces = g_strdup(replaces);
s->on_source_error = on_source_error;
s->on_target_error = on_target_error;
s->target = target;
s->is_none_mode = is_none_mode;
s->base = base;
s->granularity = granularity;
s->buf_size = ROUND_UP(buf_size, granularity);
s->unmap = unmap;
s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
if (!s->dirty_bitmap) {
g_free(s->replaces);
block_job_unref(&s->common);
return;
}
bdrv_op_block_all(s->target, s->common.blocker);
bdrv_set_enable_write_cache(s->target, true);
if (s->target->blk) {
blk_set_on_error(s->target->blk, on_target_error, on_target_error);
blk_iostatus_enable(s->target->blk);
}
s->common.co = qemu_coroutine_create(mirror_run);
trace_mirror_start(bs, s, s->common.co, opaque);
qemu_coroutine_enter(s->common.co, s);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_218
|
START_TEST(qint_get_int_test)
{
QInt *qi;
const int value = 123456;
qi = qint_from_int(value);
fail_unless(qint_get_int(qi) == value);
QDECREF(qi);
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_244
|
static int read_code_table(CLLCContext *ctx, GetBitContext *gb, VLC *vlc)
{
uint8_t symbols[256];
uint8_t bits[256];
uint16_t codes[256];
int num_lens, num_codes, num_codes_sum, prefix;
int i, j, count;
prefix = 0;
count = 0;
num_codes_sum = 0;
num_lens = get_bits(gb, 5);
for (i = 0; i < num_lens; i++) {
num_codes = get_bits(gb, 9);
num_codes_sum += num_codes;
if (num_codes_sum > 256) {
av_log(ctx->avctx, AV_LOG_ERROR,
"Too many VLCs (%d) to be read.\n", num_codes_sum);
return AVERROR_INVALIDDATA;
}
for (j = 0; j < num_codes; j++) {
symbols[count] = get_bits(gb, 8);
bits[count] = i + 1;
codes[count] = prefix++;
count++;
}
if (prefix > (65535 - 256) / 2) {
return AVERROR_INVALIDDATA;
}
prefix <<= 1;
}
return ff_init_vlc_sparse(vlc, VLC_BITS, count, bits, 1, 1,
codes, 2, 2, symbols, 1, 1, 0);
}
The vulnerability label is: Vulnerable
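Why the 256-entry guard in the loop above matters, as a worked bound derived only from the field widths in the snippet:
/* Worst case admitted by the bitstream fields:
 *   num_lens  = get_bits(gb, 5) -> at most 31
 *   num_codes = get_bits(gb, 9) -> at most 511 per length
 * Without the num_codes_sum check, count could reach 31 * 511 = 15841
 * writes into the 256-entry symbols[]/bits[]/codes[] tables. */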
|
devign_test_set_data_245
|
static int cbs_h265_read_nal_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
{
BitstreamContext bc;
int err;
err = bitstream_init(&bc, unit->data, 8 * unit->data_size);
if (err < 0)
return err;
switch (unit->type) {
case HEVC_NAL_VPS:
{
H265RawVPS *vps;
vps = av_mallocz(sizeof(*vps));
if (!vps)
return AVERROR(ENOMEM);
err = cbs_h265_read_vps(ctx, &bc, vps);
if (err >= 0)
err = cbs_h265_replace_vps(ctx, vps);
if (err < 0) {
av_free(vps);
return err;
}
unit->content = vps;
}
break;
case HEVC_NAL_SPS:
{
H265RawSPS *sps;
sps = av_mallocz(sizeof(*sps));
if (!sps)
return AVERROR(ENOMEM);
err = cbs_h265_read_sps(ctx, &bc, sps);
if (err >= 0)
err = cbs_h265_replace_sps(ctx, sps);
if (err < 0) {
av_free(sps);
return err;
}
unit->content = sps;
}
break;
case HEVC_NAL_PPS:
{
H265RawPPS *pps;
pps = av_mallocz(sizeof(*pps));
if (!pps)
return AVERROR(ENOMEM);
err = cbs_h265_read_pps(ctx, &bc, pps);
if (err >= 0)
err = cbs_h265_replace_pps(ctx, pps);
if (err < 0) {
av_free(pps);
return err;
}
unit->content = pps;
}
break;
case HEVC_NAL_TRAIL_N:
case HEVC_NAL_TRAIL_R:
case HEVC_NAL_TSA_N:
case HEVC_NAL_TSA_R:
case HEVC_NAL_STSA_N:
case HEVC_NAL_STSA_R:
case HEVC_NAL_RADL_N:
case HEVC_NAL_RADL_R:
case HEVC_NAL_RASL_N:
case HEVC_NAL_RASL_R:
case HEVC_NAL_BLA_W_LP:
case HEVC_NAL_BLA_W_RADL:
case HEVC_NAL_BLA_N_LP:
case HEVC_NAL_IDR_W_RADL:
case HEVC_NAL_IDR_N_LP:
case HEVC_NAL_CRA_NUT:
{
H265RawSlice *slice;
int pos, len;
slice = av_mallocz(sizeof(*slice));
if (!slice)
return AVERROR(ENOMEM);
err = cbs_h265_read_slice_segment_header(ctx, &bc, &slice->header);
if (err < 0) {
av_free(slice);
return err;
}
pos = bitstream_tell(&bc);
len = unit->data_size;
if (!unit->data[len - 1]) {
int z;
for (z = 0; z < len && !unit->data[len - z - 1]; z++);
av_log(ctx->log_ctx, AV_LOG_DEBUG, "Deleted %d trailing zeroes "
"from slice data.\n", z);
len -= z;
}
slice->data_size = len - pos / 8;
slice->data = av_malloc(slice->data_size);
if (!slice->data) {
av_free(slice);
return AVERROR(ENOMEM);
}
memcpy(slice->data,
unit->data + pos / 8, slice->data_size);
slice->data_bit_start = pos % 8;
unit->content = slice;
}
break;
case HEVC_NAL_AUD:
{
H265RawAUD *aud;
aud = av_mallocz(sizeof(*aud));
if (!aud)
return AVERROR(ENOMEM);
err = cbs_h265_read_aud(ctx, &bc, aud);
if (err < 0) {
av_free(aud);
return err;
}
unit->content = aud;
}
break;
default:
return AVERROR(ENOSYS);
}
return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_246
|
static void qpa_audio_fini (void *opaque)
{
(void) opaque;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_248
|
static void usbredir_bulk_packet(void *priv, uint32_t id,
struct usb_redir_bulk_packet_header *bulk_packet,
uint8_t *data, int data_len)
{
USBRedirDevice *dev = priv;
uint8_t ep = bulk_packet->endpoint;
int len = bulk_packet->length;
AsyncURB *aurb;
DPRINTF("bulk-in status %d ep %02X len %d id %u\n", bulk_packet->status,
ep, len, id);
aurb = async_find(dev, id);
if (!aurb) {
free(data);
return;
}
if (aurb->bulk_packet.endpoint != bulk_packet->endpoint ||
aurb->bulk_packet.stream_id != bulk_packet->stream_id) {
ERROR("return bulk packet mismatch, please report this!\n");
len = USB_RET_NAK;
}
if (aurb->packet) {
len = usbredir_handle_status(dev, bulk_packet->status, len);
if (len > 0) {
usbredir_log_data(dev, "bulk data in:", data, data_len);
if (data_len <= aurb->packet->len) {
memcpy(aurb->packet->data, data, data_len);
} else {
ERROR("bulk buffer too small (%d > %d)\n", data_len,
aurb->packet->len);
len = USB_RET_STALL;
}
}
aurb->packet->len = len;
usb_packet_complete(&dev->dev, aurb->packet);
}
async_free(dev, aurb);
free(data);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_262
|
static av_cold int hevc_init_context(AVCodecContext *avctx)
{
HEVCContext *s = avctx->priv_data;
int i;
s->avctx = avctx;
s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
if (!s->HEVClc)
goto fail;
s->HEVClcList[0] = s->HEVClc;
s->sList[0] = s;
s->cabac_state = av_malloc(HEVC_CONTEXTS);
if (!s->cabac_state)
goto fail;
s->output_frame = av_frame_alloc();
if (!s->output_frame)
goto fail;
for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
s->DPB[i].frame = av_frame_alloc();
if (!s->DPB[i].frame)
goto fail;
s->DPB[i].tf.f = s->DPB[i].frame;
}
s->max_ra = INT_MAX;
s->md5_ctx = av_md5_alloc();
if (!s->md5_ctx)
goto fail;
ff_bswapdsp_init(&s->bdsp);
s->context_initialized = 1;
s->eos = 0;
return 0;
fail:
hevc_decode_free(avctx);
return AVERROR(ENOMEM);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_265
|
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s,
uint32_t length)
{
int n, i, r, g, b;
if ((length % 3) != 0 || length > 256 * 3)
return AVERROR_INVALIDDATA;
/* read the palette */
n = length / 3;
for (i = 0; i < n; i++) {
r = bytestream2_get_byte(&s->gb);
g = bytestream2_get_byte(&s->gb);
b = bytestream2_get_byte(&s->gb);
s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
}
for (; i < 256; i++)
s->palette[i] = (0xFFU << 24);
s->state |= PNG_PLTE;
bytestream2_skip(&s->gb, 4); /* crc */
return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_272
|
static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size,
MemTxAttrs attrs)
{
int ret = 0;
MSIMessage from = {0}, to = {0};
from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
from.data = (uint32_t) value;
ret = vtd_interrupt_remap_msi(opaque, &from, &to);
if (ret) {
/* TODO: report error */
VTD_DPRINTF(GENERAL, "int remap fail for addr 0x%"PRIx64
" data 0x%"PRIx32, from.address, from.data);
/* Drop this interrupt */
return MEMTX_ERROR;
}
VTD_DPRINTF(IR, "delivering MSI 0x%"PRIx64":0x%"PRIx32
" for device sid 0x%04x",
to.address, to.data, sid);
if (dma_memory_write(&address_space_memory, to.address,
&to.data, size)) {
VTD_DPRINTF(GENERAL, "error: fail to write 0x%"PRIx64
" value 0x%"PRIx32, to.address, to.data);
}
return MEMTX_OK;
}
The vulnerability label is: Vulnerable
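Two hedged observations on this row: `sid` is referenced in the trace message but no declaration for it survives in this extract, and `dma_memory_write()` is handed the raw MMIO access `size` while the source buffer `&to.data` is only a `uint32_t`. A sketch of a size guard (hypothetical, not necessarily the upstream fix):
/* Hypothetical guard: never copy more than the 32-bit MSI payload. */
if (size > sizeof(to.data)) {
    return MEMTX_ERROR;
}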
|
devign_test_set_data_280
|
void st_flush_trace_buffer(void)
{
if (trace_file_enabled) {
flush_trace_file();
}
/* Discard written trace records */
trace_idx = 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_289
|
VirtIODevice *virtio_blk_init(DeviceState *dev, BlockConf *conf,
char **serial)
{
VirtIOBlock *s;
int cylinders, heads, secs;
static int virtio_blk_id;
DriveInfo *dinfo;
if (!conf->bs) {
error_report("virtio-blk-pci: drive property not set");
return NULL;
}
if (!bdrv_is_inserted(conf->bs)) {
error_report("Device needs media, but drive is empty");
return NULL;
}
if (!*serial) {
/* try to fall back to value set with legacy -drive serial=... */
dinfo = drive_get_by_blockdev(conf->bs);
if (*dinfo->serial) {
*serial = strdup(dinfo->serial);
}
}
s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
sizeof(struct virtio_blk_config),
sizeof(VirtIOBlock));
s->vdev.get_config = virtio_blk_update_config;
s->vdev.get_features = virtio_blk_get_features;
s->vdev.reset = virtio_blk_reset;
s->bs = conf->bs;
s->conf = conf;
s->serial = *serial;
s->rq = NULL;
s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;
bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
s->qdev = dev;
register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
virtio_blk_save, virtio_blk_load, s);
bdrv_set_dev_ops(s->bs, &virtio_block_ops, s);
bdrv_set_buffer_alignment(s->bs, conf->logical_block_size);
bdrv_iostatus_enable(s->bs);
add_boot_device_path(conf->bootindex, dev, "/disk@0,0");
return &s->vdev;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_304
|
static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
size_t len, size_t buflen)
{
QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);
if (buflen < ext_len) {
return -ENOSPC;
}
*ext_backing_fmt = (QCowExtension) {
.magic = cpu_to_be32(magic),
.len = cpu_to_be32(len),
};
memcpy(buf + sizeof(QCowExtension), s, len);
return ext_len;
}
The vulnerability label is: Vulnerable
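A type bug visible in the snippet (hedged as to whether it is the labeled one): the function returns `size_t`, which is unsigned, but reports failure as `-ENOSPC`, so a caller's `< 0` test can never succeed. A sketch of a signed signature:
/* Sketch: signed return type so -ENOSPC survives and ret < 0 is testable. */
static int header_ext_add(char *buf, uint32_t magic, const void *s,
                          size_t len, size_t buflen);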
|
devign_test_set_data_305
|
static int mov_read_strf(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
if (c->fc->nb_streams < 1)
return 0;
if (atom.size <= 40)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
if ((uint64_t)atom.size > (1<<30))
return AVERROR_INVALIDDATA;
av_free(st->codec->extradata);
st->codec->extradata = av_mallocz(atom.size - 40 + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
return AVERROR(ENOMEM);
st->codec->extradata_size = atom.size - 40;
avio_skip(pb, 40);
avio_read(pb, st->codec->extradata, atom.size - 40);
return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_321
|
static void mem_begin(MemoryListener *listener)
{
AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_325
|
static uint32_t tight_palette_buf2rgb(int bpp, const uint8_t *buf)
{
uint32_t rgb = 0;
if (bpp == 32) {
rgb |= ((buf[0] & ~1) | !((buf[4] >> 3) & 1)) << 24;
rgb |= ((buf[1] & ~1) | !((buf[4] >> 2) & 1)) << 16;
rgb |= ((buf[2] & ~1) | !((buf[4] >> 1) & 1)) << 8;
rgb |= ((buf[3] & ~1) | !((buf[4] >> 0) & 1)) << 0;
}
if (bpp == 16) {
rgb |= ((buf[0] & ~1) | !((buf[2] >> 1) & 1)) << 8;
rgb |= ((buf[1] & ~1) | !((buf[2] >> 0) & 1)) << 0;
}
return rgb;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_338
|
int net_init_tap(const Netdev *netdev, const char *name,
NetClientState *peer, Error **errp)
{
const NetdevTapOptions *tap;
int fd, vnet_hdr = 0, i = 0, queues;
/* for the no-fd, no-helper case */
const char *script = NULL; /* suppress wrong "uninit'd use" gcc warning */
const char *downscript = NULL;
Error *err = NULL;
const char *vhostfdname;
char ifname[128];
assert(netdev->type == NET_CLIENT_DRIVER_TAP);
tap = &netdev->u.tap;
queues = tap->has_queues ? tap->queues : 1;
vhostfdname = tap->has_vhostfd ? tap->vhostfd : NULL;
/* QEMU vlans does not support multiqueue tap, in this case peer is set.
* For -netdev, peer is always NULL. */
if (peer && (tap->has_queues || tap->has_fds || tap->has_vhostfds)) {
error_setg(errp, "Multiqueue tap cannot be used with QEMU vlans");
return -1;
}
if (tap->has_fd) {
if (tap->has_ifname || tap->has_script || tap->has_downscript ||
tap->has_vnet_hdr || tap->has_helper || tap->has_queues ||
tap->has_fds || tap->has_vhostfds) {
error_setg(errp, "ifname=, script=, downscript=, vnet_hdr=, "
"helper=, queues=, fds=, and vhostfds= "
"are invalid with fd=");
return -1;
}
fd = monitor_fd_param(cur_mon, tap->fd, &err);
if (fd == -1) {
error_propagate(errp, err);
return -1;
}
fcntl(fd, F_SETFL, O_NONBLOCK);
vnet_hdr = tap_probe_vnet_hdr(fd);
net_init_tap_one(tap, peer, "tap", name, NULL,
script, downscript,
vhostfdname, vnet_hdr, fd, &err);
if (err) {
error_propagate(errp, err);
return -1;
}
} else if (tap->has_fds) {
char **fds = g_new0(char *, MAX_TAP_QUEUES);
char **vhost_fds = g_new0(char *, MAX_TAP_QUEUES);
int nfds, nvhosts;
if (tap->has_ifname || tap->has_script || tap->has_downscript ||
tap->has_vnet_hdr || tap->has_helper || tap->has_queues ||
tap->has_vhostfd) {
error_setg(errp, "ifname=, script=, downscript=, vnet_hdr=, "
"helper=, queues=, and vhostfd= "
"are invalid with fds=");
return -1;
}
nfds = get_fds(tap->fds, fds, MAX_TAP_QUEUES);
if (tap->has_vhostfds) {
nvhosts = get_fds(tap->vhostfds, vhost_fds, MAX_TAP_QUEUES);
if (nfds != nvhosts) {
error_setg(errp, "The number of fds passed does not match "
"the number of vhostfds passed");
goto free_fail;
}
}
for (i = 0; i < nfds; i++) {
fd = monitor_fd_param(cur_mon, fds[i], &err);
if (fd == -1) {
error_propagate(errp, err);
goto free_fail;
}
fcntl(fd, F_SETFL, O_NONBLOCK);
if (i == 0) {
vnet_hdr = tap_probe_vnet_hdr(fd);
} else if (vnet_hdr != tap_probe_vnet_hdr(fd)) {
error_setg(errp,
"vnet_hdr not consistent across given tap fds");
goto free_fail;
}
net_init_tap_one(tap, peer, "tap", name, ifname,
script, downscript,
tap->has_vhostfds ? vhost_fds[i] : NULL,
vnet_hdr, fd, &err);
if (err) {
error_propagate(errp, err);
goto free_fail;
}
}
g_free(fds);
g_free(vhost_fds);
return 0;
free_fail:
for (i = 0; i < nfds; i++) {
g_free(fds[i]);
g_free(vhost_fds[i]);
}
g_free(fds);
g_free(vhost_fds);
return -1;
} else if (tap->has_helper) {
if (tap->has_ifname || tap->has_script || tap->has_downscript ||
tap->has_vnet_hdr || tap->has_queues || tap->has_vhostfds) {
error_setg(errp, "ifname=, script=, downscript=, vnet_hdr=, "
"queues=, and vhostfds= are invalid with helper=");
return -1;
}
fd = net_bridge_run_helper(tap->helper,
tap->has_br ?
tap->br : DEFAULT_BRIDGE_INTERFACE,
errp);
if (fd == -1) {
return -1;
}
fcntl(fd, F_SETFL, O_NONBLOCK);
vnet_hdr = tap_probe_vnet_hdr(fd);
net_init_tap_one(tap, peer, "bridge", name, ifname,
script, downscript, vhostfdname,
vnet_hdr, fd, &err);
if (err) {
error_propagate(errp, err);
close(fd);
return -1;
}
} else {
if (tap->has_vhostfds) {
error_setg(errp, "vhostfds= is invalid if fds= wasn't specified");
return -1;
}
script = tap->has_script ? tap->script : DEFAULT_NETWORK_SCRIPT;
downscript = tap->has_downscript ? tap->downscript :
DEFAULT_NETWORK_DOWN_SCRIPT;
if (tap->has_ifname) {
pstrcpy(ifname, sizeof ifname, tap->ifname);
} else {
ifname[0] = '\0';
}
for (i = 0; i < queues; i++) {
fd = net_tap_init(tap, &vnet_hdr, i >= 1 ? "no" : script,
ifname, sizeof ifname, queues > 1, errp);
if (fd == -1) {
return -1;
}
if (queues > 1 && i == 0 && !tap->has_ifname) {
if (tap_fd_get_ifname(fd, ifname)) {
error_setg(errp, "Fail to get ifname");
close(fd);
return -1;
}
}
net_init_tap_one(tap, peer, "tap", name, ifname,
i >= 1 ? "no" : script,
i >= 1 ? "no" : downscript,
vhostfdname, vnet_hdr, fd, &err);
if (err) {
error_propagate(errp, err);
close(fd);
return -1;
}
}
}
return 0;
}
The vulnerability label is: Vulnerable
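Hedged note on the `has_fds` branch: the parameter-validation failure returns -1 after `fds` and `vhost_fds` have been allocated with `g_new0()`, leaking both arrays (the later failures correctly go through `free_fail`). A minimal sketch:
if (tap->has_ifname || tap->has_script || tap->has_downscript ||
    tap->has_vnet_hdr || tap->has_helper || tap->has_queues ||
    tap->has_vhostfd) {
    error_setg(errp, "ifname=, script=, downscript=, vnet_hdr=, "
               "helper=, queues=, and vhostfd= are invalid with fds=");
    g_free(fds);        /* no elements allocated yet, so freeing the */
    g_free(vhost_fds);  /* arrays themselves is sufficient here      */
    return -1;
}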
|
devign_test_set_data_369
|
void stream_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, const char *backing_file_str,
int64_t speed, BlockdevOnError on_error,
BlockCompletionFunc *cb, void *opaque, Error **errp)
{
StreamBlockJob *s;
s = block_job_create(job_id, &stream_job_driver, bs, speed,
cb, opaque, errp);
if (!s) {
return;
}
s->base = base;
s->backing_file_str = g_strdup(backing_file_str);
s->on_error = on_error;
s->common.co = qemu_coroutine_create(stream_run);
trace_stream_start(bs, base, s, s->common.co, opaque);
qemu_coroutine_enter(s->common.co, s);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_372
|
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
AVPacket *avpkt)
{
int ret;
*got_picture_ptr = 0;
if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx))
return -1;
avctx->pkt = avpkt;
apply_param_change(avctx, avpkt);
if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
avpkt);
else {
ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
avpkt);
picture->pkt_dts = avpkt->dts;
picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
picture->width = avctx->width;
picture->height = avctx->height;
picture->format = avctx->pix_fmt;
}
emms_c(); //needed to avoid an emms_c() call before every return;
if (*got_picture_ptr)
avctx->frame_number++;
} else
ret = 0;
/* many decoders assign whole AVFrames, thus overwriting extended_data;
* make sure it's set correctly */
picture->extended_data = picture->data;
return ret;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_373
|
static int omap_gpio_init(SysBusDevice *sbd)
{
DeviceState *dev = DEVICE(sbd);
struct omap_gpif_s *s = OMAP1_GPIO(dev);
if (!s->clk) {
hw_error("omap-gpio: clk not connected\n");
}
qdev_init_gpio_in(dev, omap_gpio_set, 16);
qdev_init_gpio_out(dev, s->omap1.handler, 16);
sysbus_init_irq(sbd, &s->omap1.irq);
memory_region_init_io(&s->iomem, OBJECT(s), &omap_gpio_ops, &s->omap1,
"omap.gpio", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_391
|
void omap_mcbsp_i2s_attach(struct omap_mcbsp_s *s, I2SCodec *slave)
{
s->codec = slave;
slave->rx_swallow = qemu_allocate_irqs(omap_mcbsp_i2s_swallow, s, 1)[0];
slave->tx_start = qemu_allocate_irqs(omap_mcbsp_i2s_start, s, 1)[0];
}
The vulnerability label is: Vulnerable
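Hedged note: `qemu_allocate_irqs(..., 1)[0]` keeps only element 0 and drops the array pointer, so the one-element array can never be freed. A sketch using QEMU's single-IRQ helper (assuming it is available in the tree in question):
slave->rx_swallow = qemu_allocate_irq(omap_mcbsp_i2s_swallow, s, 0);
slave->tx_start   = qemu_allocate_irq(omap_mcbsp_i2s_start, s, 0);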
|
devign_test_set_data_399
|
int floatx80_eq(floatx80 a, floatx80 b, float_status *status)
{
if ( ( ( extractFloatx80Exp( a ) == 0x7FFF )
&& (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
) {
float_raise(float_flag_invalid, status);
return 0;
}
return
( a.low == b.low )
&& ( ( a.high == b.high )
|| ( ( a.low == 0 )
&& ( (uint16_t) ( ( a.high | b.high )<<1 ) == 0 ) )
);
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_402
|
void json_lexer_init(JSONLexer *lexer, JSONLexerEmitter func)
{
lexer->emit = func;
lexer->state = IN_START;
lexer->token = qstring_new();
lexer->x = lexer->y = 0;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_404
|
BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque, int type)
{
struct qemu_paiocb *acb;
acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
if (!acb)
return NULL;
acb->aio_type = type;
acb->aio_fildes = fd;
acb->ev_signo = SIGUSR2;
acb->async_context_id = get_async_context_id();
if (qiov) {
acb->aio_iov = qiov->iov;
acb->aio_niov = qiov->niov;
}
acb->aio_nbytes = nb_sectors * 512;
acb->aio_offset = sector_num * 512;
acb->next = posix_aio_state->first_aio;
posix_aio_state->first_aio = acb;
trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
qemu_paio_submit(acb);
return &acb->common;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_436
|
static int crypto_open(URLContext *h, const char *uri, int flags)
{
const char *nested_url;
int ret;
CryptoContext *c = h->priv_data;
if (!av_strstart(uri, "crypto+", &nested_url) &&
!av_strstart(uri, "crypto:", &nested_url)) {
av_log(h, AV_LOG_ERROR, "Unsupported url %s\n", uri);
ret = AVERROR(EINVAL);
goto err;
}
if (c->keylen < BLOCKSIZE || c->ivlen < BLOCKSIZE) {
av_log(h, AV_LOG_ERROR, "Key or IV not set\n");
ret = AVERROR(EINVAL);
goto err;
}
if (flags & AVIO_FLAG_WRITE) {
av_log(h, AV_LOG_ERROR, "Only decryption is supported currently\n");
ret = AVERROR(ENOSYS);
goto err;
}
if ((ret = ffurl_open(&c->hd, nested_url, AVIO_FLAG_READ)) < 0) {
av_log(h, AV_LOG_ERROR, "Unable to open input\n");
goto err;
}
c->aes = av_mallocz(av_aes_size);
if (!c->aes) {
ret = AVERROR(ENOMEM);
goto err;
}
av_aes_init(c->aes, c->key, 128, 1);
h->is_streamed = 1;
return 0;
err:
av_free(c->key);
av_free(c->iv);
return ret;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_439
|
int ff_alloc_entries(AVCodecContext *avctx, int count)
{
int i;
if (avctx->active_thread_type & FF_THREAD_SLICE) {
SliceThreadContext *p = avctx->internal->thread_ctx;
p->thread_count = avctx->thread_count;
p->entries = av_mallocz_array(count, sizeof(int));
if (!p->entries) {
return AVERROR(ENOMEM);
}
p->entries_count = count;
p->progress_mutex = av_malloc_array(p->thread_count, sizeof(pthread_mutex_t));
p->progress_cond = av_malloc_array(p->thread_count, sizeof(pthread_cond_t));
for (i = 0; i < p->thread_count; i++) {
pthread_mutex_init(&p->progress_mutex[i], NULL);
pthread_cond_init(&p->progress_cond[i], NULL);
}
}
return 0;
}
The vulnerability label is: Vulnerable
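Hedged note: `p->progress_mutex` and `p->progress_cond` are passed to `pthread_mutex_init()`/`pthread_cond_init()` without NULL checks, unlike `p->entries`. A sketch of symmetric checking (cleanup of the partially allocated state is elided):
p->progress_mutex = av_malloc_array(p->thread_count, sizeof(pthread_mutex_t));
p->progress_cond  = av_malloc_array(p->thread_count, sizeof(pthread_cond_t));
if (!p->progress_mutex || !p->progress_cond)
    return AVERROR(ENOMEM);   /* sketch: partial-state teardown elided */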
|
devign_test_set_data_444
|
static int floppy_probe_device(const char *filename)
{
int fd, ret;
int prio = 0;
struct floppy_struct fdparam;
struct stat st;
if (strstart(filename, "/dev/fd", NULL) &&
!strstart(filename, "/dev/fdset/", NULL)) {
prio = 50;
}
fd = qemu_open(filename, O_RDONLY | O_NONBLOCK);
if (fd < 0) {
goto out;
}
ret = fstat(fd, &st);
if (ret == -1 || !S_ISBLK(st.st_mode)) {
goto outc;
}
/* Attempt to detect via a floppy specific ioctl */
ret = ioctl(fd, FDGETPRM, &fdparam);
if (ret >= 0)
prio = 100;
outc:
qemu_close(fd);
out:
return prio;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_461
|
static void sun4uv_init(ram_addr_t RAM_size,
const char *boot_devices,
const char *kernel_filename, const char *kernel_cmdline,
const char *initrd_filename, const char *cpu_model,
const struct hwdef *hwdef)
{
CPUState *env;
char *filename;
m48t59_t *nvram;
int ret, linux_boot;
unsigned int i;
ram_addr_t ram_offset, prom_offset;
long initrd_size, kernel_size;
PCIBus *pci_bus, *pci_bus2, *pci_bus3;
QEMUBH *bh;
qemu_irq *irq;
int drive_index;
BlockDriverState *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
BlockDriverState *fd[MAX_FD];
void *fw_cfg;
ResetData *reset_info;
linux_boot = (kernel_filename != NULL);
/* init CPUs */
if (!cpu_model)
cpu_model = hwdef->default_cpu_model;
env = cpu_init(cpu_model);
if (!env) {
fprintf(stderr, "Unable to find Sparc CPU definition\n");
exit(1);
}
bh = qemu_bh_new(tick_irq, env);
env->tick = ptimer_init(bh);
ptimer_set_period(env->tick, 1ULL);
bh = qemu_bh_new(stick_irq, env);
env->stick = ptimer_init(bh);
ptimer_set_period(env->stick, 1ULL);
bh = qemu_bh_new(hstick_irq, env);
env->hstick = ptimer_init(bh);
ptimer_set_period(env->hstick, 1ULL);
reset_info = qemu_mallocz(sizeof(ResetData));
reset_info->env = env;
reset_info->reset_addr = hwdef->prom_addr + 0x40ULL;
qemu_register_reset(main_cpu_reset, reset_info);
main_cpu_reset(reset_info);
// Override warm reset address with cold start address
env->pc = hwdef->prom_addr + 0x20ULL;
env->npc = env->pc + 4;
/* allocate RAM */
ram_offset = qemu_ram_alloc(RAM_size);
cpu_register_physical_memory(0, RAM_size, ram_offset);
prom_offset = qemu_ram_alloc(PROM_SIZE_MAX);
cpu_register_physical_memory(hwdef->prom_addr,
(PROM_SIZE_MAX + TARGET_PAGE_SIZE) &
TARGET_PAGE_MASK,
prom_offset | IO_MEM_ROM);
if (bios_name == NULL)
bios_name = PROM_FILENAME;
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (filename) {
ret = load_elf(filename, hwdef->prom_addr - PROM_VADDR,
NULL, NULL, NULL);
if (ret < 0) {
ret = load_image_targphys(filename, hwdef->prom_addr,
(PROM_SIZE_MAX + TARGET_PAGE_SIZE) &
TARGET_PAGE_MASK);
}
qemu_free(filename);
} else {
ret = -1;
}
if (ret < 0) {
fprintf(stderr, "qemu: could not load prom '%s'\n",
bios_name);
exit(1);
}
kernel_size = 0;
initrd_size = 0;
if (linux_boot) {
/* XXX: put correct offset */
kernel_size = load_elf(kernel_filename, 0, NULL, NULL, NULL);
if (kernel_size < 0)
kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR,
ram_size - KERNEL_LOAD_ADDR);
if (kernel_size < 0)
kernel_size = load_image_targphys(kernel_filename,
KERNEL_LOAD_ADDR,
ram_size - KERNEL_LOAD_ADDR);
if (kernel_size < 0) {
fprintf(stderr, "qemu: could not load kernel '%s'\n",
kernel_filename);
exit(1);
}
/* load initrd */
if (initrd_filename) {
initrd_size = load_image_targphys(initrd_filename,
INITRD_LOAD_ADDR,
ram_size - INITRD_LOAD_ADDR);
if (initrd_size < 0) {
fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
initrd_filename);
exit(1);
}
}
if (initrd_size > 0) {
for (i = 0; i < 64 * TARGET_PAGE_SIZE; i += TARGET_PAGE_SIZE) {
if (ldl_phys(KERNEL_LOAD_ADDR + i) == 0x48647253) { // HdrS
stl_phys(KERNEL_LOAD_ADDR + i + 16, INITRD_LOAD_ADDR);
stl_phys(KERNEL_LOAD_ADDR + i + 20, initrd_size);
break;
}
}
}
}
pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, NULL, &pci_bus2,
&pci_bus3);
isa_mem_base = VGA_BASE;
pci_vga_init(pci_bus, 0, 0);
// XXX Should be pci_bus3
pci_ebus_init(pci_bus, -1);
i = 0;
if (hwdef->console_serial_base) {
serial_mm_init(hwdef->console_serial_base, 0, NULL, 115200,
serial_hds[i], 1);
i++;
}
for(; i < MAX_SERIAL_PORTS; i++) {
if (serial_hds[i]) {
serial_init(serial_io[i], NULL/*serial_irq[i]*/, 115200,
serial_hds[i]);
}
}
for(i = 0; i < MAX_PARALLEL_PORTS; i++) {
if (parallel_hds[i]) {
parallel_init(parallel_io[i], NULL/*parallel_irq[i]*/,
parallel_hds[i]);
}
}
for(i = 0; i < nb_nics; i++)
pci_nic_init(&nd_table[i], "ne2k_pci", NULL);
irq = qemu_allocate_irqs(cpu_set_irq, env, MAX_PILS);
if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) {
fprintf(stderr, "qemu: too many IDE bus\n");
exit(1);
}
for(i = 0; i < MAX_IDE_BUS * MAX_IDE_DEVS; i++) {
drive_index = drive_get_index(IF_IDE, i / MAX_IDE_DEVS,
i % MAX_IDE_DEVS);
if (drive_index != -1)
hd[i] = drives_table[drive_index].bdrv;
else
hd[i] = NULL;
}
pci_cmd646_ide_init(pci_bus, hd, 1);
/* FIXME: wire up interrupts. */
i8042_init(NULL/*1*/, NULL/*12*/, 0x60);
for(i = 0; i < MAX_FD; i++) {
drive_index = drive_get_index(IF_FLOPPY, 0, i);
if (drive_index != -1)
fd[i] = drives_table[drive_index].bdrv;
else
fd[i] = NULL;
}
floppy_controller = fdctrl_init(NULL/*6*/, 2, 0, 0x3f0, fd);
nvram = m48t59_init(NULL/*8*/, 0, 0x0074, NVRAM_SIZE, 59);
sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, "Sun4u", RAM_size, boot_devices,
KERNEL_LOAD_ADDR, kernel_size,
kernel_cmdline,
INITRD_LOAD_ADDR, initrd_size,
/* XXX: need an option to load a NVRAM image */
0,
graphic_width, graphic_height, graphic_depth,
(uint8_t *)&nd_table[0].macaddr);
fw_cfg = fw_cfg_init(BIOS_CFG_IOPORT, BIOS_CFG_IOPORT + 1, 0, 0);
fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id);
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, KERNEL_LOAD_ADDR);
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
if (kernel_cmdline) {
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, CMDLINE_ADDR);
pstrcpy_targphys(CMDLINE_ADDR, TARGET_PAGE_SIZE, kernel_cmdline);
} else {
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0);
}
fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, INITRD_LOAD_ADDR);
fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, boot_devices[0]);
qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
}
The vulnerability label is: Non-vulnerable
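Note on the sample above: the most intricate step is the initrd hand-off, which checks the first word of each of the first 64 pages of the loaded kernel for the Sparc "HdrS" boot magic and patches the initrd address and size in at offsets 16 and 20 past it. A minimal sketch of that scan, assuming plain host memory in place of ldl_phys()/stl_phys() and ignoring target endianness:

#include <stdint.h>
#include <string.h>

#define HDRS_MAGIC 0x48647253u        /* "HdrS" */
#define SCAN_PAGES 64
#define PAGE_SIZE  8192               /* assumed TARGET_PAGE_SIZE */

/* Returns 1 if the header was found and patched, 0 otherwise. */
static int patch_initrd_header(uint8_t *kernel, size_t len,
                               uint32_t initrd_addr, uint32_t initrd_size)
{
    size_t i;
    for (i = 0; i + 24 <= len && i < SCAN_PAGES * PAGE_SIZE; i += PAGE_SIZE) {
        uint32_t magic;
        memcpy(&magic, kernel + i, 4);
        if (magic == HDRS_MAGIC) {
            memcpy(kernel + i + 16, &initrd_addr, 4);  /* initrd load address */
            memcpy(kernel + i + 20, &initrd_size, 4);  /* initrd length */
            return 1;
        }
    }
    return 0;
}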
|
devign_test_set_data_474
|
static void fpu_init (CPUMIPSState *env, const mips_def_t *def)
{
int i;
for (i = 0; i < MIPS_FPU_MAX; i++)
env->fpus[i].fcr0 = def->CP1_fcr0;
memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu));
if (env->user_mode_only) {
if (env->CP0_Config1 & (1 << CP0C1_FP))
env->hflags |= MIPS_HFLAG_FPU;
#ifdef TARGET_MIPS64
if (env->active_fpu.fcr0 & (1 << FCR0_F64))
env->hflags |= MIPS_HFLAG_F64;
#endif
}
}
The vulnerability label is: Non-vulnerable
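Note on the sample above: the function derives translator hflags from hardware capability bits, with CP0.Config1.FP enabling the FPU flag and, on 64-bit targets, FCR0.F64 enabling 64-bit FPU registers. A self-contained sketch of that derivation; the constants here are illustrative stand-ins, not the QEMU definitions:

#include <stdint.h>

#define CP0C1_FP        0
#define FCR0_F64        22
#define MIPS_HFLAG_FPU  0x0100        /* illustrative values */
#define MIPS_HFLAG_F64  0x0200

static uint32_t derive_hflags(uint32_t cp0_config1, uint32_t fcr0)
{
    uint32_t hflags = 0;
    if (cp0_config1 & (1 << CP0C1_FP)) {
        hflags |= MIPS_HFLAG_FPU;     /* an FPU is present */
    }
    if (fcr0 & (1 << FCR0_F64)) {
        hflags |= MIPS_HFLAG_F64;     /* 64-bit FPU registers available */
    }
    return hflags;
}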
|
devign_test_set_data_490
|
static int bfi_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data, *buf_end = avpkt->data + avpkt->size;
int buf_size = avpkt->size;
BFIContext *bfi = avctx->priv_data;
uint8_t *dst = bfi->dst;
uint8_t *src, *dst_offset, colour1, colour2;
uint8_t *frame_end = bfi->dst + avctx->width * avctx->height;
uint32_t *pal;
int i, j, height = avctx->height;
if (bfi->frame.data[0])
avctx->release_buffer(avctx, &bfi->frame);
bfi->frame.reference = 1;
if (avctx->get_buffer(avctx, &bfi->frame) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
/* Set frame parameters and palette, if necessary */
if (!avctx->frame_number) {
bfi->frame.pict_type = AV_PICTURE_TYPE_I;
bfi->frame.key_frame = 1;
/* Setting the palette */
if (avctx->extradata_size > 768) {
av_log(NULL, AV_LOG_ERROR, "Palette is too large.\n");
return -1;
}
pal = (uint32_t *)bfi->frame.data[1];
for (i = 0; i < avctx->extradata_size / 3; i++) {
int shift = 16;
*pal = 0;
for (j = 0; j < 3; j++, shift -= 8)
*pal +=
((avctx->extradata[i * 3 + j] << 2) |
(avctx->extradata[i * 3 + j] >> 4)) << shift;
pal++;
}
bfi->frame.palette_has_changed = 1;
} else {
bfi->frame.pict_type = AV_PICTURE_TYPE_P;
bfi->frame.key_frame = 0;
}
buf += 4; // Unpacked size, not required.
while (dst != frame_end) {
static const uint8_t lentab[4] = { 0, 2, 0, 1 };
unsigned int byte = *buf++, av_uninit(offset);
unsigned int code = byte >> 6;
unsigned int length = byte & ~0xC0;
if (buf >= buf_end) {
av_log(avctx, AV_LOG_ERROR,
"Input resolution larger than actual frame.\n");
return -1;
}
/* Get length and offset(if required) */
if (length == 0) {
if (code == 1) {
length = bytestream_get_byte(&buf);
offset = bytestream_get_le16(&buf);
} else {
length = bytestream_get_le16(&buf);
if (code == 2 && length == 0)
break;
}
} else {
if (code == 1)
offset = bytestream_get_byte(&buf);
}
/* Do boundary check */
if (dst + (length << lentab[code]) > frame_end)
break;
switch (code) {
case 0: //Normal Chain
if (length >= buf_end - buf) {
av_log(avctx, AV_LOG_ERROR, "Frame larger than buffer.\n");
return -1;
}
bytestream_get_buffer(&buf, dst, length);
dst += length;
break;
case 1: //Back Chain
dst_offset = dst - offset;
length *= 4; //Convert dwords to bytes.
if (dst_offset < bfi->dst)
break;
while (length--)
*dst++ = *dst_offset++;
break;
case 2: //Skip Chain
dst += length;
break;
case 3: //Fill Chain
colour1 = bytestream_get_byte(&buf);
colour2 = bytestream_get_byte(&buf);
while (length--) {
*dst++ = colour1;
*dst++ = colour2;
}
break;
}
}
src = bfi->dst;
dst = bfi->frame.data[0];
while (height--) {
memcpy(dst, src, avctx->width);
src += avctx->width;
dst += bfi->frame.linesize[0];
}
*data_size = sizeof(AVFrame);
*(AVFrame *)data = bfi->frame;
return buf_size;
}
The vulnerability label is: Vulnerable
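Note on the sample above: several reads in the chain loop happen before the corresponding bounds test. *buf++ executes before buf >= buf_end is checked, the fill chain reads two colour bytes unconditionally, and the back-chain offset is validated only after dst_offset is formed. Which of these the label refers to is an assumption. A hedged sketch of the same loop shape with input checks hoisted before each read; only the literal and skip chains are shown:

#include <stdint.h>
#include <stddef.h>

static int decode_chains(const uint8_t *buf, const uint8_t *buf_end,
                         uint8_t *dst, uint8_t *frame_end)
{
    while (dst < frame_end) {
        unsigned byte, code, length;
        if (buf >= buf_end) {
            return -1;                  /* ran out of input */
        }
        byte   = *buf++;                /* read only after the check above */
        code   = byte >> 6;
        length = byte & 0x3f;           /* extended-length encodings elided */
        switch (code) {
        case 0:                         /* normal chain: copy literals */
            if (length > (size_t)(buf_end - buf) ||
                length > (size_t)(frame_end - dst)) {
                return -1;
            }
            while (length--) {
                *dst++ = *buf++;
            }
            break;
        case 2:                         /* skip chain: leave pixels as-is */
            if (length > (size_t)(frame_end - dst)) {
                return -1;
            }
            dst += length;
            break;
        default:                        /* back and fill chains elided */
            return -1;
        }
    }
    return 0;
}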
|
devign_test_set_data_491
|
DeviceState *qdev_device_add(QemuOpts *opts)
{
ObjectClass *oc;
DeviceClass *dc;
const char *driver, *path, *id;
DeviceState *dev;
BusState *bus = NULL;
Error *err = NULL;
driver = qemu_opt_get(opts, "driver");
if (!driver) {
qerror_report(QERR_MISSING_PARAMETER, "driver");
return NULL;
}
/* find driver */
oc = object_class_by_name(driver);
if (!oc) {
const char *typename = find_typename_by_alias(driver);
if (typename) {
driver = typename;
oc = object_class_by_name(driver);
}
}
if (!object_class_dynamic_cast(oc, TYPE_DEVICE)) {
qerror_report(ERROR_CLASS_GENERIC_ERROR,
"'%s' is not a valid device model name", driver);
return NULL;
}
if (object_class_is_abstract(oc)) {
qerror_report(QERR_INVALID_PARAMETER_VALUE, "driver",
"non-abstract device type");
return NULL;
}
dc = DEVICE_CLASS(oc);
if (dc->cannot_instantiate_with_device_add_yet) {
qerror_report(QERR_INVALID_PARAMETER_VALUE, "driver",
"pluggable device type");
return NULL;
}
/* find bus */
path = qemu_opt_get(opts, "bus");
if (path != NULL) {
bus = qbus_find(path);
if (!bus) {
return NULL;
}
if (!object_dynamic_cast(OBJECT(bus), dc->bus_type)) {
qerror_report(QERR_BAD_BUS_FOR_DEVICE,
driver, object_get_typename(OBJECT(bus)));
return NULL;
}
} else if (dc->bus_type != NULL) {
bus = qbus_find_recursive(sysbus_get_default(), NULL, dc->bus_type);
if (!bus) {
qerror_report(QERR_NO_BUS_FOR_DEVICE,
dc->bus_type, driver);
return NULL;
}
}
if (qdev_hotplug && bus && !bus->allow_hotplug) {
qerror_report(QERR_BUS_NO_HOTPLUG, bus->name);
return NULL;
}
/* create device, set properties */
dev = DEVICE(object_new(driver));
if (bus) {
qdev_set_parent_bus(dev, bus);
}
id = qemu_opts_id(opts);
if (id) {
dev->id = id;
}
if (qemu_opt_foreach(opts, set_property, dev, 1) != 0) {
object_unparent(OBJECT(dev));
object_unref(OBJECT(dev));
return NULL;
}
if (dev->id) {
object_property_add_child(qdev_get_peripheral(), dev->id,
OBJECT(dev), NULL);
} else {
static int anon_count;
gchar *name = g_strdup_printf("device[%d]", anon_count++);
object_property_add_child(qdev_get_peripheral_anon(), name,
OBJECT(dev), NULL);
g_free(name);
}
dev->opts = opts;
object_property_set_bool(OBJECT(dev), true, "realized", &err);
if (err != NULL) {
qerror_report_err(err);
error_free(err);
dev->opts = NULL;
object_unparent(OBJECT(dev));
object_unref(OBJECT(dev));
qerror_report(QERR_DEVICE_INIT_FAILED, driver);
return NULL;
}
return dev;
}
The vulnerability label is: Vulnerable
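Note on the sample above: the function is a create/configure/realize pipeline with several distinct failure exits, and ownership of opts and id moves between caller and device along the way. Auditing those exits is where a use-after-free or double-free label of this kind would come from, though the exact defect is not stated in the sample; that reading is an assumption. A minimal sketch of the same shape with a single unwind path, using stand-in types rather than the QOM API:

#include <stdlib.h>

struct device { int realized; void *opts; };

static struct device *device_new(void) { return calloc(1, sizeof(struct device)); }
static void device_release(struct device *d) { free(d); }   /* unparent+unref stand-in */
static int device_set_props(struct device *d) { (void)d; return 0; }
static int device_realize(struct device *d) { d->realized = 1; return 0; }

static struct device *device_add(void *opts)
{
    struct device *dev = device_new();
    if (!dev) {
        return NULL;
    }
    if (device_set_props(dev) != 0) {
        goto fail;                      /* opts not yet owned by dev here */
    }
    dev->opts = opts;                   /* hand ownership over exactly once */
    if (device_realize(dev) != 0) {
        dev->opts = NULL;               /* return ownership before teardown */
        goto fail;
    }
    return dev;
fail:
    device_release(dev);
    return NULL;
}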
|
devign_test_set_data_501
|
void thread_pool_submit(ThreadPoolFunc *func, void *arg)
{
thread_pool_submit_aio(func, arg, NULL, NULL);
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_510
|
static void pl061_register_devices(void)
{
sysbus_register_dev("pl061", sizeof(pl061_state),
pl061_init_arm);
sysbus_register_dev("pl061_luminary", sizeof(pl061_state),
pl061_init_luminary);
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_539
|
int cpu_exec(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
int ret;
SyncClocks sc;
/* replay_interrupt may need current_cpu */
current_cpu = cpu;
if (cpu_handle_halt(cpu)) {
return EXCP_HALTED;
}
rcu_read_lock();
cc->cpu_exec_enter(cpu);
/* Calculate difference between guest clock and host clock.
* This delay includes the delay of the last cycle, so
* what we have to do is sleep until it is 0. As for the
* advance/delay we gain here, we try to fix it next time.
*/
init_delay_params(&sc, cpu);
/* prepare setjmp context for exception handling */
if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
/* Some compilers wrongly smash all local variables after
* siglongjmp. There were bug reports for gcc 4.5.0 and clang.
* Reload essential local variables here for those compilers.
* Newer versions of gcc would complain about this code (-Wclobbered). */
cpu = current_cpu;
cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
/* Assert that the compiler does not smash local variables. */
g_assert(cpu == current_cpu);
g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
cpu->can_do_io = 1;
tb_lock_reset();
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
}
/* if an exception is pending, we execute it here */
while (!cpu_handle_exception(cpu, &ret)) {
TranslationBlock *last_tb = NULL;
int tb_exit = 0;
while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(&sc, cpu);
}
}
cc->cpu_exec_exit(cpu);
rcu_read_unlock();
/* fail safe : never use current_cpu outside cpu_exec() */
current_cpu = NULL;
return ret;
}
The vulnerability label is: Vulnerable
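Note on the sample above: the sigsetjmp block is the subtle part. After a siglongjmp, locals modified since sigsetjmp() have indeterminate values unless they are volatile, which is why cpu and cc are reloaded for the compilers named in the comment. A self-contained demo of that pattern (not QEMU code):

#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf env;

static void raise_guest_exception(void)
{
    siglongjmp(env, 1);                /* unwind back to the dispatch loop */
}

int main(void)
{
    volatile int exceptions = 0;       /* volatile: survives the longjmp */
    if (sigsetjmp(env, 0) != 0) {
        exceptions++;                  /* re-entered after an "exception" */
    }
    if (exceptions < 3) {
        raise_guest_exception();
    }
    printf("handled %d exceptions\n", exceptions);
    return 0;
}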
|
devign_test_set_data_547
|
static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw)
{
int i;
uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;
copy_scsw_to_guest(&dest->scsw, &src->scsw);
for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
dest->esw[i] = cpu_to_be32(src->esw[i]);
}
for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
dest->ecw[i] = cpu_to_be32(src->ecw[i]);
}
/* extended measurements enabled? */
if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
!(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
!(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
return;
}
/* extended measurements pending? */
if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
return;
}
if ((stctl & SCSW_STCTL_PRIMARY) ||
(stctl == SCSW_STCTL_SECONDARY) ||
((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
dest->emw[i] = cpu_to_be32(src->emw[i]);
}
}
}
The vulnerability label is: Non-vulnerable
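Note on the sample above: every word written into the guest-visible IRB goes through cpu_to_be32(), since the guest expects big-endian layout regardless of host byte order. A minimal sketch of that step, with htonl() standing in for cpu_to_be32():

#include <arpa/inet.h>
#include <stdint.h>
#include <stddef.h>

static void copy_words_to_guest(uint32_t *dest, const uint32_t *src, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++) {
        dest[i] = htonl(src[i]);       /* guest expects big-endian */
    }
}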
|
devign_test_set_data_566
|
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
{
int i;
int got_output;
AVPacket avpkt;
if (ist->next_dts == AV_NOPTS_VALUE)
ist->next_dts = ist->last_dts;
if (pkt == NULL) {
/* EOF handling */
av_init_packet(&avpkt);
avpkt.data = NULL;
avpkt.size = 0;
goto handle_eof;
} else {
avpkt = *pkt;
}
if (pkt->dts != AV_NOPTS_VALUE)
ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
// while we have more to decode or while the decoder did output something on EOF
while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
int ret = 0;
handle_eof:
ist->last_dts = ist->next_dts;
if (avpkt.size && avpkt.size != pkt->size &&
!(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
"Multiple frames in a packet from stream %d\n", pkt->stream_index);
ist->showed_multi_packet_warning = 1;
}
switch (ist->dec_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ret = decode_audio (ist, &avpkt, &got_output);
break;
case AVMEDIA_TYPE_VIDEO:
ret = decode_video (ist, &avpkt, &got_output);
if (avpkt.duration)
ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
else if (ist->st->avg_frame_rate.num)
ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
AV_TIME_BASE_Q);
else if (ist->dec_ctx->time_base.num != 0) {
int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
ist->dec_ctx->ticks_per_frame;
ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->time_base, AV_TIME_BASE_Q);
}
break;
case AVMEDIA_TYPE_SUBTITLE:
ret = transcode_subtitles(ist, &avpkt, &got_output);
break;
default:
return -1;
}
if (ret < 0)
return ret;
// touch data and size only if not EOF
if (pkt) {
avpkt.data += ret;
avpkt.size -= ret;
}
if (!got_output) {
continue;
}
}
/* handle stream copy */
if (!ist->decoding_needed) {
ist->last_dts = ist->next_dts;
switch (ist->dec_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
ist->dec_ctx->sample_rate;
break;
case AVMEDIA_TYPE_VIDEO:
if (ist->dec_ctx->time_base.num != 0) {
int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
ist->next_dts += ((int64_t)AV_TIME_BASE *
ist->dec_ctx->time_base.num * ticks) /
ist->dec_ctx->time_base.den;
}
break;
}
}
for (i = 0; pkt && i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
if (!check_output_constraints(ist, ost) || ost->encoding_needed)
continue;
do_streamcopy(ist, ost, pkt);
}
return 0;
}
The vulnerability label is: Non-vulnerable
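Note on the sample above: for stream copy, next_dts advances by the packet's nominal duration rescaled onto the AV_TIME_BASE (microsecond) clock; the audio case is plain integer arithmetic. A small worked example with no FFmpeg dependency, e.g. 1152 samples at 48000 Hz advance the clock by 24000 us:

#include <stdint.h>
#include <stdio.h>

#define AV_TIME_BASE 1000000

static int64_t advance_audio_dts(int64_t next_dts, int frame_size, int sample_rate)
{
    /* same formula as the AVMEDIA_TYPE_AUDIO branch above */
    return next_dts + ((int64_t)AV_TIME_BASE * frame_size) / sample_rate;
}

int main(void)
{
    printf("%lld\n", (long long)advance_audio_dts(0, 1152, 48000));   /* prints 24000 */
    return 0;
}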
|
devign_test_set_data_569
|
static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
{
CPUX86State *env = cpu->env_ptr;
int b, prefixes;
int shift;
TCGMemOp ot, aflag, dflag;
int modrm, reg, rm, mod, op, opreg, val;
target_ulong next_eip, tval;
int rex_w, rex_r;
target_ulong pc_start = s->base.pc_next;
s->pc_start = s->pc = pc_start;
prefixes = 0;
s->override = -1;
rex_w = -1;
rex_r = 0;
#ifdef TARGET_X86_64
s->rex_x = 0;
s->rex_b = 0;
x86_64_hregs = 0;
#endif
s->rip_offset = 0; /* for relative ip address */
s->vex_l = 0;
s->vex_v = 0;
if (sigsetjmp(s->jmpbuf, 0) != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
return s->pc;
}
next_byte:
b = x86_ldub_code(env, s);
/* Collect prefixes. */
switch (b) {
case 0xf3:
prefixes |= PREFIX_REPZ;
goto next_byte;
case 0xf2:
prefixes |= PREFIX_REPNZ;
goto next_byte;
case 0xf0:
prefixes |= PREFIX_LOCK;
goto next_byte;
case 0x2e:
s->override = R_CS;
goto next_byte;
case 0x36:
s->override = R_SS;
goto next_byte;
case 0x3e:
s->override = R_DS;
goto next_byte;
case 0x26:
s->override = R_ES;
goto next_byte;
case 0x64:
s->override = R_FS;
goto next_byte;
case 0x65:
s->override = R_GS;
goto next_byte;
case 0x66:
prefixes |= PREFIX_DATA;
goto next_byte;
case 0x67:
prefixes |= PREFIX_ADR;
goto next_byte;
#ifdef TARGET_X86_64
case 0x40 ... 0x4f:
if (CODE64(s)) {
/* REX prefix */
rex_w = (b >> 3) & 1;
rex_r = (b & 0x4) << 1;
s->rex_x = (b & 0x2) << 2;
REX_B(s) = (b & 0x1) << 3;
x86_64_hregs = 1; /* select uniform byte register addressing */
goto next_byte;
}
break;
#endif
case 0xc5: /* 2-byte VEX */
case 0xc4: /* 3-byte VEX */
/* VEX prefixes cannot be used except in 32-bit mode.
Otherwise the instruction is LES or LDS. */
if (s->code32 && !s->vm86) {
static const int pp_prefix[4] = {
0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
};
int vex3, vex2 = x86_ldub_code(env, s);
if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
/* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
otherwise the instruction is LES or LDS. */
break;
}
s->pc++;
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
| PREFIX_LOCK | PREFIX_DATA)) {
goto illegal_op;
}
#ifdef TARGET_X86_64
if (x86_64_hregs) {
goto illegal_op;
}
#endif
rex_r = (~vex2 >> 4) & 8;
if (b == 0xc5) {
vex3 = vex2;
b = x86_ldub_code(env, s);
} else {
#ifdef TARGET_X86_64
s->rex_x = (~vex2 >> 3) & 8;
s->rex_b = (~vex2 >> 2) & 8;
#endif
vex3 = x86_ldub_code(env, s);
rex_w = (vex3 >> 7) & 1;
switch (vex2 & 0x1f) {
case 0x01: /* Implied 0f leading opcode bytes. */
b = x86_ldub_code(env, s) | 0x100;
break;
case 0x02: /* Implied 0f 38 leading opcode bytes. */
b = 0x138;
break;
case 0x03: /* Implied 0f 3a leading opcode bytes. */
b = 0x13a;
break;
default: /* Reserved for future use. */
goto unknown_op;
}
}
s->vex_v = (~vex3 >> 3) & 0xf;
s->vex_l = (vex3 >> 2) & 1;
prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
}
break;
}
/* Post-process prefixes. */
if (CODE64(s)) {
/* In 64-bit mode, the default data size is 32-bit. Select 64-bit
data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
over 0x66 if both are present. */
dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
/* In 64-bit mode, 0x67 selects 32-bit addressing. */
aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
} else {
/* In 16/32-bit mode, 0x66 selects the opposite data size. */
if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
dflag = MO_32;
} else {
dflag = MO_16;
}
/* In 16/32-bit mode, 0x67 selects the opposite addressing. */
if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
aflag = MO_32;
} else {
aflag = MO_16;
}
}
s->prefix = prefixes;
s->aflag = aflag;
s->dflag = dflag;
/* now check op code */
reswitch:
switch(b) {
case 0x0f:
/**************************/
/* extended op code */
b = x86_ldub_code(env, s) | 0x100;
goto reswitch;
/**************************/
/* arith & logic */
case 0x00 ... 0x05:
case 0x08 ... 0x0d:
case 0x10 ... 0x15:
case 0x18 ... 0x1d:
case 0x20 ... 0x25:
case 0x28 ... 0x2d:
case 0x30 ... 0x35:
case 0x38 ... 0x3d:
{
int op, f, val;
op = (b >> 3) & 7;
f = (b >> 1) & 3;
ot = mo_b_d(b, dflag);
switch(f) {
case 0: /* OP Ev, Gv */
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else if (op == OP_XORL && rm == reg) {
xor_zero:
/* xor reg, reg optimisation */
set_cc_op(s, CC_OP_CLR);
tcg_gen_movi_tl(cpu_T0, 0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
} else {
opreg = rm;
}
gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
} else if (op == OP_XORL && rm == reg) {
goto xor_zero;
} else {
gen_op_mov_v_reg(ot, cpu_T1, rm);
}
gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, OR_EAX);
break;
}
}
break;
case 0x82:
if (CODE64(s))
goto illegal_op;
case 0x80: /* GRP1 */
case 0x81:
case 0x83:
{
int val;
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (b == 0x83)
s->rip_offset = 1;
else
s->rip_offset = insn_const_size(ot);
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = rm;
}
switch(b) {
default:
case 0x80:
case 0x81:
case 0x82:
val = insn_get(env, s, ot);
break;
case 0x83:
val = (int8_t)insn_get(env, s, MO_8);
break;
}
tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, opreg);
}
break;
/**************************/
/* inc, dec, and other misc arith */
case 0x40 ... 0x47: /* inc Gv */
ot = dflag;
gen_inc(s, ot, OR_EAX + (b & 7), 1);
break;
case 0x48 ... 0x4f: /* dec Gv */
ot = dflag;
gen_inc(s, ot, OR_EAX + (b & 7), -1);
break;
case 0xf6: /* GRP3 */
case 0xf7:
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (op == 0) {
s->rip_offset = insn_const_size(ot);
}
gen_lea_modrm(env, s, modrm);
/* For those below that handle locked memory, don't load here. */
if (!(s->prefix & PREFIX_LOCK)
|| op != 2) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
case 0: /* test */
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 2: /* not */
if (s->prefix & PREFIX_LOCK) {
if (mod == 3) {
goto illegal_op;
}
tcg_gen_movi_tl(cpu_T0, ~0);
tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
} else {
tcg_gen_not_tl(cpu_T0, cpu_T0);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
break;
case 3: /* neg */
if (s->prefix & PREFIX_LOCK) {
TCGLabel *label1;
TCGv a0, t0, t1, t2;
if (mod == 3) {
goto illegal_op;
}
a0 = tcg_temp_local_new();
t0 = tcg_temp_local_new();
label1 = gen_new_label();
tcg_gen_mov_tl(a0, cpu_A0);
tcg_gen_mov_tl(t0, cpu_T0);
gen_set_label(label1);
t1 = tcg_temp_new();
t2 = tcg_temp_new();
tcg_gen_mov_tl(t2, t0);
tcg_gen_neg_tl(t1, t0);
tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
s->mem_index, ot | MO_LE);
tcg_temp_free(t1);
tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
tcg_temp_free(t2);
tcg_temp_free(a0);
tcg_gen_mov_tl(cpu_T0, t0);
tcg_temp_free(t0);
} else {
tcg_gen_neg_tl(cpu_T0, cpu_T0);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
gen_op_update_neg_cc();
set_cc_op(s, CC_OP_SUBB + ot);
break;
case 4: /* mul */
switch(ot) {
case MO_8:
gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULL);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
break;
#endif
}
break;
case 5: /* imul */
switch(ot) {
case MO_8:
gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
set_cc_op(s, CC_OP_MULL);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
break;
#endif
}
break;
case 6: /* div */
switch(ot) {
case MO_8:
gen_helper_divb_AL(cpu_env, cpu_T0);
break;
case MO_16:
gen_helper_divw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
gen_helper_divl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
gen_helper_divq_EAX(cpu_env, cpu_T0);
break;
#endif
}
break;
case 7: /* idiv */
switch(ot) {
case MO_8:
gen_helper_idivb_AL(cpu_env, cpu_T0);
break;
case MO_16:
gen_helper_idivw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
gen_helper_idivl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
gen_helper_idivq_EAX(cpu_env, cpu_T0);
break;
#endif
}
break;
default:
goto unknown_op;
}
break;
case 0xfe: /* GRP4 */
case 0xff: /* GRP5 */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (op >= 2 && b == 0xfe) {
goto unknown_op;
}
if (CODE64(s)) {
if (op == 2 || op == 4) {
/* operand size for jumps is 64 bit */
ot = MO_64;
} else if (op == 3 || op == 5) {
ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
} else if (op == 6) {
/* default push size is 64 bit */
ot = mo_pushpop(s, dflag);
}
}
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
if (op >= 2 && op != 3 && op != 5)
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
case 0: /* inc Ev */
if (mod != 3)
opreg = OR_TMP0;
else
opreg = rm;
gen_inc(s, ot, opreg, 1);
break;
case 1: /* dec Ev */
if (mod != 3)
opreg = OR_TMP0;
else
opreg = rm;
gen_inc(s, ot, opreg, -1);
break;
case 2: /* call Ev */
/* XXX: optimize if memory (no 'and' is necessary) */
if (dflag == MO_16) {
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
next_eip = s->pc - s->cs_base;
tcg_gen_movi_tl(cpu_T1, next_eip);
gen_push_v(s, cpu_T1);
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_jr(s, cpu_T0);
break;
case 3: /* lcall Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_lcall:
if (s->pe && !s->vm86) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_tl(s->pc - s->cs_base));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
}
tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
gen_jr(s, cpu_tmp4);
break;
case 4: /* jmp Ev */
if (dflag == MO_16) {
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_jr(s, cpu_T0);
break;
case 5: /* ljmp Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_ljmp:
if (s->pe && !s->vm86) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_tl(s->pc - s->cs_base));
} else {
gen_op_movl_seg_T0_vm(R_CS);
gen_op_jmp_v(cpu_T1);
}
tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
gen_jr(s, cpu_tmp4);
break;
case 6: /* push Ev */
gen_push_v(s, cpu_T0);
break;
default:
goto unknown_op;
}
break;
case 0x84: /* test Ev, Gv */
case 0x85:
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0xa8: /* test eAX, Iv */
case 0xa9:
ot = mo_b_d(b, dflag);
val = insn_get(env, s, ot);
gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0x98: /* CWDE/CBW */
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
break;
#endif
case MO_32:
gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
break;
case MO_16:
gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
tcg_abort();
}
break;
case 0x99: /* CDQ/CWD */
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
break;
#endif
case MO_32:
gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
break;
case MO_16:
gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
break;
default:
tcg_abort();
}
break;
case 0x1af: /* imul Gv, Ev */
case 0x69: /* imul Gv, Ev, I */
case 0x6b:
ot = dflag;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
if (b == 0x69)
s->rip_offset = insn_const_size(ot);
else if (b == 0x6b)
s->rip_offset = 1;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (b == 0x69) {
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T1, val);
} else if (b == 0x6b) {
val = (int8_t)insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T1, val);
} else {
gen_op_mov_v_reg(ot, cpu_T1, reg);
}
switch (ot) {
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
break;
#endif
case MO_32:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
break;
default:
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
}
set_cc_op(s, CC_OP_MULB + ot);
break;
case 0x1c0:
case 0x1c1: /* xadd Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
gen_op_mov_v_reg(ot, cpu_T0, reg);
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, cpu_T1, rm);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, reg, cpu_T1);
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
if (s->prefix & PREFIX_LOCK) {
tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
}
gen_op_mov_reg_v(ot, reg, cpu_T1);
}
gen_op_update2_cc();
set_cc_op(s, CC_OP_ADDB + ot);
break;
case 0x1b0:
case 0x1b1: /* cmpxchg Ev, Gv */
{
TCGv oldv, newv, cmpv;
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
oldv = tcg_temp_new();
newv = tcg_temp_new();
cmpv = tcg_temp_new();
gen_op_mov_v_reg(ot, newv, reg);
tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
if (s->prefix & PREFIX_LOCK) {
if (mod == 3) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
s->mem_index, ot | MO_LE);
gen_op_mov_reg_v(ot, R_EAX, oldv);
} else {
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, oldv, rm);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, oldv, cpu_A0);
rm = 0; /* avoid warning */
}
gen_extu(ot, oldv);
gen_extu(ot, cmpv);
/* store value = (old == cmp ? new : old); */
tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
if (mod == 3) {
gen_op_mov_reg_v(ot, R_EAX, oldv);
gen_op_mov_reg_v(ot, rm, newv);
} else {
/* Perform an unconditional store cycle like physical cpu;
must be before changing accumulator to ensure
idempotency if the store faults and the instruction
is restarted */
gen_op_st_v(s, ot, newv, cpu_A0);
gen_op_mov_reg_v(ot, R_EAX, oldv);
}
}
tcg_gen_mov_tl(cpu_cc_src, oldv);
tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
set_cc_op(s, CC_OP_SUBB + ot);
tcg_temp_free(oldv);
tcg_temp_free(newv);
tcg_temp_free(cmpv);
}
break;
case 0x1c7: /* cmpxchg8b */
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if ((mod == 3) || ((modrm & 0x38) != 0x8))
goto illegal_op;
#ifdef TARGET_X86_64
if (dflag == MO_64) {
if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else {
gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
}
} else
#endif
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
} else {
gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
}
}
set_cc_op(s, CC_OP_EFLAGS);
break;
/**************************/
/* push/pop */
case 0x50 ... 0x57: /* push */
gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
gen_push_v(s, cpu_T0);
break;
case 0x58 ... 0x5f: /* pop */
ot = gen_pop_T0(s);
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
break;
case 0x60: /* pusha */
if (CODE64(s))
goto illegal_op;
gen_pusha(s);
break;
case 0x61: /* popa */
if (CODE64(s))
goto illegal_op;
gen_popa(s);
break;
case 0x68: /* push Iv */
case 0x6a:
ot = mo_pushpop(s, dflag);
if (b == 0x68)
val = insn_get(env, s, ot);
else
val = (int8_t)insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T0, val);
gen_push_v(s, cpu_T0);
break;
case 0x8f: /* pop Ev */
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
ot = gen_pop_T0(s);
if (mod == 3) {
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
rm = (modrm & 7) | REX_B(s);
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 1 << ot;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
s->popl_esp_hack = 0;
gen_pop_update(s, ot);
}
break;
case 0xc8: /* enter */
{
int level;
val = x86_lduw_code(env, s);
level = x86_ldub_code(env, s);
gen_enter(s, val, level);
}
break;
case 0xc9: /* leave */
gen_leave(s);
break;
case 0x06: /* push es */
case 0x0e: /* push cs */
case 0x16: /* push ss */
case 0x1e: /* push ds */
if (CODE64(s))
goto illegal_op;
gen_op_movl_T0_seg(b >> 3);
gen_push_v(s, cpu_T0);
break;
case 0x1a0: /* push fs */
case 0x1a8: /* push gs */
gen_op_movl_T0_seg((b >> 3) & 7);
gen_push_v(s, cpu_T0);
break;
case 0x07: /* pop es */
case 0x17: /* pop ss */
case 0x1f: /* pop ds */
if (CODE64(s))
goto illegal_op;
reg = b >> 3;
ot = gen_pop_T0(s);
gen_movl_seg_T0(s, reg);
gen_pop_update(s, ot);
/* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
if (s->base.is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg == R_SS) {
s->tf = 0;
gen_eob_inhibit_irq(s, true);
} else {
gen_eob(s);
}
}
break;
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
ot = gen_pop_T0(s);
gen_movl_seg_T0(s, (b >> 3) & 7);
gen_pop_update(s, ot);
if (s->base.is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/**************************/
/* mov */
case 0x88:
case 0x89: /* mov Gv, Ev */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0xc6:
case 0xc7: /* mov Ev, Iv */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if (mod != 3) {
s->rip_offset = insn_const_size(ot);
gen_lea_modrm(env, s, modrm);
}
val = insn_get(env, s, ot);
tcg_gen_movi_tl(cpu_T0, val);
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
}
break;
case 0x8a:
case 0x8b: /* mov Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x8e: /* mov seg, Gv */
modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
if (reg >= 6 || reg == R_CS)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_movl_seg_T0(s, reg);
/* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
if (s->base.is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg == R_SS) {
s->tf = 0;
gen_eob_inhibit_irq(s, true);
} else {
gen_eob(s);
}
}
break;
case 0x8c: /* mov Gv, seg */
modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (reg >= 6)
goto illegal_op;
gen_op_movl_T0_seg(reg);
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0x1b6: /* movzbS Gv, Eb */
case 0x1b7: /* movzwS Gv, Eb */
case 0x1be: /* movsbS Gv, Eb */
case 0x1bf: /* movswS Gv, Eb */
{
TCGMemOp d_ot;
TCGMemOp s_ot;
/* d_ot is the size of destination */
d_ot = dflag;
/* ot is the size of source */
ot = (b & 1) + MO_8;
/* s_ot is the sign+size of source */
s_ot = b & 8 ? MO_SIGN | ot : ot;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
switch (s_ot) {
case MO_UB:
tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
break;
case MO_SB:
tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
break;
case MO_UW:
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
break;
default:
case MO_SW:
tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
break;
}
}
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
}
break;
case 0x8d: /* lea */
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
{
AddressParts a = gen_lea_modrm_0(env, s, modrm);
TCGv ea = gen_lea_modrm_1(a);
gen_lea_v_seg(s, s->aflag, ea, -1, -1);
gen_op_mov_reg_v(dflag, reg, cpu_A0);
}
break;
case 0xa0: /* mov EAX, Ov */
case 0xa1:
case 0xa2: /* mov Ov, EAX */
case 0xa3:
{
target_ulong offset_addr;
ot = mo_b_d(b, dflag);
switch (s->aflag) {
#ifdef TARGET_X86_64
case MO_64:
offset_addr = x86_ldq_code(env, s);
break;
#endif
default:
offset_addr = insn_get(env, s, s->aflag);
break;
}
tcg_gen_movi_tl(cpu_A0, offset_addr);
gen_add_A0_ds_seg(s);
if ((b & 2) == 0) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
} else {
gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
}
}
break;
case 0xd7: /* xlat */
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
val = insn_get(env, s, MO_8);
tcg_gen_movi_tl(cpu_T0, val);
gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
break;
case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
if (dflag == MO_64) {
uint64_t tmp;
/* 64 bit case */
tmp = x86_ldq_code(env, s);
reg = (b & 7) | REX_B(s);
tcg_gen_movi_tl(cpu_T0, tmp);
gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
ot = dflag;
val = insn_get(env, s, ot);
reg = (b & 7) | REX_B(s);
tcg_gen_movi_tl(cpu_T0, val);
gen_op_mov_reg_v(ot, reg, cpu_T0);
}
break;
case 0x91 ... 0x97: /* xchg R, EAX */
do_xchg_reg_eax:
ot = dflag;
reg = (b & 7) | REX_B(s);
rm = R_EAX;
goto do_xchg_reg;
case 0x86:
case 0x87: /* xchg Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
do_xchg_reg:
gen_op_mov_v_reg(ot, cpu_T0, reg);
gen_op_mov_v_reg(ot, cpu_T1, rm);
gen_op_mov_reg_v(ot, rm, cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T1);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_mov_v_reg(ot, cpu_T0, reg);
/* for xchg, lock is implicit */
tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
s->mem_index, ot | MO_LE);
gen_op_mov_reg_v(ot, reg, cpu_T1);
}
break;
case 0xc4: /* les Gv */
/* In CODE64 this is VEX3; see above. */
op = R_ES;
goto do_lxx;
case 0xc5: /* lds Gv */
/* In CODE64 this is VEX2; see above. */
op = R_DS;
goto do_lxx;
case 0x1b2: /* lss Gv */
op = R_SS;
goto do_lxx;
case 0x1b4: /* lfs Gv */
op = R_FS;
goto do_lxx;
case 0x1b5: /* lgs Gv */
op = R_GS;
do_lxx:
ot = dflag != MO_16 ? MO_32 : MO_16;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
/* load the segment first to handle exceptions properly */
gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
gen_movl_seg_T0(s, op);
/* then put the data */
gen_op_mov_reg_v(ot, reg, cpu_T1);
if (s->base.is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/************************/
/* shifts */
case 0xc0:
case 0xc1:
/* shift Ev,Ib */
shift = 2;
grp2:
{
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
if (mod != 3) {
if (shift == 2) {
s->rip_offset = 1;
}
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = (modrm & 7) | REX_B(s);
}
/* simpler op */
if (shift == 0) {
gen_shift(s, op, ot, opreg, OR_ECX);
} else {
if (shift == 2) {
shift = x86_ldub_code(env, s);
}
gen_shifti(s, op, ot, opreg, shift);
}
}
break;
case 0xd0:
case 0xd1:
/* shift Ev,1 */
shift = 1;
goto grp2;
case 0xd2:
case 0xd3:
/* shift Ev,cl */
shift = 0;
goto grp2;
case 0x1a4: /* shld imm */
op = 0;
shift = 1;
goto do_shiftd;
case 0x1a5: /* shld cl */
op = 0;
shift = 0;
goto do_shiftd;
case 0x1ac: /* shrd imm */
op = 1;
shift = 1;
goto do_shiftd;
case 0x1ad: /* shrd cl */
op = 1;
shift = 0;
do_shiftd:
ot = dflag;
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
} else {
opreg = rm;
}
gen_op_mov_v_reg(ot, cpu_T1, reg);
if (shift) {
TCGv imm = tcg_const_tl(x86_ldub_code(env, s));
gen_shiftd_rm_T1(s, ot, opreg, op, imm);
tcg_temp_free(imm);
} else {
gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
}
break;
/************************/
/* floats */
case 0xd8 ... 0xdf:
if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
/* if CR0.EM or CR0.TS are set, generate an FPU exception */
/* XXX: what to do if illegal op ? */
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = modrm & 7;
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
if (mod != 3) {
/* memory op */
gen_lea_modrm(env, s, modrm);
switch(op) {
case 0x00 ... 0x07: /* fxxxs */
case 0x10 ... 0x17: /* fixxxl */
case 0x20 ... 0x27: /* fxxxl */
case 0x30 ... 0x37: /* fixxx */
{
int op1;
op1 = op & 7;
switch(op >> 4) {
case 0:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
break;
case 1:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
break;
case 3:
default:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LESW);
gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
break;
}
gen_helper_fp_arith_ST0_FT0(op1);
if (op1 == 3) {
/* fcomp needs pop */
gen_helper_fpop(cpu_env);
}
}
break;
case 0x08: /* flds */
case 0x0a: /* fsts */
case 0x0b: /* fstps */
case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
switch(op & 7) {
case 0:
switch(op >> 4) {
case 0:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
break;
case 1:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
break;
case 3:
default:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LESW);
gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
break;
}
break;
case 1:
/* XXX: the corresponding CPUID bit must be tested ! */
switch(op >> 4) {
case 1:
gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 2:
gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
break;
case 3:
default:
gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
}
gen_helper_fpop(cpu_env);
break;
default:
switch(op >> 4) {
case 0:
gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 1:
gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
break;
case 2:
gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
break;
case 3:
default:
gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
}
if ((op & 7) == 3)
gen_helper_fpop(cpu_env);
break;
}
break;
case 0x0c: /* fldenv mem */
gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0d: /* fldcw mem */
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
break;
case 0x0e: /* fnstenv mem */
gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0f: /* fnstcw mem */
gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
case 0x1d: /* fldt mem */
gen_helper_fldt_ST0(cpu_env, cpu_A0);
break;
case 0x1f: /* fstpt mem */
gen_helper_fstt_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* frstor mem */
gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2e: /* fnsave mem */
gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2f: /* fnstsw mem */
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUW);
break;
case 0x3c: /* fbld */
gen_helper_fbld_ST0(cpu_env, cpu_A0);
break;
case 0x3e: /* fbstp */
gen_helper_fbst_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x3d: /* fildll */
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
} else {
/* register float ops */
opreg = rm;
switch(op) {
case 0x08: /* fld sti */
gen_helper_fpush(cpu_env);
gen_helper_fmov_ST0_STN(cpu_env,
tcg_const_i32((opreg + 1) & 7));
break;
case 0x09: /* fxchg sti */
case 0x29: /* fxchg4 sti, undocumented op */
case 0x39: /* fxchg7 sti, undocumented op */
gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
break;
case 0x0a: /* grp d9/2 */
switch(rm) {
case 0: /* fnop */
/* check exceptions (FreeBSD FPU probe) */
gen_helper_fwait(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x0c: /* grp d9/4 */
switch(rm) {
case 0: /* fchs */
gen_helper_fchs_ST0(cpu_env);
break;
case 1: /* fabs */
gen_helper_fabs_ST0(cpu_env);
break;
case 4: /* ftst */
gen_helper_fldz_FT0(cpu_env);
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 5: /* fxam */
gen_helper_fxam_ST0(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x0d: /* grp d9/5 */
{
switch(rm) {
case 0:
gen_helper_fpush(cpu_env);
gen_helper_fld1_ST0(cpu_env);
break;
case 1:
gen_helper_fpush(cpu_env);
gen_helper_fldl2t_ST0(cpu_env);
break;
case 2:
gen_helper_fpush(cpu_env);
gen_helper_fldl2e_ST0(cpu_env);
break;
case 3:
gen_helper_fpush(cpu_env);
gen_helper_fldpi_ST0(cpu_env);
break;
case 4:
gen_helper_fpush(cpu_env);
gen_helper_fldlg2_ST0(cpu_env);
break;
case 5:
gen_helper_fpush(cpu_env);
gen_helper_fldln2_ST0(cpu_env);
break;
case 6:
gen_helper_fpush(cpu_env);
gen_helper_fldz_ST0(cpu_env);
break;
default:
goto unknown_op;
}
}
break;
case 0x0e: /* grp d9/6 */
switch(rm) {
case 0: /* f2xm1 */
gen_helper_f2xm1(cpu_env);
break;
case 1: /* fyl2x */
gen_helper_fyl2x(cpu_env);
break;
case 2: /* fptan */
gen_helper_fptan(cpu_env);
break;
case 3: /* fpatan */
gen_helper_fpatan(cpu_env);
break;
case 4: /* fxtract */
gen_helper_fxtract(cpu_env);
break;
case 5: /* fprem1 */
gen_helper_fprem1(cpu_env);
break;
case 6: /* fdecstp */
gen_helper_fdecstp(cpu_env);
break;
default:
case 7: /* fincstp */
gen_helper_fincstp(cpu_env);
break;
}
break;
case 0x0f: /* grp d9/7 */
switch(rm) {
case 0: /* fprem */
gen_helper_fprem(cpu_env);
break;
case 1: /* fyl2xp1 */
gen_helper_fyl2xp1(cpu_env);
break;
case 2: /* fsqrt */
gen_helper_fsqrt(cpu_env);
break;
case 3: /* fsincos */
gen_helper_fsincos(cpu_env);
break;
case 5: /* fscale */
gen_helper_fscale(cpu_env);
break;
case 4: /* frndint */
gen_helper_frndint(cpu_env);
break;
case 6: /* fsin */
gen_helper_fsin(cpu_env);
break;
default:
case 7: /* fcos */
gen_helper_fcos(cpu_env);
break;
}
break;
case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
{
int op1;
op1 = op & 7;
if (op >= 0x20) {
gen_helper_fp_arith_STN_ST0(op1, opreg);
if (op >= 0x30)
gen_helper_fpop(cpu_env);
} else {
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fp_arith_ST0_FT0(op1);
}
}
break;
case 0x02: /* fcom */
case 0x22: /* fcom2, undocumented op */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 0x03: /* fcomp */
case 0x23: /* fcomp3, undocumented op */
case 0x32: /* fcomp5, undocumented op */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x15: /* da/5 */
switch(rm) {
case 1: /* fucompp */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x1c:
switch(rm) {
case 0: /* feni (287 only, just do nop here) */
break;
case 1: /* fdisi (287 only, just do nop here) */
break;
case 2: /* fclex */
gen_helper_fclex(cpu_env);
break;
case 3: /* fninit */
gen_helper_fninit(cpu_env);
break;
case 4: /* fsetpm (287 only, just do nop here) */
break;
default:
goto unknown_op;
}
break;
case 0x1d: /* fucomi */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x1e: /* fcomi */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x28: /* ffree sti */
gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
break;
case 0x2a: /* fst sti */
gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
break;
case 0x2b: /* fstp sti */
case 0x0b: /* fstp1 sti, undocumented op */
case 0x3a: /* fstp8 sti, undocumented op */
case 0x3b: /* fstp9 sti, undocumented op */
gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* fucom st(i) */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
break;
case 0x2d: /* fucomp st(i) */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x33: /* de/3 */
switch(rm) {
case 1: /* fcompp */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
break;
default:
goto unknown_op;
}
break;
case 0x38: /* ffreep sti, undocumented op */
gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x3c: /* df/4 */
switch(rm) {
case 0:
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
goto unknown_op;
}
break;
case 0x3d: /* fucomip */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3e: /* fcomip */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x10 ... 0x13: /* fcmovxx */
case 0x18 ... 0x1b:
{
int op1;
TCGLabel *l1;
static const uint8_t fcmov_cc[8] = {
(JCC_B << 1),
(JCC_Z << 1),
(JCC_BE << 1),
(JCC_P << 1),
};
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
l1 = gen_new_label();
gen_jcc1_noeob(s, op1, l1);
gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
gen_set_label(l1);
}
break;
default:
goto unknown_op;
}
}
break;
/************************/
/* string ops */
case 0xa4: /* movsS */
case 0xa5:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_movs(s, ot);
}
break;
case 0xaa: /* stosS */
case 0xab:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_stos(s, ot);
}
break;
case 0xac: /* lodsS */
case 0xad:
ot = mo_b_d(b, dflag);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_lods(s, ot);
}
break;
case 0xae: /* scasS */
case 0xaf:
ot = mo_b_d(b, dflag);
if (prefixes & PREFIX_REPNZ) {
gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
} else if (prefixes & PREFIX_REPZ) {
gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
} else {
gen_scas(s, ot);
}
break;
case 0xa6: /* cmpsS */
case 0xa7:
ot = mo_b_d(b, dflag);
if (prefixes & PREFIX_REPNZ) {
gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
} else if (prefixes & PREFIX_REPZ) {
gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
} else {
gen_cmps(s, ot);
}
break;
case 0x6c: /* insS */
case 0x6d:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_ins(s, ot);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
break;
case 0x6e: /* outsS */
case 0x6f:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_outs(s, ot);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
break;
/************************/
/* port I/O */
case 0xe4:
case 0xe5:
ot = mo_b_d32(b, dflag);
val = x86_ldub_code(env, s);
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xe6:
case 0xe7:
ot = mo_b_d32(b, dflag);
val = x86_ldub_code(env, s);
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xec:
case 0xed:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0xee:
case 0xef:
ot = mo_b_d32(b, dflag);
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
/************************/
/* control */
case 0xc2: /* ret im */
val = x86_ldsw_code(env, s);
ot = gen_pop_T0(s);
gen_stack_update(s, val + (1 << ot));
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_jr(s, cpu_T0);
break;
case 0xc3: /* ret */
ot = gen_pop_T0(s);
gen_pop_update(s, ot);
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
gen_jr(s, cpu_T0);
break;
case 0xca: /* lret im */
val = x86_ldsw_code(env, s);
do_lret:
if (s->pe && !s->vm86) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(val));
} else {
gen_stack_A0(s);
/* pop offset */
gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
/* NOTE: keeping EIP updated is not a problem in case of
exception */
gen_op_jmp_v(cpu_T0);
/* pop selector */
gen_add_A0_im(s, 1 << dflag);
gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
gen_op_movl_seg_T0_vm(R_CS);
/* add stack offset */
gen_stack_update(s, val + (2 << dflag));
}
gen_eob(s);
break;
case 0xcb: /* lret */
val = 0;
goto do_lret;
case 0xcf: /* iret */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
if (!s->pe) {
/* real mode */
gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
set_cc_op(s, CC_OP_EFLAGS);
} else if (s->vm86) {
if (s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
set_cc_op(s, CC_OP_EFLAGS);
}
} else {
gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
set_cc_op(s, CC_OP_EFLAGS);
}
gen_eob(s);
break;
case 0xe8: /* call im */
{
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
tcg_gen_movi_tl(cpu_T0, next_eip);
gen_push_v(s, cpu_T0);
gen_bnd_jmp(s);
gen_jmp(s, tval);
}
break;
case 0x9a: /* lcall im */
{
unsigned int selector, offset;
if (CODE64(s))
goto illegal_op;
ot = dflag;
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
tcg_gen_movi_tl(cpu_T0, selector);
tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_lcall;
case 0xe9: /* jmp im */
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
tval += s->pc - s->cs_base;
if (dflag == MO_16) {
tval &= 0xffff;
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
gen_bnd_jmp(s);
gen_jmp(s, tval);
break;
case 0xea: /* ljmp im */
{
unsigned int selector, offset;
if (CODE64(s))
goto illegal_op;
ot = dflag;
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
tcg_gen_movi_tl(cpu_T0, selector);
tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_ljmp;
case 0xeb: /* jmp Jb */
tval = (int8_t)insn_get(env, s, MO_8);
tval += s->pc - s->cs_base;
if (dflag == MO_16) {
tval &= 0xffff;
}
gen_jmp(s, tval);
break;
case 0x70 ... 0x7f: /* jcc Jb */
tval = (int8_t)insn_get(env, s, MO_8);
goto do_jcc;
case 0x180 ... 0x18f: /* jcc Jv */
if (dflag != MO_16) {
tval = (int32_t)insn_get(env, s, MO_32);
} else {
tval = (int16_t)insn_get(env, s, MO_16);
}
do_jcc:
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
}
gen_bnd_jmp(s);
gen_jcc(s, b, tval, next_eip);
break;
case 0x190 ... 0x19f: /* setcc Gv */
modrm = x86_ldub_code(env, s);
gen_setcc1(s, b, cpu_T0);
gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
if (!(s->cpuid_features & CPUID_CMOV)) {
goto illegal_op;
}
ot = dflag;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_cmovcc1(env, s, ot, b, modrm, reg);
break;
/************************/
/* flags */
case 0x9c: /* pushf */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_helper_read_eflags(cpu_T0, cpu_env);
gen_push_v(s, cpu_T0);
}
break;
case 0x9d: /* popf */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
ot = gen_pop_T0(s);
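            /* The EFLAGS bits POPF may change depend on privilege:
               CPL 0 may update IOPL and IF; CPL <= IOPL may update IF
               but not IOPL; otherwise neither.  A 16-bit operand only
               touches the low 16 flag bits. */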
if (s->cpl == 0) {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK |
IOPL_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK | IOPL_MASK)
& 0xffff));
}
} else {
if (s->cpl <= s->iopl) {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
NT_MASK |
IF_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
NT_MASK |
IF_MASK)
& 0xffff));
}
} else {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)));
} else {
gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)
& 0xffff));
}
}
}
gen_pop_update(s, ot);
set_cc_op(s, CC_OP_EFLAGS);
/* abort translation because TF/AC flag may change */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
case 0x9e: /* sahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
break;
case 0x9f: /* lahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_compute_eflags(s);
/* Note: gen_compute_eflags() only gives the condition codes */
tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
break;
case 0xf5: /* cmc */
gen_compute_eflags(s);
tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
break;
case 0xf8: /* clc */
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
break;
case 0xf9: /* stc */
gen_compute_eflags(s);
tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
break;
case 0xfc: /* cld */
tcg_gen_movi_i32(cpu_tmp2_i32, 1);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
case 0xfd: /* std */
tcg_gen_movi_i32(cpu_tmp2_i32, -1);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
/************************/
/* bit operations */
case 0x1ba: /* bt/bts/btr/btc Gv, im */
ot = dflag;
modrm = x86_ldub_code(env, s);
op = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
s->rip_offset = 1;
gen_lea_modrm(env, s, modrm);
if (!(s->prefix & PREFIX_LOCK)) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
/* load shift */
val = x86_ldub_code(env, s);
tcg_gen_movi_tl(cpu_T1, val);
if (op < 4)
goto unknown_op;
op -= 4;
goto bt_op;
case 0x1a3: /* bt Gv, Ev */
op = 0;
goto do_btx;
case 0x1ab: /* bts */
op = 1;
goto do_btx;
case 0x1b3: /* btr */
op = 2;
goto do_btx;
case 0x1bb: /* btc */
op = 3;
do_btx:
ot = dflag;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(MO_32, cpu_T1, reg);
if (mod != 3) {
AddressParts a = gen_lea_modrm_0(env, s, modrm);
/* specific case: we need to add a displacement */
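            /* Sign-extend the bit offset, then add
               (bit_offset / operand_width) operand-sized units to the
               address so the access lands on the word that actually
               contains the selected bit. */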
gen_exts(ot, cpu_T1);
tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (!(s->prefix & PREFIX_LOCK)) {
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
}
} else {
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
bt_op:
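        /* T1 holds the bit offset: reduce it modulo the operand width
           (8 << ot bits) and build the single-bit mask in cpu_tmp0. */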
tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
tcg_gen_movi_tl(cpu_tmp0, 1);
tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
if (s->prefix & PREFIX_LOCK) {
switch (op) {
case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now. */
gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
break;
case 1: /* bts */
tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
case 2: /* btr */
tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
default:
case 3: /* btc */
tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
s->mem_index, ot | MO_LE);
break;
}
tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
} else {
tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
switch (op) {
case 0: /* bt */
/* Data already loaded; nothing to do. */
break;
case 1: /* bts */
tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
case 2: /* btr */
tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
default:
case 3: /* btc */
tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
}
if (op != 0) {
if (mod != 3) {
gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
}
/* Delay all CC updates until after the store above. Note that
C is the result of the test, Z is unchanged, and the others
are all undefined. */
switch (s->cc_op) {
case CC_OP_MULB ... CC_OP_MULQ:
case CC_OP_ADDB ... CC_OP_ADDQ:
case CC_OP_ADCB ... CC_OP_ADCQ:
case CC_OP_SUBB ... CC_OP_SUBQ:
case CC_OP_SBBB ... CC_OP_SBBQ:
case CC_OP_LOGICB ... CC_OP_LOGICQ:
case CC_OP_INCB ... CC_OP_INCQ:
case CC_OP_DECB ... CC_OP_DECQ:
case CC_OP_SHLB ... CC_OP_SHLQ:
case CC_OP_SARB ... CC_OP_SARQ:
case CC_OP_BMILGB ... CC_OP_BMILGQ:
/* Z was going to be computed from the non-zero status of CC_DST.
We can get that same Z value (and the new C value) by leaving
CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
same width. */
tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
break;
default:
/* Otherwise, generate EFLAGS and replace the C bit. */
gen_compute_eflags(s);
tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
ctz32(CC_C), 1);
break;
}
break;
case 0x1bc: /* bsf / tzcnt */
case 0x1bd: /* bsr / lzcnt */
ot = dflag;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T0);
/* Note that lzcnt and tzcnt are in different extensions. */
if ((prefixes & PREFIX_REPZ)
&& (b & 1
? s->cpuid_ext3_features & CPUID_EXT3_ABM
: s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
int size = 8 << ot;
            /* For lzcnt/tzcnt, the C bit is defined relative to the input. */
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
if (b & 1) {
/* For lzcnt, reduce the target_ulong result by the
number of zeros that we expect to find at the top. */
tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
} else {
/* For tzcnt, a zero input must return the operand size. */
tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
}
            /* For lzcnt/tzcnt, the Z bit is defined relative to the result. */
gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
} else {
/* For bsr/bsf, only the Z bit is defined and it is related
to the input and not the result. */
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_LOGICB + ot);
/* ??? The manual says that the output is undefined when the
input is zero, but real hardware leaves it unchanged, and
real programs appear to depend on that. Accomplish this
by passing the output as the value to return upon zero. */
if (b & 1) {
/* For bsr, return the bit index of the first 1 bit,
not the count of leading zeros. */
tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
} else {
tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
}
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
/************************/
/* bcd */
case 0x27: /* daa */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_daa(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x2f: /* das */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_das(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x37: /* aaa */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_aaa(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3f: /* aas */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_helper_aas(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0xd4: /* aam */
if (CODE64(s))
goto illegal_op;
val = x86_ldub_code(env, s);
if (val == 0) {
gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
} else {
gen_helper_aam(cpu_env, tcg_const_i32(val));
set_cc_op(s, CC_OP_LOGICB);
}
break;
case 0xd5: /* aad */
if (CODE64(s))
goto illegal_op;
val = x86_ldub_code(env, s);
gen_helper_aad(cpu_env, tcg_const_i32(val));
set_cc_op(s, CC_OP_LOGICB);
break;
/************************/
/* misc */
case 0x90: /* nop */
/* XXX: correct lock test for all insn */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
/* If REX_B is set, then this is xchg eax, r8d, not a nop. */
if (REX_B(s)) {
goto do_xchg_reg_eax;
}
if (prefixes & PREFIX_REPZ) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
s->base.is_jmp = DISAS_NORETURN;
}
break;
case 0x9b: /* fwait */
if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
(HF_MP_MASK | HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
} else {
gen_helper_fwait(cpu_env);
}
break;
case 0xcc: /* int3 */
gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
break;
case 0xcd: /* int N */
val = x86_ldub_code(env, s);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
}
break;
case 0xce: /* into */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
break;
#ifdef WANT_ICEBP
case 0xf1: /* icebp (undocumented, exits to external debugger) */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
gen_debug(s, pc_start - s->cs_base);
#else
/* start debug */
tb_flush(CPU(x86_env_get_cpu(env)));
qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
break;
#endif
case 0xfa: /* cli */
if (!s->vm86) {
if (s->cpl <= s->iopl) {
gen_helper_cli(cpu_env);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
} else {
if (s->iopl == 3) {
gen_helper_cli(cpu_env);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
}
break;
case 0xfb: /* sti */
if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
gen_helper_sti(cpu_env);
            /* interrupts are enabled only after the first insn following sti */
gen_jmp_im(s->pc - s->cs_base);
gen_eob_inhibit_irq(s, true);
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
break;
case 0x62: /* bound */
if (CODE64(s))
goto illegal_op;
ot = dflag;
modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_op_mov_v_reg(ot, cpu_T0, reg);
gen_lea_modrm(env, s, modrm);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
if (ot == MO_16) {
gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
} else {
gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
}
break;
case 0x1c8 ... 0x1cf: /* bswap reg */
reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
if (dflag == MO_64) {
gen_op_mov_v_reg(MO_64, cpu_T0, reg);
tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
gen_op_mov_v_reg(MO_32, cpu_T0, reg);
tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_32, reg, cpu_T0);
}
break;
case 0xd6: /* salc */
if (CODE64(s))
goto illegal_op;
gen_compute_eflags_c(s, cpu_T0);
tcg_gen_neg_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xe0: /* loopnz */
case 0xe1: /* loopz */
case 0xe2: /* loop */
case 0xe3: /* jecxz */
{
TCGLabel *l1, *l2, *l3;
tval = (int8_t)insn_get(env, s, MO_8);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (dflag == MO_16) {
tval &= 0xffff;
}
l1 = gen_new_label();
l2 = gen_new_label();
l3 = gen_new_label();
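            /* l1: branch taken (jump to tval), l3: condition failed
               (fall through to next_eip), l2: common exit. */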
b &= 3;
switch(b) {
case 0: /* loopnz */
case 1: /* loopz */
gen_op_add_reg_im(s->aflag, R_ECX, -1);
gen_op_jz_ecx(s->aflag, l3);
gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
break;
case 2: /* loop */
gen_op_add_reg_im(s->aflag, R_ECX, -1);
gen_op_jnz_ecx(s->aflag, l1);
break;
default:
case 3: /* jcxz */
gen_op_jz_ecx(s->aflag, l1);
break;
}
gen_set_label(l3);
gen_jmp_im(next_eip);
tcg_gen_br(l2);
gen_set_label(l1);
gen_jmp_im(tval);
gen_set_label(l2);
gen_eob(s);
}
break;
case 0x130: /* wrmsr */
case 0x132: /* rdmsr */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
gen_helper_rdmsr(cpu_env);
} else {
gen_helper_wrmsr(cpu_env);
}
}
break;
case 0x131: /* rdtsc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtsc(cpu_env);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
case 0x133: /* rdpmc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_rdpmc(cpu_env);
break;
case 0x134: /* sysenter */
        /* On Intel CPUs, SYSENTER remains valid in 64-bit mode */
if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysenter(cpu_env);
gen_eob(s);
}
break;
case 0x135: /* sysexit */
        /* On Intel CPUs, SYSEXIT remains valid in 64-bit mode */
if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
gen_eob(s);
}
break;
#ifdef TARGET_X86_64
case 0x105: /* syscall */
        /* XXX: is it usable in real mode? */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
/* TF handling for the syscall insn is different. The TF bit is checked
after the syscall insn completes. This allows #DB to not be
generated after one has entered CPL0 if TF is set in FMASK. */
gen_eob_worker(s, false, true);
break;
case 0x107: /* sysret */
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (s->lma) {
set_cc_op(s, CC_OP_EFLAGS);
}
/* TF handling for the sysret insn is different. The TF bit is
checked after the sysret insn completes. This allows #DB to be
generated "as if" the syscall insn in userspace has just
completed. */
gen_eob_worker(s, false, true);
}
break;
#endif
case 0x1a2: /* cpuid */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_cpuid(cpu_env);
break;
case 0xf4: /* hlt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
s->base.is_jmp = DISAS_NORETURN;
}
break;
case 0x100:
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
case 0: /* sldt */
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, ldt.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 2: /* lldt */
if (!s->pe || s->vm86)
goto illegal_op;
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
}
break;
case 1: /* str */
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, tr.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 3: /* ltr */
if (!s->pe || s->vm86)
goto illegal_op;
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
}
break;
case 4: /* verr */
case 5: /* verw */
if (!s->pe || s->vm86)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_update_cc_op(s);
if (op == 4) {
gen_helper_verr(cpu_env, cpu_T0);
} else {
gen_helper_verw(cpu_env, cpu_T0);
}
set_cc_op(s, CC_OP_EFLAGS);
break;
default:
goto unknown_op;
}
break;
case 0x101:
modrm = x86_ldub_code(env, s);
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* sgdt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0,
cpu_env, offsetof(CPUX86State, gdt.limit));
gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
case 0xc8: /* monitor */
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
gen_helper_monitor(cpu_env, cpu_A0);
break;
case 0xc9: /* mwait */
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
case 0xca: /* clac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
|| s->cpl != 0) {
goto illegal_op;
}
gen_helper_clac(cpu_env);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 0xcb: /* stac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
|| s->cpl != 0) {
goto illegal_op;
}
gen_helper_stac(cpu_env);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(1): /* sidt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
case 0xd0: /* xgetbv */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->prefix & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
break;
case 0xd1: /* xsetbv */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->prefix & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
/* End TB because translation flags may change. */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 0xd8: /* VMRUN */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
tcg_const_i32(s->pc - pc_start));
tcg_gen_exit_tb(0);
s->base.is_jmp = DISAS_NORETURN;
break;
case 0xd9: /* VMMCALL */
if (!(s->flags & HF_SVME_MASK)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmmcall(cpu_env);
break;
case 0xda: /* VMLOAD */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
break;
case 0xdb: /* VMSAVE */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
break;
case 0xdc: /* STGI */
if ((!(s->flags & HF_SVME_MASK)
&& !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
|| !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_stgi(cpu_env);
break;
case 0xdd: /* CLGI */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_clgi(cpu_env);
break;
case 0xde: /* SKINIT */
if ((!(s->flags & HF_SVME_MASK)
&& !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
|| !s->pe) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_skinit(cpu_env);
break;
case 0xdf: /* INVLPGA */
if (!(s->flags & HF_SVME_MASK) || !s->pe) {
goto illegal_op;
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
break;
CASE_MODRM_MEM_OP(2): /* lgdt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
gen_add_A0_im(s, 2);
gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
break;
CASE_MODRM_MEM_OP(3): /* lidt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
gen_add_A0_im(s, 2);
gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
if (dflag == MO_16) {
tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
break;
CASE_MODRM_OP(4): /* smsw */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
if (CODE64(s)) {
mod = (modrm >> 6) & 3;
ot = (mod != 3 ? MO_16 : s->dflag);
} else {
ot = MO_16;
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0xee: /* rdpkru */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
break;
case 0xef: /* wrpkru */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
break;
CASE_MODRM_OP(6): /* lmsw */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_helper_lmsw(cpu_env, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(7): /* invlpg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_lea_modrm(env, s, modrm);
gen_helper_invlpg(cpu_env, cpu_A0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
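                /* Exchange the current GS base with the
                   kernelgsbase MSR value. */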
tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
offsetof(CPUX86State, kernelgsbase));
tcg_gen_st_tl(cpu_T0, cpu_env,
offsetof(CPUX86State, kernelgsbase));
}
break;
}
#endif
goto illegal_op;
case 0xf9: /* rdtscp */
if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtscp(cpu_env);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
break;
default:
goto unknown_op;
}
break;
case 0x108: /* invd */
case 0x109: /* wbinvd */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
/* nothing to do */
}
break;
case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
if (CODE64(s)) {
int d_ot;
/* d_ot is the size of destination */
d_ot = dflag;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
gen_op_mov_v_reg(MO_32, cpu_T0, rm);
/* sign extend */
if (d_ot == MO_64) {
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
} else
#endif
{
TCGLabel *label1;
TCGv t0, t1, t2, a0;
if (!s->pe || s->vm86)
goto illegal_op;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
t2 = tcg_temp_local_new();
ot = MO_16;
modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = modrm & 7;
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, t0, cpu_A0);
a0 = tcg_temp_local_new();
tcg_gen_mov_tl(a0, cpu_A0);
} else {
gen_op_mov_v_reg(ot, t0, rm);
TCGV_UNUSED(a0);
}
gen_op_mov_v_reg(ot, t1, reg);
tcg_gen_andi_tl(cpu_tmp0, t0, 3);
tcg_gen_andi_tl(t1, t1, 3);
tcg_gen_movi_tl(t2, 0);
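            /* ARPL: if the destination RPL is below the source RPL,
               raise it and set ZF (t2 = CC_Z); otherwise ZF is 0. */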
label1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
tcg_gen_andi_tl(t0, t0, ~3);
tcg_gen_or_tl(t0, t0, t1);
tcg_gen_movi_tl(t2, CC_Z);
gen_set_label(label1);
if (mod != 3) {
gen_op_st_v(s, ot, t0, a0);
tcg_temp_free(a0);
} else {
gen_op_mov_reg_v(ot, rm, t0);
}
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
}
break;
case 0x102: /* lar */
case 0x103: /* lsl */
{
TCGLabel *label1;
TCGv t0;
if (!s->pe || s->vm86)
goto illegal_op;
ot = dflag != MO_16 ? MO_32 : MO_16;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
t0 = tcg_temp_local_new();
gen_update_cc_op(s);
if (b == 0x102) {
gen_helper_lar(t0, cpu_env, cpu_T0);
} else {
gen_helper_lsl(t0, cpu_env, cpu_T0);
}
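            /* The helpers set CC_Z only for a valid selector; skip the
               register write-back when it is clear. */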
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
gen_op_mov_reg_v(ot, reg, t0);
gen_set_label(label1);
set_cc_op(s, CC_OP_EFLAGS);
tcg_temp_free(t0);
}
break;
case 0x118:
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
if (mod == 3)
goto illegal_op;
gen_nop_modrm(env, s, modrm);
/* nothing more to do */
break;
default: /* nop (multi byte) */
gen_nop_modrm(env, s, modrm);
break;
}
break;
case 0x11a:
modrm = x86_ldub_code(env, s);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
if (prefixes & PREFIX_REPZ) {
/* bndcl */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
} else if (prefixes & PREFIX_REPNZ) {
/* bndcu */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
TCGv_i64 notu = tcg_temp_new_i64();
tcg_gen_not_i64(notu, cpu_bndu[reg]);
gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
tcg_temp_free_i64(notu);
} else if (prefixes & PREFIX_DATA) {
/* bndmov -- from reg/mem */
if (reg >= 4 || s->aflag == MO_16) {
goto illegal_op;
}
if (mod == 3) {
int reg2 = (modrm & 7) | REX_B(s);
if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if (s->flags & HF_MPX_IU_MASK) {
tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
}
} else {
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEQ);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEQ);
} else {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEUL);
}
/* bnd registers are now in-use */
gen_set_hflag(s, HF_MPX_IU_MASK);
}
} else if (mod != 3) {
/* bndldx */
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16
|| a.base < -1) {
goto illegal_op;
}
if (a.base >= 0) {
tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
} else {
tcg_gen_movi_tl(cpu_A0, 0);
}
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (a.index >= 0) {
tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
} else {
tcg_gen_movi_tl(cpu_T0, 0);
}
if (CODE64(s)) {
gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
} else {
gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
}
gen_set_hflag(s, HF_MPX_IU_MASK);
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0x11b:
modrm = x86_ldub_code(env, s);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
if (mod != 3 && (prefixes & PREFIX_REPZ)) {
/* bndmk */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (a.base >= 0) {
tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
if (!CODE64(s)) {
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
}
} else if (a.base == -1) {
                    /* with no base register, the lower bound is 0 */
tcg_gen_movi_i64(cpu_bndl[reg], 0);
} else {
/* rip-relative generates #ud */
goto illegal_op;
}
tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
if (!CODE64(s)) {
tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
/* bnd registers are now in-use */
gen_set_hflag(s, HF_MPX_IU_MASK);
break;
} else if (prefixes & PREFIX_REPNZ) {
/* bndcn */
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
} else if (prefixes & PREFIX_DATA) {
/* bndmov -- to reg/mem */
if (reg >= 4 || s->aflag == MO_16) {
goto illegal_op;
}
if (mod == 3) {
int reg2 = (modrm & 7) | REX_B(s);
if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if (s->flags & HF_MPX_IU_MASK) {
tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
}
} else {
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEQ);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEQ);
} else {
tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
s->mem_index, MO_LEUL);
}
}
} else if (mod != 3) {
/* bndstx */
AddressParts a = gen_lea_modrm_0(env, s, modrm);
if (reg >= 4
|| (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16
|| a.base < -1) {
goto illegal_op;
}
if (a.base >= 0) {
tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
} else {
tcg_gen_movi_tl(cpu_A0, 0);
}
gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
if (a.index >= 0) {
tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
} else {
tcg_gen_movi_tl(cpu_T0, 0);
}
if (CODE64(s)) {
gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
cpu_bndl[reg], cpu_bndu[reg]);
} else {
gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
cpu_bndl[reg], cpu_bndu[reg]);
}
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
modrm = x86_ldub_code(env, s);
gen_nop_modrm(env, s, modrm);
break;
case 0x120: /* mov reg, crN */
case 0x122: /* mov crN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
modrm = x86_ldub_code(env, s);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
ot = MO_64;
else
ot = MO_32;
if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
(s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
reg = 8;
}
switch(reg) {
case 0:
case 2:
case 3:
case 4:
case 8:
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T0);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(ot, rm, cpu_T0);
if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
}
break;
default:
goto unknown_op;
}
}
break;
case 0x121: /* mov reg, drN */
case 0x123: /* mov drN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
modrm = x86_ldub_code(env, s);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
ot = MO_64;
else
ot = MO_32;
if (reg >= 8) {
goto illegal_op;
}
if (b & 2) {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
gen_op_mov_v_reg(ot, cpu_T0, rm);
tcg_gen_movi_i32(cpu_tmp2_i32, reg);
gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
tcg_gen_movi_i32(cpu_tmp2_i32, reg);
gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
break;
case 0x106: /* clts */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_helper_clts(cpu_env);
/* abort block because static cpu state changed */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
break;
/* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
case 0x1c3: /* MOVNTI reg, mem */
if (!(s->cpuid_features & CPUID_SSE2))
goto illegal_op;
ot = mo_64_32(dflag);
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0x1ae:
modrm = x86_ldub_code(env, s);
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* fxsave */
if (!(s->cpuid_features & CPUID_FXSR)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
gen_helper_fxsave(cpu_env, cpu_A0);
break;
CASE_MODRM_MEM_OP(1): /* fxrstor */
if (!(s->cpuid_features & CPUID_FXSR)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
gen_helper_fxrstor(cpu_env, cpu_A0);
break;
CASE_MODRM_MEM_OP(2): /* ldmxcsr */
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
goto illegal_op;
}
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
break;
CASE_MODRM_MEM_OP(3): /* stmxcsr */
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
goto illegal_op;
}
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
break;
CASE_MODRM_MEM_OP(4): /* xsave */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (prefixes & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
break;
CASE_MODRM_MEM_OP(5): /* xrstor */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (prefixes & (PREFIX_LOCK | PREFIX_DATA
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
/* XRSTOR is how MPX is enabled, which changes how
we translate. Thus we need to end the TB. */
gen_update_cc_op(s);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
if (prefixes & PREFIX_DATA) {
/* clwb */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
goto illegal_op;
}
gen_nop_modrm(env, s, modrm);
} else {
/* xsaveopt */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|| (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
|| (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
}
break;
CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
if (prefixes & PREFIX_LOCK) {
goto illegal_op;
}
if (prefixes & PREFIX_DATA) {
/* clflushopt */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
goto illegal_op;
}
} else {
/* clflush */
if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
|| !(s->cpuid_features & CPUID_CLFLUSH)) {
goto illegal_op;
}
}
gen_nop_modrm(env, s, modrm);
break;
case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
if (CODE64(s)
&& (prefixes & PREFIX_REPZ)
&& !(prefixes & PREFIX_LOCK)
&& (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
TCGv base, treg, src, dst;
/* Preserve hflags bits by testing CR4 at runtime. */
tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
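                /* modrm bit 3 selects FS vs GS, bit 4 selects read vs
                   write, matching the /0../3 reg-field encodings. */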
base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
treg = cpu_regs[(modrm & 7) | REX_B(s)];
if (modrm & 0x10) {
/* wr*base */
dst = base, src = treg;
} else {
/* rd*base */
dst = treg, src = base;
}
if (s->dflag == MO_32) {
tcg_gen_ext32u_tl(dst, src);
} else {
tcg_gen_mov_tl(dst, src);
}
break;
}
goto unknown_op;
case 0xf8: /* sfence / pcommit */
if (prefixes & PREFIX_DATA) {
/* pcommit */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
break;
}
/* fallthru */
case 0xf9 ... 0xff: /* sfence */
if (!(s->cpuid_features & CPUID_SSE)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
break;
case 0xe8 ... 0xef: /* lfence */
if (!(s->cpuid_features & CPUID_SSE)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
break;
case 0xf0 ... 0xf7: /* mfence */
if (!(s->cpuid_features & CPUID_SSE2)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
break;
default:
goto unknown_op;
}
break;
case 0x10d: /* 3DNow! prefetch(w) */
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_nop_modrm(env, s, modrm);
break;
case 0x1aa: /* rsm */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
if (!(s->flags & HF_SMM_MASK))
goto illegal_op;
gen_update_cc_op(s);
gen_jmp_im(s->pc - s->cs_base);
gen_helper_rsm(cpu_env);
gen_eob(s);
break;
case 0x1b8: /* SSE4.2 popcnt */
if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
PREFIX_REPZ)
goto illegal_op;
if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
goto illegal_op;
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
if (s->prefix & PREFIX_DATA) {
ot = MO_16;
} else {
ot = mo_64_32(dflag);
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T0);
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
set_cc_op(s, CC_OP_POPCNT);
break;
case 0x10e ... 0x10f:
/* 3DNow! instructions, ignore prefixes */
s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
case 0x110 ... 0x117:
case 0x128 ... 0x12f:
case 0x138 ... 0x13a:
case 0x150 ... 0x179:
case 0x17c ... 0x17f:
case 0x1c2:
case 0x1c4 ... 0x1c6:
case 0x1d0 ... 0x1fe:
gen_sse(env, s, b, pc_start, rex_r);
break;
default:
goto unknown_op;
}
return s->pc;
illegal_op:
gen_illegal_opcode(s);
return s->pc;
unknown_op:
gen_unknown_opcode(env, s);
return s->pc;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_572
|
static int iscsi_open(BlockDriverState *bs, const char *filename, int flags)
{
IscsiLun *iscsilun = bs->opaque;
struct iscsi_context *iscsi = NULL;
struct iscsi_url *iscsi_url = NULL;
struct IscsiTask task;
char *initiator_name = NULL;
int ret;
if ((BDRV_SECTOR_SIZE % 512) != 0) {
error_report("iSCSI: Invalid BDRV_SECTOR_SIZE. "
"BDRV_SECTOR_SIZE(%lld) is not a multiple "
"of 512", BDRV_SECTOR_SIZE);
return -EINVAL;
}
iscsi_url = iscsi_parse_full_url(iscsi, filename);
if (iscsi_url == NULL) {
error_report("Failed to parse URL : %s %s", filename,
iscsi_get_error(iscsi));
ret = -EINVAL;
goto failed;
}
memset(iscsilun, 0, sizeof(IscsiLun));
initiator_name = parse_initiator_name(iscsi_url->target);
iscsi = iscsi_create_context(initiator_name);
if (iscsi == NULL) {
error_report("iSCSI: Failed to create iSCSI context.");
ret = -ENOMEM;
goto failed;
}
if (iscsi_set_targetname(iscsi, iscsi_url->target)) {
error_report("iSCSI: Failed to set target name.");
ret = -EINVAL;
goto failed;
}
if (iscsi_url->user != NULL) {
ret = iscsi_set_initiator_username_pwd(iscsi, iscsi_url->user,
iscsi_url->passwd);
if (ret != 0) {
error_report("Failed to set initiator username and password");
ret = -EINVAL;
goto failed;
}
}
/* check if we got CHAP username/password via the options */
if (parse_chap(iscsi, iscsi_url->target) != 0) {
error_report("iSCSI: Failed to set CHAP user/password");
ret = -EINVAL;
goto failed;
}
if (iscsi_set_session_type(iscsi, ISCSI_SESSION_NORMAL) != 0) {
error_report("iSCSI: Failed to set session type to normal.");
ret = -EINVAL;
goto failed;
}
iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C);
/* check if we got HEADER_DIGEST via the options */
parse_header_digest(iscsi, iscsi_url->target);
task.iscsilun = iscsilun;
task.status = 0;
task.complete = 0;
task.bs = bs;
iscsilun->iscsi = iscsi;
iscsilun->lun = iscsi_url->lun;
if (iscsi_full_connect_async(iscsi, iscsi_url->portal, iscsi_url->lun,
iscsi_connect_cb, &task)
!= 0) {
error_report("iSCSI: Failed to start async connect.");
ret = -EINVAL;
goto failed;
}
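    /* Drive the libiscsi event loop until the asynchronous connect
     * above invokes iscsi_connect_cb, which sets task.complete. */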
while (!task.complete) {
iscsi_set_events(iscsilun);
qemu_aio_wait();
}
if (task.status != 0) {
error_report("iSCSI: Failed to connect to LUN : %s",
iscsi_get_error(iscsi));
ret = -EINVAL;
goto failed;
}
if (iscsi_url != NULL) {
iscsi_destroy_url(iscsi_url);
}
    /* Medium changer or tape. We don't have any emulation for these, so
     * they must be SG_IO compatible. We force sg mode; otherwise qemu
     * will try to read from the device to guess the image format.
     */
if (iscsilun->type == TYPE_MEDIUM_CHANGER ||
iscsilun->type == TYPE_TAPE) {
bs->sg = 1;
}
return 0;
failed:
if (initiator_name != NULL) {
g_free(initiator_name);
}
if (iscsi_url != NULL) {
iscsi_destroy_url(iscsi_url);
}
if (iscsi != NULL) {
iscsi_destroy_context(iscsi);
}
memset(iscsilun, 0, sizeof(IscsiLun));
return ret;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_573
|
static void vmxnet3_rx_need_csum_calculate(struct VmxnetRxPkt *pkt,
const void *pkt_data,
size_t pkt_len)
{
struct virtio_net_hdr *vhdr;
bool isip4, isip6, istcp, isudp;
uint8_t *data;
int len;
if (!vmxnet_rx_pkt_has_virt_hdr(pkt)) {
return;
}
vhdr = vmxnet_rx_pkt_get_vhdr(pkt);
if (!VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
return;
}
vmxnet_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
if (!(isip4 || isip6) || !(istcp || isudp)) {
return;
}
vmxnet3_dump_virt_hdr(vhdr);
    /* Validate packet len: csum_start + csum_offset + length of csum field */
if (pkt_len < (vhdr->csum_start + vhdr->csum_offset + 2)) {
VMW_PKPRN("packet len:%d < csum_start(%d) + csum_offset(%d) + 2, "
"cannot calculate checksum",
len, vhdr->csum_start, vhdr->csum_offset);
return;
}
data = (uint8_t *)pkt_data + vhdr->csum_start;
len = pkt_len - vhdr->csum_start;
/* Put the checksum obtained into the packet */
stw_be_p(data + vhdr->csum_offset, net_raw_checksum(data, len));
vhdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
vhdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_574
|
CommandInfoList *qmp_query_commands(Error **errp)
{
CommandInfoList *list = NULL;
qmp_for_each_command(&qmp_commands, query_commands_cb, &list);
return list;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_585
|
static int decode_blocks(ALSDecContext *ctx, unsigned int ra_frame,
unsigned int c, const unsigned int *div_blocks,
unsigned int *js_blocks)
{
ALSSpecificConfig *sconf = &ctx->sconf;
unsigned int offset = 0;
unsigned int b;
ALSBlockData bd[2] = { { 0 } };
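    // bd[0] and bd[1] hold the per-block decoding state for channels
    // c and c + 1 of the channel pair; both share the context buffers.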
bd[0].ra_block = ra_frame;
bd[0].const_block = ctx->const_block;
bd[0].shift_lsbs = ctx->shift_lsbs;
bd[0].opt_order = ctx->opt_order;
bd[0].store_prev_samples = ctx->store_prev_samples;
bd[0].use_ltp = ctx->use_ltp;
bd[0].ltp_lag = ctx->ltp_lag;
bd[0].ltp_gain = ctx->ltp_gain[0];
bd[0].quant_cof = ctx->quant_cof[0];
bd[0].lpc_cof = ctx->lpc_cof[0];
bd[0].prev_raw_samples = ctx->prev_raw_samples;
bd[0].js_blocks = *js_blocks;
bd[1].ra_block = ra_frame;
bd[1].const_block = ctx->const_block;
bd[1].shift_lsbs = ctx->shift_lsbs;
bd[1].opt_order = ctx->opt_order;
bd[1].store_prev_samples = ctx->store_prev_samples;
bd[1].use_ltp = ctx->use_ltp;
bd[1].ltp_lag = ctx->ltp_lag;
bd[1].ltp_gain = ctx->ltp_gain[0];
bd[1].quant_cof = ctx->quant_cof[0];
bd[1].lpc_cof = ctx->lpc_cof[0];
bd[1].prev_raw_samples = ctx->prev_raw_samples;
bd[1].js_blocks = *(js_blocks + 1);
// decode all blocks
for (b = 0; b < ctx->num_blocks; b++) {
unsigned int s;
bd[0].block_length = div_blocks[b];
bd[1].block_length = div_blocks[b];
bd[0].raw_samples = ctx->raw_samples[c ] + offset;
bd[1].raw_samples = ctx->raw_samples[c + 1] + offset;
bd[0].raw_other = bd[1].raw_samples;
bd[1].raw_other = bd[0].raw_samples;
if(read_decode_block(ctx, &bd[0]) || read_decode_block(ctx, &bd[1])) {
            // damaged block, write zeros for the rest of the frame
zero_remaining(b, ctx->num_blocks, div_blocks, bd[0].raw_samples);
zero_remaining(b, ctx->num_blocks, div_blocks, bd[1].raw_samples);
return -1;
}
// reconstruct joint-stereo blocks
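        // A js-flagged block stores the difference signal for its
        // channel; undo it using the companion channel's samples.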
if (bd[0].js_blocks) {
if (bd[1].js_blocks)
av_log(ctx->avctx, AV_LOG_WARNING, "Invalid channel pair!\n");
for (s = 0; s < div_blocks[b]; s++)
bd[0].raw_samples[s] = bd[1].raw_samples[s] - bd[0].raw_samples[s];
} else if (bd[1].js_blocks) {
for (s = 0; s < div_blocks[b]; s++)
bd[1].raw_samples[s] = bd[1].raw_samples[s] + bd[0].raw_samples[s];
}
offset += div_blocks[b];
bd[0].ra_block = 0;
bd[1].ra_block = 0;
}
// store carryover raw samples,
    // the other channels' raw samples are stored by the calling function.
memmove(ctx->raw_samples[c] - sconf->max_order,
ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
sizeof(*ctx->raw_samples[c]) * sconf->max_order);
return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_599
|
static int write_f(int argc, char **argv)
{
struct timeval t1, t2;
int Cflag = 0, pflag = 0, qflag = 0, bflag = 0;
int c, cnt;
char *buf;
int64_t offset;
int count;
/* Some compilers get confused and warn if this is not initialized. */
int total = 0;
int pattern = 0xcd;
while ((c = getopt(argc, argv, "bCpP:q")) != EOF) {
switch (c) {
case 'b':
bflag = 1;
break;
case 'C':
Cflag = 1;
break;
case 'p':
pflag = 1;
break;
case 'P':
pattern = parse_pattern(optarg);
if (pattern < 0) {
return 0;
}
break;
case 'q':
qflag = 1;
break;
default:
return command_usage(&write_cmd);
}
}
if (optind != argc - 2) {
return command_usage(&write_cmd);
}
if (bflag && pflag) {
printf("-b and -p cannot be specified at the same time\n");
return 0;
}
offset = cvtnum(argv[optind]);
if (offset < 0) {
printf("non-numeric length argument -- %s\n", argv[optind]);
return 0;
}
optind++;
count = cvtnum(argv[optind]);
if (count < 0) {
printf("non-numeric length argument -- %s\n", argv[optind]);
return 0;
}
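    /* Except for the byte-granular -p (pwrite) path, both offset and
     * count must be 512-byte sector aligned. */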
if (!pflag) {
if (offset & 0x1ff) {
printf("offset %" PRId64 " is not sector aligned\n",
offset);
return 0;
}
if (count & 0x1ff) {
printf("count %d is not sector aligned\n",
count);
return 0;
}
}
buf = qemu_io_alloc(count, pattern);
gettimeofday(&t1, NULL);
if (pflag) {
cnt = do_pwrite(buf, offset, count, &total);
} else if (bflag) {
cnt = do_save_vmstate(buf, offset, count, &total);
} else {
cnt = do_write(buf, offset, count, &total);
}
gettimeofday(&t2, NULL);
if (cnt < 0) {
printf("write failed: %s\n", strerror(-cnt));
goto out;
}
if (qflag) {
goto out;
}
/* Finally, report back -- -C gives a parsable format */
t2 = tsub(t2, t1);
print_report("wrote", &t2, offset, count, total, cnt, Cflag);
out:
qemu_io_free(buf);
return 0;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_657
|
static int libopenjpeg_copy_packed12(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
int compno;
int x, y;
int *image_line;
int frame_index;
const int numcomps = image->numcomps;
uint16_t *frame_ptr = (uint16_t *)frame->data[0];
for (compno = 0; compno < numcomps; ++compno) {
if (image->comps[compno].w > frame->linesize[0] / numcomps) {
av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
return 0;
}
}
for (compno = 0; compno < numcomps; ++compno) {
for (y = 0; y < avctx->height; ++y) {
image_line = image->comps[compno].data + y * image->comps[compno].w;
frame_index = y * (frame->linesize[0] / 2) + compno;
for (x = 0; x < avctx->width; ++x) {
image_line[x] = frame_ptr[frame_index] >> 4;
frame_index += numcomps;
}
for (; x < image->comps[compno].w; ++x) {
image_line[x] = image_line[x - 1];
}
}
for (; y < image->comps[compno].h; ++y) {
image_line = image->comps[compno].data + y * image->comps[compno].w;
for (x = 0; x < image->comps[compno].w; ++x) {
image_line[x] = image_line[x - image->comps[compno].w];
}
}
}
return 1;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_666
|
static void build_feed_streams(void)
{
FFStream *stream, *feed;
int i;
/* gather all streams */
for(stream = first_stream; stream != NULL; stream = stream->next) {
feed = stream->feed;
if (feed) {
if (!stream->is_feed) {
/* we handle a stream coming from a feed */
for(i=0;i<stream->nb_streams;i++)
stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]);
}
}
}
/* gather all streams */
for(stream = first_stream; stream != NULL; stream = stream->next) {
feed = stream->feed;
if (feed) {
if (stream->is_feed) {
for(i=0;i<stream->nb_streams;i++)
stream->feed_streams[i] = i;
}
}
}
/* create feed files if needed */
for(feed = first_feed; feed != NULL; feed = feed->next_feed) {
int fd;
if (url_exist(feed->feed_filename)) {
/* See if it matches */
AVFormatContext *s;
int matches = 0;
if (av_open_input_file(&s, feed->feed_filename, NULL, FFM_PACKET_SIZE, NULL) >= 0) {
/* Now see if it matches */
if (s->nb_streams == feed->nb_streams) {
matches = 1;
for(i=0;i<s->nb_streams;i++) {
AVStream *sf, *ss;
sf = feed->streams[i];
ss = s->streams[i];
if (sf->index != ss->index ||
sf->id != ss->id) {
http_log("Index & Id do not match for stream %d (%s)\n",
i, feed->feed_filename);
matches = 0;
} else {
AVCodecContext *ccf, *ccs;
ccf = sf->codec;
ccs = ss->codec;
#define CHECK_CODEC(x) (ccf->x != ccs->x)
if (CHECK_CODEC(codec_id) || CHECK_CODEC(codec_type)) {
http_log("Codecs do not match for stream %d\n", i);
matches = 0;
} else if (CHECK_CODEC(bit_rate) || CHECK_CODEC(flags)) {
http_log("Codec bitrates do not match for stream %d\n", i);
matches = 0;
} else if (ccf->codec_type == AVMEDIA_TYPE_VIDEO) {
if (CHECK_CODEC(time_base.den) ||
CHECK_CODEC(time_base.num) ||
CHECK_CODEC(width) ||
CHECK_CODEC(height)) {
http_log("Codec width, height and framerate do not match for stream %d\n", i);
matches = 0;
}
} else if (ccf->codec_type == AVMEDIA_TYPE_AUDIO) {
if (CHECK_CODEC(sample_rate) ||
CHECK_CODEC(channels) ||
CHECK_CODEC(frame_size)) {
http_log("Codec sample_rate, channels, frame_size do not match for stream %d\n", i);
matches = 0;
}
} else {
http_log("Unknown codec type\n");
matches = 0;
}
}
if (!matches)
break;
}
} else
http_log("Deleting feed file '%s' as stream counts differ (%d != %d)\n",
feed->feed_filename, s->nb_streams, feed->nb_streams);
av_close_input_file(s);
} else
http_log("Deleting feed file '%s' as it appears to be corrupt\n",
feed->feed_filename);
if (!matches) {
if (feed->readonly) {
http_log("Unable to delete feed file '%s' as it is marked readonly\n",
feed->feed_filename);
exit(1);
}
unlink(feed->feed_filename);
}
}
if (!url_exist(feed->feed_filename)) {
AVFormatContext s1 = {0}, *s = &s1;
if (feed->readonly) {
http_log("Unable to create feed file '%s' as it is marked readonly\n",
feed->feed_filename);
exit(1);
}
/* only write the header of the ffm file */
if (avio_open(&s->pb, feed->feed_filename, AVIO_FLAG_WRITE) < 0) {
http_log("Could not open output feed file '%s'\n",
feed->feed_filename);
exit(1);
}
s->oformat = feed->fmt;
s->nb_streams = feed->nb_streams;
for(i=0;i<s->nb_streams;i++) {
AVStream *st;
st = feed->streams[i];
s->streams[i] = st;
}
av_set_parameters(s, NULL);
if (av_write_header(s) < 0) {
http_log("Container doesn't supports the required parameters\n");
exit(1);
}
/* XXX: need better api */
av_freep(&s->priv_data);
avio_close(s->pb);
}
/* get feed size and write index */
fd = open(feed->feed_filename, O_RDONLY);
if (fd < 0) {
http_log("Could not open output feed file '%s'\n",
feed->feed_filename);
exit(1);
}
feed->feed_write_index = FFMAX(ffm_read_write_index(fd), FFM_PACKET_SIZE);
feed->feed_size = lseek(fd, 0, SEEK_END);
/* ensure that we do not wrap before the end of file */
if (feed->feed_max_size && feed->feed_max_size < feed->feed_size)
feed->feed_max_size = feed->feed_size;
close(fd);
}
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_669
|
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
int error_code, target_ulong next_eip, int is_hw)
{
SegmentCache *dt;
target_ulong ptr;
int type, dpl, selector, cpl, ist;
int has_error_code, new_stack;
uint32_t e1, e2, e3, ss;
target_ulong old_eip, esp, offset;
has_error_code = 0;
if (!is_int && !is_hw) {
has_error_code = exception_has_error_code(intno);
}
if (is_int) {
old_eip = next_eip;
} else {
old_eip = env->eip;
}
dt = &env->idt;
if (intno * 16 + 15 > dt->limit) {
raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
}
ptr = dt->base + intno * 16;
e1 = cpu_ldl_kernel(env, ptr);
e2 = cpu_ldl_kernel(env, ptr + 4);
e3 = cpu_ldl_kernel(env, ptr + 8);
/* check gate type */
type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
switch (type) {
case 14: /* 386 interrupt gate */
case 15: /* 386 trap gate */
break;
default:
raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
break;
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
/* check privilege if software int */
if (is_int && dpl < cpl) {
raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
}
/* check valid bit */
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
}
selector = e1 >> 16;
offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
ist = e2 & 7;
if ((selector & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (dpl > cpl) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
}
if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
/* to inner privilege */
new_stack = 1;
esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
ss = 0;
} else if ((e2 & DESC_C_MASK) || dpl == cpl) {
/* to same privilege */
if (env->eflags & VM_MASK) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
new_stack = 0;
esp = env->regs[R_ESP];
dpl = cpl;
} else {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
new_stack = 0; /* avoid warning */
esp = 0; /* avoid warning */
}
esp &= ~0xfLL; /* align stack */
PUSHQ(esp, env->segs[R_SS].selector);
PUSHQ(esp, env->regs[R_ESP]);
PUSHQ(esp, cpu_compute_eflags(env));
PUSHQ(esp, env->segs[R_CS].selector);
PUSHQ(esp, old_eip);
if (has_error_code) {
PUSHQ(esp, error_code);
}
/* interrupt gate clear IF mask */
if ((type & 1) == 0) {
env->eflags &= ~IF_MASK;
}
env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
if (new_stack) {
ss = 0 | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
}
env->regs[R_ESP] = esp;
selector = (selector & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_CS, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
env->eip = offset;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_674
|
void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
long i;
/*
writes 1 byte too much and might cause alignment issues on some architectures?
for(i=0; i<num_pixels; i++)
((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
*/
for(i=0; i<num_pixels; i++)
{
//FIXME slow?
dst[0]= palette[ src[i]*4+0 ];
dst[1]= palette[ src[i]*4+1 ];
dst[2]= palette[ src[i]*4+2 ];
dst+= 3;
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_677
|
static inline void codeblock(DiracContext *s, SubBand *b,
GetBitContext *gb, DiracArith *c,
int left, int right, int top, int bottom,
int blockcnt_one, int is_arith)
{
int x, y, zero_block;
int qoffset, qfactor;
IDWTELEM *buf;
/* check for any coded coefficients in this codeblock */
if (!blockcnt_one) {
if (is_arith)
zero_block = dirac_get_arith_bit(c, CTX_ZERO_BLOCK);
else
zero_block = get_bits1(gb);
if (zero_block)
return;
}
if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
if (is_arith)
b->quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
else
b->quant += dirac_get_se_golomb(gb);
}
b->quant = FFMIN(b->quant, MAX_QUANT);
qfactor = qscale_tab[b->quant];
/* TODO: context pointer? */
if (!s->num_refs)
qoffset = qoffset_intra_tab[b->quant];
else
qoffset = qoffset_inter_tab[b->quant];
buf = b->ibuf + top * b->stride;
for (y = top; y < bottom; y++) {
for (x = left; x < right; x++) {
/* [DIRAC_STD] 13.4.4 Subband coefficients. coeff_unpack() */
if (is_arith)
coeff_unpack_arith(c, qfactor, qoffset, b, buf+x, x, y);
else
buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
}
buf += b->stride;
}
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_683
|
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
FFV1Context *f = avctx->priv_data;
CABACContext * const c= &f->c;
AVFrame *pict = data;
const int width= f->width;
const int height= f->height;
AVFrame * const p= &f->picture;
int used_count= 0;
if(avctx->strict_std_compliance >= 0){
av_log(avctx, AV_LOG_ERROR, "this codec is under development, files encoded with it wont be decodeable with future versions!!!\n"
"use vstrict=-1 to use it anyway\n");
return -1;
}
ff_init_cabac_encoder(c, buf, buf_size);
ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64);
c->lps_state[2] = 1;
c->lps_state[3] = 0;
*p = *pict;
p->pict_type= FF_I_TYPE;
if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
put_cabac_bypass(c, 1);
p->key_frame= 1;
write_header(f);
clear_state(f);
}else{
put_cabac_bypass(c, 0);
p->key_frame= 0;
}
if(!f->ac){
used_count += put_cabac_terminate(c, 1);
//printf("pos=%d\n", used_count);
init_put_bits(&f->pb, buf + used_count, buf_size - used_count);
}
if(f->colorspace==0){
const int chroma_width = -((-width )>>f->chroma_h_shift);
const int chroma_height= -((-height)>>f->chroma_v_shift);
encode_plane(f, p->data[0], width, height, p->linesize[0], 0);
encode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1);
encode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 1);
}else{
encode_rgb_frame(f, (uint32_t*)(p->data[0]), width, height, p->linesize[0]/4);
}
emms_c();
f->picture_number++;
if(f->ac){
return put_cabac_terminate(c, 1);
}else{
flush_put_bits(&f->pb); //nicer padding FIXME
return used_count + (put_bits_count(&f->pb)+7)/8;
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_686
|
void do_POWER_maskg (void)
{
uint32_t ret;
if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
ret = -1;
} else {
ret = (((uint32_t)(-1)) >> ((uint32_t)T0)) ^
(((uint32_t)(-1) >> ((uint32_t)T1)) >> 1);
if ((uint32_t)T0 > (uint32_t)T1)
ret = ~ret;
}
T0 = ret;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_707
|
static void qed_aio_read_data(void *opaque, int ret,
uint64_t offset, size_t len)
{
QEDAIOCB *acb = opaque;
BDRVQEDState *s = acb_to_s(acb);
BlockDriverState *bs = acb->common.bs;
/* Adjust offset into cluster */
offset += qed_offset_into_cluster(s, acb->cur_pos);
trace_qed_aio_read_data(s, acb, ret, offset, len);
if (ret < 0) {
goto err;
}
qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
/* Handle zero cluster and backing file reads */
if (ret == QED_CLUSTER_ZERO) {
qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
qed_aio_next_io(acb, 0);
return;
} else if (ret != QED_CLUSTER_FOUND) {
qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
qed_aio_next_io, acb);
return;
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
&acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
qed_aio_next_io, acb);
return;
err:
qed_aio_complete(acb, ret);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_710
|
int qcow2_update_header(BlockDriverState *bs)
{
BDRVQcowState *s = bs->opaque;
QCowHeader *header;
char *buf;
size_t buflen = s->cluster_size;
int ret;
uint64_t total_size;
uint32_t refcount_table_clusters;
size_t header_length;
Qcow2UnknownHeaderExtension *uext;
buf = qemu_blockalign(bs, buflen);
/* Header structure */
header = (QCowHeader*) buf;
if (buflen < sizeof(*header)) {
ret = -ENOSPC;
goto fail;
}
header_length = sizeof(*header) + s->unknown_header_fields_size;
total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
*header = (QCowHeader) {
/* Version 2 fields */
.magic = cpu_to_be32(QCOW_MAGIC),
.version = cpu_to_be32(s->qcow_version),
.backing_file_offset = 0,
.backing_file_size = 0,
.cluster_bits = cpu_to_be32(s->cluster_bits),
.size = cpu_to_be64(total_size),
.crypt_method = cpu_to_be32(s->crypt_method_header),
.l1_size = cpu_to_be32(s->l1_size),
.l1_table_offset = cpu_to_be64(s->l1_table_offset),
.refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
.refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
.nb_snapshots = cpu_to_be32(s->nb_snapshots),
.snapshots_offset = cpu_to_be64(s->snapshots_offset),
/* Version 3 fields */
.incompatible_features = cpu_to_be64(s->incompatible_features),
.compatible_features = cpu_to_be64(s->compatible_features),
.autoclear_features = cpu_to_be64(s->autoclear_features),
.refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT),
.header_length = cpu_to_be32(header_length),
};
/* For older versions, write a shorter header */
switch (s->qcow_version) {
case 2:
ret = offsetof(QCowHeader, incompatible_features);
break;
case 3:
ret = sizeof(*header);
break;
default:
return -EINVAL;
}
buf += ret;
buflen -= ret;
memset(buf, 0, buflen);
/* Preserve any unknown field in the header */
if (s->unknown_header_fields_size) {
if (buflen < s->unknown_header_fields_size) {
ret = -ENOSPC;
goto fail;
}
memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
buf += s->unknown_header_fields_size;
buflen -= s->unknown_header_fields_size;
}
/* Backing file format header extension */
if (*bs->backing_format) {
ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
bs->backing_format, strlen(bs->backing_format),
buflen);
if (ret < 0) {
goto fail;
}
buf += ret;
buflen -= ret;
}
/* Feature table */
Qcow2Feature features[] = {
/* no feature defined yet */
};
ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
features, sizeof(features), buflen);
if (ret < 0) {
goto fail;
}
buf += ret;
buflen -= ret;
/* Keep unknown header extensions */
QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
if (ret < 0) {
goto fail;
}
buf += ret;
buflen -= ret;
}
/* End of header extensions */
ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
if (ret < 0) {
goto fail;
}
buf += ret;
buflen -= ret;
/* Backing file name */
if (*bs->backing_file) {
size_t backing_file_len = strlen(bs->backing_file);
if (buflen < backing_file_len) {
ret = -ENOSPC;
goto fail;
}
strncpy(buf, bs->backing_file, buflen);
header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
header->backing_file_size = cpu_to_be32(backing_file_len);
}
/* Write the new header */
ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
if (ret < 0) {
goto fail;
}
ret = 0;
fail:
qemu_vfree(header);
return ret;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_729
|
void tcp_start_incoming_migration(const char *host_port, Error **errp)
{
int s;
s = inet_listen(host_port, NULL, 256, SOCK_STREAM, 0, errp);
if (s < 0) {
return;
}
qemu_set_fd_handler2(s, NULL, tcp_accept_incoming_migration, NULL,
(void *)(intptr_t)s);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_740
|
static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
sPAPRPHBState *sphb)
{
ResourceProps rp;
bool is_bridge = false;
int pci_status, err;
char *buf = NULL;
uint32_t drc_index = spapr_phb_get_pci_drc_index(sphb, dev);
uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3);
uint32_t max_msi, max_msix;
if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
PCI_HEADER_TYPE_BRIDGE) {
is_bridge = true;
}
/* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
_FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
_FDT(fdt_setprop_cell(fdt, offset, "device-id",
pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
_FDT(fdt_setprop_cell(fdt, offset, "revision-id",
pci_default_read_config(dev, PCI_REVISION_ID, 1)));
_FDT(fdt_setprop_cell(fdt, offset, "class-code", ccode));
if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
_FDT(fdt_setprop_cell(fdt, offset, "interrupts",
pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
}
if (!is_bridge) {
_FDT(fdt_setprop_cell(fdt, offset, "min-grant",
pci_default_read_config(dev, PCI_MIN_GNT, 1)));
_FDT(fdt_setprop_cell(fdt, offset, "max-latency",
pci_default_read_config(dev, PCI_MAX_LAT, 1)));
}
if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
_FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
}
if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
_FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
}
_FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
/* the following fdt cells are masked off the pci status register */
pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
_FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
PCI_STATUS_DEVSEL_MASK & pci_status));
if (pci_status & PCI_STATUS_FAST_BACK) {
_FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
}
if (pci_status & PCI_STATUS_66MHZ) {
_FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
}
if (pci_status & PCI_STATUS_UDF) {
_FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
}
_FDT(fdt_setprop_string(fdt, offset, "name",
pci_find_device_name((ccode >> 16) & 0xff,
(ccode >> 8) & 0xff,
ccode & 0xff)));
buf = spapr_phb_get_loc_code(sphb, dev);
if (!buf) {
error_report("Failed setting the ibm,loc-code");
return -1;
}
err = fdt_setprop_string(fdt, offset, "ibm,loc-code", buf);
g_free(buf);
if (err < 0) {
return err;
}
if (drc_index) {
_FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
}
_FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
RESOURCE_CELLS_ADDRESS));
_FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
RESOURCE_CELLS_SIZE));
max_msi = msi_nr_vectors_allocated(dev);
if (max_msi) {
_FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi));
}
max_msix = dev->msix_entries_nr;
if (max_msix) {
_FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix));
}
populate_resource_props(dev, &rp);
_FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
_FDT(fdt_setprop(fdt, offset, "assigned-addresses",
(uint8_t *)rp.assigned, rp.assigned_len));
if (sphb->pcie_ecs && pci_is_express(dev)) {
_FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1));
}
return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_748
|
static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb)
{
MLPHeaderInfo mh;
int substr, ret;
if ((ret = ff_mlp_read_major_sync(m->avctx, &mh, gb)) != 0)
return ret;
    if (mh.group1_bits == 0) {
        av_log(m->avctx, AV_LOG_ERROR, "invalid/unknown bits per sample\n");
        return AVERROR_INVALIDDATA;
    }
    if (mh.group2_bits > mh.group1_bits) {
        av_log(m->avctx, AV_LOG_ERROR,
               "Channel group 2 cannot have more bits per sample than group 1.\n");
        return AVERROR_INVALIDDATA;
    }
    if (mh.group2_samplerate && mh.group2_samplerate != mh.group1_samplerate) {
        av_log(m->avctx, AV_LOG_ERROR,
               "Channel groups with differing sample rates are not currently supported.\n");
        return AVERROR_INVALIDDATA;
    }
    if (mh.group1_samplerate == 0) {
        av_log(m->avctx, AV_LOG_ERROR, "invalid/unknown sampling rate\n");
        return AVERROR_INVALIDDATA;
    }
    if (mh.group1_samplerate > MAX_SAMPLERATE) {
        av_log(m->avctx, AV_LOG_ERROR,
               "Sampling rate %d is greater than the supported maximum (%d).\n",
               mh.group1_samplerate, MAX_SAMPLERATE);
        return AVERROR_INVALIDDATA;
    }
    if (mh.access_unit_size > MAX_BLOCKSIZE) {
        av_log(m->avctx, AV_LOG_ERROR,
               "Block size %d is greater than the supported maximum (%d).\n",
               mh.access_unit_size, MAX_BLOCKSIZE);
        return AVERROR_INVALIDDATA;
    }
    if (mh.access_unit_size_pow2 > MAX_BLOCKSIZE_POW2) {
        av_log(m->avctx, AV_LOG_ERROR,
               "Block size pow2 %d is greater than the supported maximum (%d).\n",
               mh.access_unit_size_pow2, MAX_BLOCKSIZE_POW2);
        return AVERROR_INVALIDDATA;
    }
    if (mh.num_substreams == 0)
        return AVERROR_INVALIDDATA;
    if (m->avctx->codec_id == AV_CODEC_ID_MLP && mh.num_substreams > 2) {
        av_log(m->avctx, AV_LOG_ERROR, "MLP only supports up to 2 substreams.\n");
        return AVERROR_INVALIDDATA;
    }
    if (mh.num_substreams > MAX_SUBSTREAMS) {
        avpriv_request_sample(m->avctx,
                              "%d substreams (more than the "
                              "maximum supported by the decoder)",
                              mh.num_substreams);
        return AVERROR_PATCHWELCOME;
    }
    m->access_unit_size      = mh.access_unit_size;
    m->access_unit_size_pow2 = mh.access_unit_size_pow2;
    m->num_substreams        = mh.num_substreams;
    m->max_decoded_substream = m->num_substreams - 1;
    m->avctx->sample_rate = mh.group1_samplerate;
    m->avctx->frame_size  = mh.access_unit_size;
    m->avctx->bits_per_raw_sample = mh.group1_bits;
    if (mh.group1_bits > 16)
        m->avctx->sample_fmt = AV_SAMPLE_FMT_S32;
    else
        m->avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    m->params_valid = 1;
    for (substr = 0; substr < MAX_SUBSTREAMS; substr++)
        m->substream[substr].restart_seen = 0;
    /* Set the layout for each substream. When there's more than one, the first
     * substream is Stereo. Subsequent substreams' layouts are indicated in the
     * major sync. */
    if (m->avctx->codec_id == AV_CODEC_ID_MLP) {
        if ((substr = (mh.num_substreams > 1)))
            m->substream[0].ch_layout = AV_CH_LAYOUT_STEREO;
        m->substream[substr].ch_layout = mh.channel_layout_mlp;
    } else {
        if ((substr = (mh.num_substreams > 1)))
            m->substream[0].ch_layout = AV_CH_LAYOUT_STEREO;
        if (mh.num_substreams > 2)
            if (mh.channel_layout_thd_stream2)
                m->substream[2].ch_layout = mh.channel_layout_thd_stream2;
            else
                m->substream[2].ch_layout = mh.channel_layout_thd_stream1;
        m->substream[substr].ch_layout = mh.channel_layout_thd_stream1;
        if (m->avctx->channels <= 2 && m->substream[substr].ch_layout == AV_CH_LAYOUT_MONO && m->max_decoded_substream == 1) {
            av_log(m->avctx, AV_LOG_DEBUG, "Mono stream with 2 substreams, ignoring 2nd\n");
            m->max_decoded_substream = 0;
            if (m->avctx->channels == 2)
                m->avctx->channel_layout = AV_CH_LAYOUT_STEREO;
        }
    }
    m->needs_reordering = mh.channel_arrangement >= 18 && mh.channel_arrangement <= 20;
    return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_767
|
static AVStream * init_stream(AVFormatContext *s)
{
BinDemuxContext *bin = s->priv_data;
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return NULL;
st->codec->codec_tag = 0;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
if (!bin->width) {
st->codec->width = (80<<3);
st->codec->height = (25<<4);
}
avpriv_set_pts_info(st, 60, bin->framerate.den, bin->framerate.num);
/* simulate tty display speed */
bin->chars_per_frame = FFMAX(av_q2d(st->time_base) * bin->chars_per_frame, 1);
return st;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_781
|
static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
SheepdogAIOCB *acb;
int ret;
if (bs->growable && sector_num + nb_sectors > bs->total_sectors) {
ret = sd_truncate(bs, (sector_num + nb_sectors) * BDRV_SECTOR_SIZE);
if (ret < 0) {
return ret;
}
bs->total_sectors = sector_num + nb_sectors;
}
acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors);
acb->aio_done_func = sd_write_done;
acb->aiocb_type = AIOCB_WRITE_UDATA;
ret = sd_co_rw_vector(acb);
if (ret <= 0) {
qemu_aio_release(acb);
return ret;
}
qemu_coroutine_yield();
return acb->ret;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_794
|
static void sdl_send_mouse_event(int dx, int dy, int x, int y, int state)
{
static uint32_t bmap[INPUT_BUTTON__MAX] = {
[INPUT_BUTTON_LEFT] = SDL_BUTTON(SDL_BUTTON_LEFT),
[INPUT_BUTTON_MIDDLE] = SDL_BUTTON(SDL_BUTTON_MIDDLE),
[INPUT_BUTTON_RIGHT] = SDL_BUTTON(SDL_BUTTON_RIGHT),
[INPUT_BUTTON_WHEEL_UP] = SDL_BUTTON(SDL_BUTTON_WHEELUP),
[INPUT_BUTTON_WHEEL_DOWN] = SDL_BUTTON(SDL_BUTTON_WHEELDOWN),
};
static uint32_t prev_state;
if (prev_state != state) {
qemu_input_update_buttons(dcl->con, bmap, prev_state, state);
prev_state = state;
}
if (qemu_input_is_absolute()) {
qemu_input_queue_abs(dcl->con, INPUT_AXIS_X, x,
real_screen->w);
qemu_input_queue_abs(dcl->con, INPUT_AXIS_Y, y,
real_screen->h);
} else {
if (guest_cursor) {
x -= guest_x;
y -= guest_y;
guest_x += x;
guest_y += y;
dx = x;
dy = y;
}
qemu_input_queue_rel(dcl->con, INPUT_AXIS_X, dx);
qemu_input_queue_rel(dcl->con, INPUT_AXIS_Y, dy);
}
qemu_input_event_sync();
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_797
|
static void device_initfn(Object *obj)
{
DeviceState *dev = DEVICE(obj);
Property *prop;
if (qdev_hotplug) {
dev->hotplugged = 1;
qdev_hot_added = true;
}
dev->instance_id_alias = -1;
dev->state = DEV_STATE_CREATED;
qdev_prop_set_defaults(dev, qdev_get_props(dev));
for (prop = qdev_get_props(dev); prop && prop->name; prop++) {
qdev_property_add_legacy(dev, prop, NULL);
qdev_property_add_static(dev, prop, NULL);
}
object_property_add_str(OBJECT(dev), "type", qdev_get_type, NULL, NULL);
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_837
|
static void scsi_write_data(SCSIRequest *req)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
/* No data transfer may already be in progress */
assert(r->req.aiocb == NULL);
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
DPRINTF("Data transfer direction invalid\n");
scsi_write_complete(r, -EINVAL);
return;
}
if (!r->req.sg && !r->qiov.size) {
/* Called for the first time. Ask the driver to send us more data. */
r->started = true;
scsi_write_complete(r, 0);
return;
}
if (s->tray_open) {
scsi_write_complete(r, -ENOMEDIUM);
return;
}
if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
r->req.cmd.buf[0] == VERIFY_16) {
if (r->req.sg) {
scsi_dma_complete(r, 0);
} else {
scsi_write_complete(r, 0);
}
return;
}
if (r->req.sg) {
dma_acct_start(s->qdev.conf.bs, &r->acct, r->req.sg, BDRV_ACCT_WRITE);
r->req.resid -= r->req.sg->size;
r->req.aiocb = dma_bdrv_write(s->qdev.conf.bs, r->req.sg, r->sector,
scsi_dma_complete, r);
} else {
n = r->qiov.size / 512;
bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE);
r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, r->sector, &r->qiov, n,
scsi_write_complete, r);
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_848
|
static void idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
int sa, sb;
sa = ((int64_t)*phigh < 0);
if (sa)
neg128(plow, phigh);
sb = (b < 0);
if (sb)
b = -b;
div64(plow, phigh, b);
if (sa ^ sb)
*plow = - *plow;
if (sa)
*phigh = - *phigh;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_855
|
void bios_linker_loader_alloc(GArray *linker,
const char *file,
uint32_t alloc_align,
bool alloc_fseg)
{
BiosLinkerLoaderEntry entry;
assert(!(alloc_align & (alloc_align - 1)));
memset(&entry, 0, sizeof entry);
strncpy(entry.alloc.file, file, sizeof entry.alloc.file - 1);
entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ALLOCATE);
entry.alloc.align = cpu_to_le32(alloc_align);
entry.alloc.zone = cpu_to_le32(alloc_fseg ?
BIOS_LINKER_LOADER_ALLOC_ZONE_FSEG :
BIOS_LINKER_LOADER_ALLOC_ZONE_HIGH);
/* Alloc entries must come first, so prepend them */
g_array_prepend_vals(linker, &entry, sizeof entry);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_860
|
static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
{
/* We don't actually refresh here, but just return data queried in
* iscsi_open(): iscsi targets don't change their limits. */
IscsiLun *iscsilun = bs->opaque;
uint64_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;
bs->bl.request_alignment = iscsilun->block_size;
if (iscsilun->bl.max_xfer_len) {
max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len);
}
if (max_xfer_len * iscsilun->block_size < INT_MAX) {
bs->bl.max_transfer = max_xfer_len * iscsilun->block_size;
}
if (iscsilun->lbp.lbpu) {
if (iscsilun->bl.max_unmap < 0xffffffff / iscsilun->block_size) {
bs->bl.max_pdiscard =
iscsilun->bl.max_unmap * iscsilun->block_size;
}
bs->bl.pdiscard_alignment =
iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
} else {
bs->bl.pdiscard_alignment = iscsilun->block_size;
}
if (iscsilun->bl.max_ws_len < 0xffffffff / iscsilun->block_size) {
bs->bl.max_pwrite_zeroes =
iscsilun->bl.max_ws_len * iscsilun->block_size;
}
if (iscsilun->lbp.lbpws) {
bs->bl.pwrite_zeroes_alignment =
iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
} else {
bs->bl.pwrite_zeroes_alignment = iscsilun->block_size;
}
if (iscsilun->bl.opt_xfer_len &&
iscsilun->bl.opt_xfer_len < INT_MAX / iscsilun->block_size) {
bs->bl.opt_transfer = pow2floor(iscsilun->bl.opt_xfer_len *
iscsilun->block_size);
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_883
|
static void tap_receive(void *opaque, const uint8_t *buf, size_t size)
{
TAPState *s = opaque;
int ret;
for(;;) {
ret = write(s->fd, buf, size);
if (ret < 0 && (errno == EINTR || errno == EAGAIN)) {
} else {
break;
}
}
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_886
|
static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
DriveInfo *dinfo;
Error *local_err = NULL;
if (!dev->conf.bs) {
scsi_realize(dev, &local_err);
assert(local_err);
error_propagate(errp, local_err);
return;
}
dinfo = drive_get_by_blockdev(dev->conf.bs);
if (dinfo->media_cd) {
scsi_cd_realize(dev, errp);
} else {
scsi_hd_realize(dev, errp);
}
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_899
|
static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
{
BDRVSheepdogState *s = bs->opaque;
int ret, fd;
uint32_t new_vid;
SheepdogInode *inode;
unsigned int datalen;
dprintf("sn_info: name %s id_str %s s: name %s vm_state_size %" PRId64 " "
"is_snapshot %d\n", sn_info->name, sn_info->id_str,
s->name, sn_info->vm_state_size, s->is_snapshot);
if (s->is_snapshot) {
error_report("You can't create a snapshot of a snapshot VDI, "
"%s (%" PRIu32 ").", s->name, s->inode.vdi_id);
return -EINVAL;
}
dprintf("%s %s\n", sn_info->name, sn_info->id_str);
s->inode.vm_state_size = sn_info->vm_state_size;
s->inode.vm_clock_nsec = sn_info->vm_clock_nsec;
/* It appears that inode.tag does not require a NUL terminator,
* which means this use of strncpy is ok.
*/
strncpy(s->inode.tag, sn_info->name, sizeof(s->inode.tag));
/* we don't need to update entire object */
datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id);
/* refresh inode. */
fd = connect_to_sdog(s->addr, s->port);
if (fd < 0) {
ret = fd;
goto cleanup;
}
ret = write_object(fd, (char *)&s->inode, vid_to_vdi_oid(s->inode.vdi_id),
s->inode.nr_copies, datalen, 0, false, s->cache_enabled);
if (ret < 0) {
error_report("failed to write snapshot's inode.");
goto cleanup;
}
ret = do_sd_create(s->name, s->inode.vdi_size, s->inode.vdi_id, &new_vid, 1,
s->addr, s->port);
if (ret < 0) {
error_report("failed to create inode for snapshot. %s",
strerror(errno));
goto cleanup;
}
inode = (SheepdogInode *)g_malloc(datalen);
ret = read_object(fd, (char *)inode, vid_to_vdi_oid(new_vid),
s->inode.nr_copies, datalen, 0, s->cache_enabled);
if (ret < 0) {
error_report("failed to read new inode info. %s", strerror(errno));
goto cleanup;
}
memcpy(&s->inode, inode, datalen);
dprintf("s->inode: name %s snap_id %x oid %x\n",
s->inode.name, s->inode.snap_id, s->inode.vdi_id);
cleanup:
closesocket(fd);
return ret;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_906
|
static int vnc_display_listen(VncDisplay *vd,
SocketAddress **saddr,
size_t nsaddr,
SocketAddress **wsaddr,
size_t nwsaddr,
Error **errp)
{
size_t i;
for (i = 0; i < nsaddr; i++) {
if (vnc_display_listen_addr(vd, saddr[i],
"vnc-listen",
&vd->lsock,
&vd->lsock_tag,
&vd->nlsock,
errp) < 0) {
return -1;
}
}
for (i = 0; i < nwsaddr; i++) {
if (vnc_display_listen_addr(vd, wsaddr[i],
"vnc-ws-listen",
&vd->lwebsock,
&vd->lwebsock_tag,
&vd->nlwebsock,
errp) < 0) {
return -1;
}
}
return 0;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_913
|
static char *enumerate_cpus(unsigned long *cpus, int max_cpus)
{
int cpu;
bool first = true;
GString *s = g_string_new(NULL);
for (cpu = find_first_bit(cpus, max_cpus);
cpu < max_cpus;
cpu = find_next_bit(cpus, max_cpus, cpu + 1)) {
g_string_append_printf(s, "%s%d", first ? "" : " ", cpu);
first = false;
}
return g_string_free(s, FALSE);
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_921
|
MigrationState *exec_start_outgoing_migration(const char *command,
int64_t bandwidth_limit,
int async)
{
FdMigrationState *s;
FILE *f;
s = qemu_mallocz(sizeof(*s));
if (s == NULL) {
dprintf("Unable to allocate FdMigrationState\n");
goto err;
}
f = popen(command, "w");
if (f == NULL) {
dprintf("Unable to popen exec target\n");
goto err_after_alloc;
}
s->fd = fileno(f);
if (s->fd == -1) {
dprintf("Unable to retrieve file descriptor for popen'd handle\n");
goto err_after_open;
}
if (fcntl(s->fd, F_SETFD, O_NONBLOCK) == -1) {
dprintf("Unable to set nonblocking mode on file descriptor\n");
goto err_after_open;
}
s->opaque = qemu_popen(f, "w");
s->get_error = file_errno;
s->write = file_write;
s->mig_state.cancel = migrate_fd_cancel;
s->mig_state.get_status = migrate_fd_get_status;
s->mig_state.release = migrate_fd_release;
s->state = MIG_STATE_ACTIVE;
s->detach = !async;
s->bandwidth_limit = bandwidth_limit;
if (s->detach == 1) {
dprintf("detaching from monitor\n");
monitor_suspend();
s->detach = 2;
}
migrate_fd_connect(s);
return &s->mig_state;
err_after_open:
pclose(f);
err_after_alloc:
qemu_free(s);
err:
return NULL;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_924
|
static void register_core_rtas(void)
{
spapr_rtas_register("display-character", rtas_display_character);
spapr_rtas_register("get-time-of-day", rtas_get_time_of_day);
spapr_rtas_register("power-off", rtas_power_off);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_937
|
static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong liobn = args[0];
target_ulong ioba = args[1];
target_ulong tce = args[2];
VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, liobn);
VIOsPAPR_RTCE *rtce;
if (!dev) {
hcall_dprintf("LIOBN 0x" TARGET_FMT_lx " does not exist\n", liobn);
return H_PARAMETER;
}
ioba &= ~(SPAPR_VIO_TCE_PAGE_SIZE - 1);
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_vio_put_tce on %s ioba 0x" TARGET_FMT_lx
" TCE 0x" TARGET_FMT_lx "\n", dev->qdev.id, ioba, tce);
#endif
if (ioba >= dev->rtce_window_size) {
hcall_dprintf("Out-of-bounds IOBA 0x" TARGET_FMT_lx "\n", ioba);
return H_PARAMETER;
}
rtce = dev->rtce_table + (ioba >> SPAPR_VIO_TCE_PAGE_SHIFT);
rtce->tce = tce;
return H_SUCCESS;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_941
|
static void nvme_get_bootindex(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
NvmeCtrl *s = NVME(obj);
visit_type_int32(v, &s->conf.bootindex, name, errp);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_944
|
ff_rm_parse_packet (AVFormatContext *s, AVIOContext *pb,
AVStream *st, RMStream *ast, int len, AVPacket *pkt,
int *seq, int flags, int64_t timestamp)
{
RMDemuxContext *rm = s->priv_data;
int ret;
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
rm->current_stream= st->id;
ret = rm_assemble_video_frame(s, pb, rm, ast, pkt, len, seq, &timestamp);
if(ret)
return ret < 0 ? ret : -1; //got partial frame or error
} else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if ((ast->deint_id == DEINT_ID_GENR) ||
(ast->deint_id == DEINT_ID_INT4) ||
(ast->deint_id == DEINT_ID_SIPR)) {
int x;
int sps = ast->sub_packet_size;
int cfs = ast->coded_framesize;
int h = ast->sub_packet_h;
int y = ast->sub_packet_cnt;
int w = ast->audio_framesize;
if (flags & 2)
y = ast->sub_packet_cnt = 0;
if (!y)
ast->audiotimestamp = timestamp;
switch (ast->deint_id) {
case DEINT_ID_INT4:
for (x = 0; x < h/2; x++)
avio_read(pb, ast->pkt.data+x*2*w+y*cfs, cfs);
break;
case DEINT_ID_GENR:
for (x = 0; x < w/sps; x++)
avio_read(pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
break;
case DEINT_ID_SIPR:
avio_read(pb, ast->pkt.data + y * w, w);
break;
}
if (++(ast->sub_packet_cnt) < h)
return -1;
if (ast->deint_id == DEINT_ID_SIPR)
ff_rm_reorder_sipr_data(ast->pkt.data, h, w);
ast->sub_packet_cnt = 0;
rm->audio_stream_num = st->index;
rm->audio_pkt_cnt = h * w / st->codec->block_align;
} else if ((ast->deint_id == DEINT_ID_VBRF) ||
(ast->deint_id == DEINT_ID_VBRS)) {
int x;
rm->audio_stream_num = st->index;
ast->sub_packet_cnt = (avio_rb16(pb) & 0xf0) >> 4;
if (ast->sub_packet_cnt) {
for (x = 0; x < ast->sub_packet_cnt; x++)
ast->sub_packet_lengths[x] = avio_rb16(pb);
rm->audio_pkt_cnt = ast->sub_packet_cnt;
ast->audiotimestamp = timestamp;
} else
return -1;
} else {
av_get_packet(pb, pkt, len);
rm_ac3_swap_bytes(st, pkt);
}
} else
av_get_packet(pb, pkt, len);
pkt->stream_index = st->index;
#if 0
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
if(st->codec->codec_id == AV_CODEC_ID_RV20){
int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
av_log(s, AV_LOG_DEBUG, "%d %"PRId64" %d\n", *timestamp, *timestamp*512LL/25, seq);
seq |= (timestamp&~0x3FFF);
if(seq - timestamp > 0x2000) seq -= 0x4000;
if(seq - timestamp < -0x2000) seq += 0x4000;
}
}
#endif
pkt->pts = timestamp;
if (flags & 2)
pkt->flags |= AV_PKT_FLAG_KEY;
return st->codec->codec_type == AVMEDIA_TYPE_AUDIO ? rm->audio_pkt_cnt : 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_946
|
Object *object_resolve_path_component(Object *parent, const gchar *part)
{
ObjectProperty *prop = object_property_find(parent, part, NULL);
if (prop == NULL) {
return NULL;
}
if (object_property_is_link(prop)) {
return *(Object **)prop->opaque;
} else if (object_property_is_child(prop)) {
return prop->opaque;
} else {
return NULL;
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_961
|
static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
{
union viosrp_iu *iu = &req->iu;
struct srp_login_rsp *rsp = &iu->srp.login_rsp;
uint64_t tag = iu->srp.rsp.tag;
trace_spapr_vscsi__process_login();
/* TODO handle case that requested size is wrong and
* buffer format is wrong
*/
memset(iu, 0, sizeof(struct srp_login_rsp));
rsp->opcode = SRP_LOGIN_RSP;
/* Don't advertise quite as many request as we support to
* keep room for management stuff etc...
*/
rsp->req_lim_delta = cpu_to_be32(VSCSI_REQ_LIMIT-2);
rsp->tag = tag;
rsp->max_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
rsp->max_ti_iu_len = cpu_to_be32(sizeof(union srp_iu));
/* direct and indirect */
rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
vscsi_send_iu(s, req, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_963
|
void cpu_tick_set_count(CPUTimer *timer, uint64_t count)
{
uint64_t real_count = count & ~timer->disabled_mask;
uint64_t disabled_bit = count & timer->disabled_mask;
int64_t vm_clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
cpu_to_timer_ticks(real_count, timer->frequency);
TIMER_DPRINTF("%s set_count count=0x%016lx (%s) p=%p\n",
timer->name, real_count,
timer->disabled?"disabled":"enabled", timer);
timer->disabled = disabled_bit ? 1 : 0;
timer->clock_offset = vm_clock_offset;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_971
|
void commit_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, BlockDriverState *top, int64_t speed,
BlockdevOnError on_error, const char *backing_file_str,
const char *filter_node_name, Error **errp)
{
CommitBlockJob *s;
BlockReopenQueue *reopen_queue = NULL;
int orig_overlay_flags;
int orig_base_flags;
BlockDriverState *iter;
BlockDriverState *overlay_bs;
BlockDriverState *commit_top_bs = NULL;
Error *local_err = NULL;
int ret;
assert(top != bs);
    if (top == base) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        return;
    }
    overlay_bs = bdrv_find_overlay(bs, top);
    if (overlay_bs == NULL) {
        error_setg(errp, "Could not find overlay image for %s:", top->filename);
        return;
    }
    s = block_job_create(job_id, &commit_job_driver, bs, 0, BLK_PERM_ALL,
                         speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp);
    if (!s) {
        return;
    }
    orig_base_flags    = bdrv_get_flags(base);
    orig_overlay_flags = bdrv_get_flags(overlay_bs);
    /* convert base & overlay_bs to r/w, if necessary */
    if (!(orig_base_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL,
                                         orig_base_flags | BDRV_O_RDWR);
    }
    if (!(orig_overlay_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs, NULL,
                                         orig_overlay_flags | BDRV_O_RDWR);
    }
    if (reopen_queue) {
        bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            goto fail;
        }
    }
    /* Insert commit_top block node above top, so we can block consistent read
     * on the backing chain below it */
    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, filter_node_name, 0,
                                         errp);
    if (commit_top_bs == NULL) {
        goto fail;
    }
    commit_top_bs->total_sectors = top->total_sectors;
    bdrv_set_aio_context(commit_top_bs, bdrv_get_aio_context(top));
    bdrv_set_backing_hd(commit_top_bs, top, &local_err);
    if (local_err) {
        bdrv_unref(commit_top_bs);
        commit_top_bs = NULL;
        error_propagate(errp, local_err);
        goto fail;
    }
    bdrv_set_backing_hd(overlay_bs, commit_top_bs, &local_err);
    if (local_err) {
        bdrv_unref(commit_top_bs);
        commit_top_bs = NULL;
        error_propagate(errp, local_err);
        goto fail;
    }
    s->commit_top_bs = commit_top_bs;
    bdrv_unref(commit_top_bs);
    /* Block all nodes between top and base, because they will
     * disappear from the chain after this operation. */
    assert(bdrv_chain_contains(top, base));
    for (iter = top; iter != base; iter = backing_bs(iter)) {
        /* XXX BLK_PERM_WRITE needs to be allowed so we don't block ourselves
         * at s->base (if writes are blocked for a node, they are also blocked
         * for its backing file). The other options would be a second filter
         * driver above s->base. */
        ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                 BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                 errp);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
    if (ret < 0) {
        goto fail;
    }
    /* overlay_bs must be blocked because it needs to be modified to
     * update the backing image string. */
    ret = block_job_add_bdrv(&s->common, "overlay of top", overlay_bs,
                             BLK_PERM_GRAPH_MOD, BLK_PERM_ALL, errp);
    if (ret < 0) {
        goto fail;
    }
    s->base = blk_new(BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_WRITE
                      | BLK_PERM_RESIZE,
                      BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_GRAPH_MOD
                      | BLK_PERM_WRITE_UNCHANGED);
    ret = blk_insert_bs(s->base, base, errp);
    if (ret < 0) {
        goto fail;
    }
    /* Required permissions are already taken with block_job_add_bdrv() */
    s->top = blk_new(0, BLK_PERM_ALL);
    ret = blk_insert_bs(s->top, top, errp);
    if (ret < 0) {
        goto fail;
    }
    s->active = bs;
    s->base_flags         = orig_base_flags;
    s->orig_overlay_flags = orig_overlay_flags;
    s->backing_file_str = g_strdup(backing_file_str);
    s->on_error = on_error;
    trace_commit_start(bs, base, top, s);
    block_job_start(&s->common);
    return;
fail:
    if (s->base) {
        blk_unref(s->base);
    }
    if (s->top) {
        blk_unref(s->top);
    }
    if (commit_top_bs) {
        bdrv_set_backing_hd(overlay_bs, top, &error_abort);
    }
    block_job_early_fail(&s->common);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_982
|
int bdrv_is_removable(BlockDriverState *bs)
{
return bs->removable;
}
The vulnerability label is: Non-vulnerable
|
devign_test_set_data_1035
|
static int null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_1038
|
void qmp_blockdev_add(BlockdevOptions *options, Error **errp)
{
QmpOutputVisitor *ov = qmp_output_visitor_new();
QObject *obj;
QDict *qdict;
Error *local_err = NULL;
/* Require an ID in the top level */
if (!options->has_id) {
error_setg(errp, "Block device needs an ID");
goto fail;
}
/* TODO Sort it out in raw-posix and drive_init: Reject aio=native with
* cache.direct=false instead of silently switching to aio=threads, except
* if called from drive_init.
*
* For now, simply forbidding the combination for all drivers will do. */
if (options->has_aio && options->aio == BLOCKDEV_AIO_OPTIONS_NATIVE) {
bool direct = options->cache->has_direct && options->cache->direct;
if (!options->has_cache && !direct) {
error_setg(errp, "aio=native requires cache.direct=true");
goto fail;
}
}
visit_type_BlockdevOptions(qmp_output_get_visitor(ov),
&options, NULL, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto fail;
}
obj = qmp_output_get_qobject(ov);
qdict = qobject_to_qdict(obj);
qdict_flatten(qdict);
blockdev_init(NULL, qdict, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto fail;
}
fail:
qmp_output_visitor_cleanup(ov);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_1040
|
void do_divwuo (void)
{
if (likely((uint32_t)T1 != 0)) {
xer_ov = 0;
T0 = (uint32_t)T0 / (uint32_t)T1;
} else {
xer_so = 1;
xer_ov = 1;
T0 = 0;
}
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_1046
|
static void kvmclock_pre_save(void *opaque)
{
KVMClockState *s = opaque;
struct kvm_clock_data data;
int ret;
if (s->clock_valid) {
return;
}
ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
if (ret < 0) {
fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(ret));
data.clock = 0;
}
s->clock = data.clock;
/*
* If the VM is stopped, declare the clock state valid to avoid re-reading
* it on next vmsave (which would return a different value). Will be reset
* when the VM is continued.
*/
s->clock_valid = !runstate_is_running();
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_1056
|
static void handle_satn(ESPState *s)
{
uint8_t buf[32];
int len;
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn;
return;
}
len = get_cmd(s, buf);
if (len)
do_cmd(s, buf);
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_1059
|
static int check_checksum(ByteIOContext *bc){
unsigned long checksum= get_checksum(bc);
// return checksum != get_be32(bc);
av_log(NULL, AV_LOG_ERROR, "%08X %08X\n", checksum, (int)get_be32(bc));
return 0;
}
The vulnerability label is: Vulnerable
|
devign_test_set_data_1060
|
int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
const char *version, const char *serial, const char *model,
uint64_t wwn,
uint32_t cylinders, uint32_t heads, uint32_t secs,
int chs_trans)
{
uint64_t nb_sectors;
s->blk = blk;
s->drive_kind = kind;
blk_get_geometry(blk, &nb_sectors);
s->cylinders = cylinders;
s->heads = heads;
s->sectors = secs;
s->chs_trans = chs_trans;
s->nb_sectors = nb_sectors;
s->wwn = wwn;
/* The SMART values should be preserved across power cycles
but they aren't. */
s->smart_enabled = 1;
s->smart_autosave = 1;
s->smart_errors = 0;
s->smart_selftest_count = 0;
if (kind == IDE_CD) {
blk_set_dev_ops(blk, &ide_cd_block_ops, s);
blk_set_guest_block_size(blk, 2048);
} else {
if (!blk_is_inserted(s->blk)) {
error_report("Device needs media, but drive is empty");
return -1;
}
if (blk_is_read_only(blk)) {
error_report("Can't use a read-only drive");
return -1;
}
blk_set_dev_ops(blk, &ide_hd_block_ops, s);
}
if (serial) {
pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
} else {
snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
"QM%05d", s->drive_serial);
}
if (model) {
pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
} else {
switch (kind) {
case IDE_CD:
strcpy(s->drive_model_str, "QEMU DVD-ROM");
break;
case IDE_CFATA:
strcpy(s->drive_model_str, "QEMU MICRODRIVE");
break;
default:
strcpy(s->drive_model_str, "QEMU HARDDISK");
break;
}
}
if (version) {
pstrcpy(s->version, sizeof(s->version), version);
} else {
pstrcpy(s->version, sizeof(s->version), qemu_get_version());
}
ide_reset(s);
blk_iostatus_enable(blk);
return 0;
}
The vulnerability label is: Non-vulnerable
|